diff options
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/scsi/lpfc/lpfc.h | 49 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_attr.c | 83 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_crtn.h | 59 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_ct.c | 2 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_debugfs.c | 2 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_els.c | 4 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hbadisc.c | 34 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hw.h | 140 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_hw4.h | 2141 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_init.c | 3800 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_mbox.c | 2 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_mem.c | 204 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_nportdisc.c | 2 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_scsi.c | 492 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_scsi.h | 2 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli.c | 1263 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli.h | 27 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_sli4.h | 467 | ||||
-rw-r--r-- | drivers/scsi/lpfc/lpfc_vport.c | 23 |
19 files changed, 8485 insertions, 311 deletions
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 6c24c9aabe7b..13ac108a244c 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -105,9 +105,11 @@ struct lpfc_dma_pool { | |||
105 | }; | 105 | }; |
106 | 106 | ||
107 | struct hbq_dmabuf { | 107 | struct hbq_dmabuf { |
108 | struct lpfc_dmabuf hbuf; | ||
108 | struct lpfc_dmabuf dbuf; | 109 | struct lpfc_dmabuf dbuf; |
109 | uint32_t size; | 110 | uint32_t size; |
110 | uint32_t tag; | 111 | uint32_t tag; |
112 | struct lpfc_rcqe rcqe; | ||
111 | }; | 113 | }; |
112 | 114 | ||
113 | /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ | 115 | /* Priority bit. Set value to exceed low water mark in lpfc_mem. */ |
@@ -141,7 +143,10 @@ typedef struct lpfc_vpd { | |||
141 | } rev; | 143 | } rev; |
142 | struct { | 144 | struct { |
143 | #ifdef __BIG_ENDIAN_BITFIELD | 145 | #ifdef __BIG_ENDIAN_BITFIELD |
144 | uint32_t rsvd2 :24; /* Reserved */ | 146 | uint32_t rsvd3 :19; /* Reserved */ |
147 | uint32_t cdss : 1; /* Configure Data Security SLI */ | ||
148 | uint32_t rsvd2 : 3; /* Reserved */ | ||
149 | uint32_t cbg : 1; /* Configure BlockGuard */ | ||
145 | uint32_t cmv : 1; /* Configure Max VPIs */ | 150 | uint32_t cmv : 1; /* Configure Max VPIs */ |
146 | uint32_t ccrp : 1; /* Config Command Ring Polling */ | 151 | uint32_t ccrp : 1; /* Config Command Ring Polling */ |
147 | uint32_t csah : 1; /* Configure Synchronous Abort Handling */ | 152 | uint32_t csah : 1; /* Configure Synchronous Abort Handling */ |
@@ -159,7 +164,10 @@ typedef struct lpfc_vpd { | |||
159 | uint32_t csah : 1; /* Configure Synchronous Abort Handling */ | 164 | uint32_t csah : 1; /* Configure Synchronous Abort Handling */ |
160 | uint32_t ccrp : 1; /* Config Command Ring Polling */ | 165 | uint32_t ccrp : 1; /* Config Command Ring Polling */ |
161 | uint32_t cmv : 1; /* Configure Max VPIs */ | 166 | uint32_t cmv : 1; /* Configure Max VPIs */ |
162 | uint32_t rsvd2 :24; /* Reserved */ | 167 | uint32_t cbg : 1; /* Configure BlockGuard */ |
168 | uint32_t rsvd2 : 3; /* Reserved */ | ||
169 | uint32_t cdss : 1; /* Configure Data Security SLI */ | ||
170 | uint32_t rsvd3 :19; /* Reserved */ | ||
163 | #endif | 171 | #endif |
164 | } sli3Feat; | 172 | } sli3Feat; |
165 | } lpfc_vpd_t; | 173 | } lpfc_vpd_t; |
@@ -280,6 +288,9 @@ struct lpfc_vport { | |||
280 | enum discovery_state port_state; | 288 | enum discovery_state port_state; |
281 | 289 | ||
282 | uint16_t vpi; | 290 | uint16_t vpi; |
291 | uint16_t vfi; | ||
292 | uint8_t vfi_state; | ||
293 | #define LPFC_VFI_REGISTERED 0x1 | ||
283 | 294 | ||
284 | uint32_t fc_flag; /* FC flags */ | 295 | uint32_t fc_flag; /* FC flags */ |
285 | /* Several of these flags are HBA centric and should be moved to | 296 | /* Several of these flags are HBA centric and should be moved to |
@@ -392,6 +403,9 @@ struct lpfc_vport { | |||
392 | #endif | 403 | #endif |
393 | uint8_t stat_data_enabled; | 404 | uint8_t stat_data_enabled; |
394 | uint8_t stat_data_blocked; | 405 | uint8_t stat_data_blocked; |
406 | struct list_head rcv_buffer_list; | ||
407 | uint32_t vport_flag; | ||
408 | #define STATIC_VPORT 1 | ||
395 | }; | 409 | }; |
396 | 410 | ||
397 | struct hbq_s { | 411 | struct hbq_s { |
@@ -494,6 +508,7 @@ struct lpfc_hba { | |||
494 | #define LPFC_SLI3_CRP_ENABLED 0x08 | 508 | #define LPFC_SLI3_CRP_ENABLED 0x08 |
495 | #define LPFC_SLI3_INB_ENABLED 0x10 | 509 | #define LPFC_SLI3_INB_ENABLED 0x10 |
496 | #define LPFC_SLI3_BG_ENABLED 0x20 | 510 | #define LPFC_SLI3_BG_ENABLED 0x20 |
511 | #define LPFC_SLI3_DSS_ENABLED 0x40 | ||
497 | uint32_t iocb_cmd_size; | 512 | uint32_t iocb_cmd_size; |
498 | uint32_t iocb_rsp_size; | 513 | uint32_t iocb_rsp_size; |
499 | 514 | ||
@@ -507,8 +522,13 @@ struct lpfc_hba { | |||
507 | 522 | ||
508 | uint32_t hba_flag; /* hba generic flags */ | 523 | uint32_t hba_flag; /* hba generic flags */ |
509 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ | 524 | #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ |
510 | 525 | #define DEFER_ERATT 0x2 /* Deferred error attention in progress */ | |
511 | #define DEFER_ERATT 0x4 /* Deferred error attention in progress */ | 526 | #define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ |
527 | #define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ | ||
528 | #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ | ||
529 | #define FCP_XRI_ABORT_EVENT 0x20 | ||
530 | #define ELS_XRI_ABORT_EVENT 0x40 | ||
531 | #define ASYNC_EVENT 0x80 | ||
512 | struct lpfc_dmabuf slim2p; | 532 | struct lpfc_dmabuf slim2p; |
513 | 533 | ||
514 | MAILBOX_t *mbox; | 534 | MAILBOX_t *mbox; |
@@ -567,6 +587,9 @@ struct lpfc_hba { | |||
567 | uint32_t cfg_poll; | 587 | uint32_t cfg_poll; |
568 | uint32_t cfg_poll_tmo; | 588 | uint32_t cfg_poll_tmo; |
569 | uint32_t cfg_use_msi; | 589 | uint32_t cfg_use_msi; |
590 | uint32_t cfg_fcp_imax; | ||
591 | uint32_t cfg_fcp_wq_count; | ||
592 | uint32_t cfg_fcp_eq_count; | ||
570 | uint32_t cfg_sg_seg_cnt; | 593 | uint32_t cfg_sg_seg_cnt; |
571 | uint32_t cfg_prot_sg_seg_cnt; | 594 | uint32_t cfg_prot_sg_seg_cnt; |
572 | uint32_t cfg_sg_dma_buf_size; | 595 | uint32_t cfg_sg_dma_buf_size; |
@@ -576,6 +599,8 @@ struct lpfc_hba { | |||
576 | uint32_t cfg_enable_hba_reset; | 599 | uint32_t cfg_enable_hba_reset; |
577 | uint32_t cfg_enable_hba_heartbeat; | 600 | uint32_t cfg_enable_hba_heartbeat; |
578 | uint32_t cfg_enable_bg; | 601 | uint32_t cfg_enable_bg; |
602 | uint32_t cfg_enable_fip; | ||
603 | uint32_t cfg_log_verbose; | ||
579 | 604 | ||
580 | lpfc_vpd_t vpd; /* vital product data */ | 605 | lpfc_vpd_t vpd; /* vital product data */ |
581 | 606 | ||
@@ -659,7 +684,8 @@ struct lpfc_hba { | |||
659 | /* pci_mem_pools */ | 684 | /* pci_mem_pools */ |
660 | struct pci_pool *lpfc_scsi_dma_buf_pool; | 685 | struct pci_pool *lpfc_scsi_dma_buf_pool; |
661 | struct pci_pool *lpfc_mbuf_pool; | 686 | struct pci_pool *lpfc_mbuf_pool; |
662 | struct pci_pool *lpfc_hbq_pool; | 687 | struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ |
688 | struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ | ||
663 | struct lpfc_dma_pool lpfc_mbuf_safety_pool; | 689 | struct lpfc_dma_pool lpfc_mbuf_safety_pool; |
664 | 690 | ||
665 | mempool_t *mbox_mem_pool; | 691 | mempool_t *mbox_mem_pool; |
@@ -675,6 +701,14 @@ struct lpfc_hba { | |||
675 | struct lpfc_vport *pport; /* physical lpfc_vport pointer */ | 701 | struct lpfc_vport *pport; /* physical lpfc_vport pointer */ |
676 | uint16_t max_vpi; /* Maximum virtual nports */ | 702 | uint16_t max_vpi; /* Maximum virtual nports */ |
677 | #define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ | 703 | #define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ |
704 | uint16_t max_vports; /* | ||
705 | * For IOV HBAs max_vpi can change | ||
706 | * after a reset. max_vports is max | ||
707 | * number of vports present. This can | ||
708 | * be greater than max_vpi. | ||
709 | */ | ||
710 | uint16_t vpi_base; | ||
711 | uint16_t vfi_base; | ||
678 | unsigned long *vpi_bmask; /* vpi allocation table */ | 712 | unsigned long *vpi_bmask; /* vpi allocation table */ |
679 | 713 | ||
680 | /* Data structure used by fabric iocb scheduler */ | 714 | /* Data structure used by fabric iocb scheduler */ |
@@ -733,6 +767,11 @@ struct lpfc_hba { | |||
733 | /* Maximum number of events that can be outstanding at any time*/ | 767 | /* Maximum number of events that can be outstanding at any time*/ |
734 | #define LPFC_MAX_EVT_COUNT 512 | 768 | #define LPFC_MAX_EVT_COUNT 512 |
735 | atomic_t fast_event_count; | 769 | atomic_t fast_event_count; |
770 | struct lpfc_fcf fcf; | ||
771 | uint8_t fc_map[3]; | ||
772 | uint8_t valid_vlan; | ||
773 | uint16_t vlan_id; | ||
774 | struct list_head fcf_conn_rec_list; | ||
736 | }; | 775 | }; |
737 | 776 | ||
738 | static inline struct Scsi_Host * | 777 | static inline struct Scsi_Host * |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index c14f0cbdb125..82016fc672b1 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -30,8 +30,10 @@ | |||
30 | #include <scsi/scsi_tcq.h> | 30 | #include <scsi/scsi_tcq.h> |
31 | #include <scsi/scsi_transport_fc.h> | 31 | #include <scsi/scsi_transport_fc.h> |
32 | 32 | ||
33 | #include "lpfc_hw4.h" | ||
33 | #include "lpfc_hw.h" | 34 | #include "lpfc_hw.h" |
34 | #include "lpfc_sli.h" | 35 | #include "lpfc_sli.h" |
36 | #include "lpfc_sli4.h" | ||
35 | #include "lpfc_nl.h" | 37 | #include "lpfc_nl.h" |
36 | #include "lpfc_disc.h" | 38 | #include "lpfc_disc.h" |
37 | #include "lpfc_scsi.h" | 39 | #include "lpfc_scsi.h" |
@@ -828,18 +830,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba, | |||
828 | return 0; | 830 | return 0; |
829 | } | 831 | } |
830 | 832 | ||
831 | if (mrpi) | 833 | if (phba->sli_rev == LPFC_SLI_REV4) { |
832 | *mrpi = pmb->un.varRdConfig.max_rpi; | 834 | rd_config = &pmboxq->u.mqe.un.rd_config; |
833 | if (arpi) | 835 | if (mrpi) |
834 | *arpi = pmb->un.varRdConfig.avail_rpi; | 836 | *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); |
835 | if (mxri) | 837 | if (arpi) |
836 | *mxri = pmb->un.varRdConfig.max_xri; | 838 | *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - |
837 | if (axri) | 839 | phba->sli4_hba.max_cfg_param.rpi_used; |
838 | *axri = pmb->un.varRdConfig.avail_xri; | 840 | if (mxri) |
839 | if (mvpi) | 841 | *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); |
840 | *mvpi = pmb->un.varRdConfig.max_vpi; | 842 | if (axri) |
841 | if (avpi) | 843 | *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - |
842 | *avpi = pmb->un.varRdConfig.avail_vpi; | 844 | phba->sli4_hba.max_cfg_param.xri_used; |
845 | if (mvpi) | ||
846 | *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); | ||
847 | if (avpi) | ||
848 | *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - | ||
849 | phba->sli4_hba.max_cfg_param.vpi_used; | ||
850 | } else { | ||
851 | if (mrpi) | ||
852 | *mrpi = pmb->un.varRdConfig.max_rpi; | ||
853 | if (arpi) | ||
854 | *arpi = pmb->un.varRdConfig.avail_rpi; | ||
855 | if (mxri) | ||
856 | *mxri = pmb->un.varRdConfig.max_xri; | ||
857 | if (axri) | ||
858 | *axri = pmb->un.varRdConfig.avail_xri; | ||
859 | if (mvpi) | ||
860 | *mvpi = pmb->un.varRdConfig.max_vpi; | ||
861 | if (avpi) | ||
862 | *avpi = pmb->un.varRdConfig.avail_vpi; | ||
863 | } | ||
843 | 864 | ||
844 | mempool_free(pmboxq, phba->mbox_mem_pool); | 865 | mempool_free(pmboxq, phba->mbox_mem_pool); |
845 | return 1; | 866 | return 1; |
@@ -2844,15 +2865,39 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, | |||
2844 | /* | 2865 | /* |
2845 | # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that | 2866 | # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that |
2846 | # support this feature | 2867 | # support this feature |
2847 | # 0 = MSI disabled | 2868 | # 0 = MSI disabled (default) |
2848 | # 1 = MSI enabled | 2869 | # 1 = MSI enabled |
2849 | # 2 = MSI-X enabled (default) | 2870 | # 2 = MSI-X enabled |
2850 | # Value range is [0,2]. Default value is 2. | 2871 | # Value range is [0,2]. Default value is 0. |
2851 | */ | 2872 | */ |
2852 | LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " | 2873 | LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " |
2853 | "MSI-X (2), if possible"); | 2874 | "MSI-X (2), if possible"); |
2854 | 2875 | ||
2855 | /* | 2876 | /* |
2877 | # lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second | ||
2878 | # | ||
2879 | # Value range is [636,651042]. Default value is 10000. | ||
2880 | */ | ||
2881 | LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, | ||
2882 | "Set the maximum number of fast-path FCP interrupts per second"); | ||
2883 | |||
2884 | /* | ||
2885 | # lpfc_fcp_wq_count: Set the number of fast-path FCP work queues | ||
2886 | # | ||
2887 | # Value range is [1,31]. Default value is 4. | ||
2888 | */ | ||
2889 | LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, | ||
2890 | "Set the number of fast-path FCP work queues, if possible"); | ||
2891 | |||
2892 | /* | ||
2893 | # lpfc_fcp_eq_count: Set the number of fast-path FCP event queues | ||
2894 | # | ||
2895 | # Value range is [1,7]. Default value is 1. | ||
2896 | */ | ||
2897 | LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, | ||
2898 | "Set the number of fast-path FCP event queues, if possible"); | ||
2899 | |||
2900 | /* | ||
2856 | # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. | 2901 | # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. |
2857 | # 0 = HBA resets disabled | 2902 | # 0 = HBA resets disabled |
2858 | # 1 = HBA resets enabled (default) | 2903 | # 1 = HBA resets enabled (default) |
@@ -2969,6 +3014,9 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
2969 | &dev_attr_lpfc_poll, | 3014 | &dev_attr_lpfc_poll, |
2970 | &dev_attr_lpfc_poll_tmo, | 3015 | &dev_attr_lpfc_poll_tmo, |
2971 | &dev_attr_lpfc_use_msi, | 3016 | &dev_attr_lpfc_use_msi, |
3017 | &dev_attr_lpfc_fcp_imax, | ||
3018 | &dev_attr_lpfc_fcp_wq_count, | ||
3019 | &dev_attr_lpfc_fcp_eq_count, | ||
2972 | &dev_attr_lpfc_enable_bg, | 3020 | &dev_attr_lpfc_enable_bg, |
2973 | &dev_attr_lpfc_soft_wwnn, | 3021 | &dev_attr_lpfc_soft_wwnn, |
2974 | &dev_attr_lpfc_soft_wwpn, | 3022 | &dev_attr_lpfc_soft_wwpn, |
@@ -4105,6 +4153,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
4105 | lpfc_poll_tmo_init(phba, lpfc_poll_tmo); | 4153 | lpfc_poll_tmo_init(phba, lpfc_poll_tmo); |
4106 | lpfc_enable_npiv_init(phba, lpfc_enable_npiv); | 4154 | lpfc_enable_npiv_init(phba, lpfc_enable_npiv); |
4107 | lpfc_use_msi_init(phba, lpfc_use_msi); | 4155 | lpfc_use_msi_init(phba, lpfc_use_msi); |
4156 | lpfc_fcp_imax_init(phba, lpfc_fcp_imax); | ||
4157 | lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); | ||
4158 | lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); | ||
4108 | lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); | 4159 | lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset); |
4109 | lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); | 4160 | lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); |
4110 | lpfc_enable_bg_init(phba, lpfc_enable_bg); | 4161 | lpfc_enable_bg_init(phba, lpfc_enable_bg); |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index f88ce3f26190..3802e455734f 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -35,17 +35,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
35 | int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); | 35 | int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); |
36 | void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); | 36 | void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); |
37 | void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); | 37 | void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); |
38 | int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, | 38 | int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, |
39 | LPFC_MBOXQ_t *, uint32_t); | 39 | LPFC_MBOXQ_t *, uint32_t); |
40 | void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); | 40 | void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); |
41 | void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); | 41 | void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); |
42 | void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); | 42 | void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); |
43 | void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); | 43 | void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); |
44 | void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); | 44 | void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); |
45 | void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); | ||
45 | 46 | ||
46 | struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); | 47 | struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); |
47 | void lpfc_cleanup_rpis(struct lpfc_vport *, int); | 48 | void lpfc_cleanup_rpis(struct lpfc_vport *, int); |
48 | int lpfc_linkdown(struct lpfc_hba *); | 49 | int lpfc_linkdown(struct lpfc_hba *); |
50 | void lpfc_linkdown_port(struct lpfc_vport *); | ||
49 | void lpfc_port_link_failure(struct lpfc_vport *); | 51 | void lpfc_port_link_failure(struct lpfc_vport *); |
50 | void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); | 52 | void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); |
51 | 53 | ||
@@ -54,6 +56,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
54 | void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 56 | void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
55 | void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 57 | void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
56 | void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); | 58 | void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); |
59 | void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *); | ||
57 | void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); | 60 | void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); |
58 | void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); | 61 | void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); |
59 | struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, | 62 | struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, |
@@ -149,15 +152,19 @@ int lpfc_online(struct lpfc_hba *); | |||
149 | void lpfc_unblock_mgmt_io(struct lpfc_hba *); | 152 | void lpfc_unblock_mgmt_io(struct lpfc_hba *); |
150 | void lpfc_offline_prep(struct lpfc_hba *); | 153 | void lpfc_offline_prep(struct lpfc_hba *); |
151 | void lpfc_offline(struct lpfc_hba *); | 154 | void lpfc_offline(struct lpfc_hba *); |
155 | void lpfc_reset_hba(struct lpfc_hba *); | ||
152 | 156 | ||
153 | int lpfc_sli_setup(struct lpfc_hba *); | 157 | int lpfc_sli_setup(struct lpfc_hba *); |
154 | int lpfc_sli_queue_setup(struct lpfc_hba *); | 158 | int lpfc_sli_queue_setup(struct lpfc_hba *); |
155 | 159 | ||
156 | void lpfc_handle_eratt(struct lpfc_hba *); | 160 | void lpfc_handle_eratt(struct lpfc_hba *); |
157 | void lpfc_handle_latt(struct lpfc_hba *); | 161 | void lpfc_handle_latt(struct lpfc_hba *); |
158 | irqreturn_t lpfc_intr_handler(int, void *); | 162 | irqreturn_t lpfc_sli_intr_handler(int, void *); |
159 | irqreturn_t lpfc_sp_intr_handler(int, void *); | 163 | irqreturn_t lpfc_sli_sp_intr_handler(int, void *); |
160 | irqreturn_t lpfc_fp_intr_handler(int, void *); | 164 | irqreturn_t lpfc_sli_fp_intr_handler(int, void *); |
165 | irqreturn_t lpfc_sli4_intr_handler(int, void *); | ||
166 | irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); | ||
167 | irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); | ||
161 | 168 | ||
162 | void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); | 169 | void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); |
163 | void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); | 170 | void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); |
@@ -165,16 +172,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); | |||
165 | void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); | 172 | void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); |
166 | void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); | 173 | void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); |
167 | LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); | 174 | LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); |
175 | void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); | ||
168 | void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); | 176 | void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); |
177 | int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *); | ||
178 | int lpfc_mbox_dev_check(struct lpfc_hba *); | ||
169 | int lpfc_mbox_tmo_val(struct lpfc_hba *, int); | 179 | int lpfc_mbox_tmo_val(struct lpfc_hba *, int); |
180 | void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); | ||
181 | void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); | ||
182 | void lpfc_init_vpi(struct lpfcMboxq *, uint16_t); | ||
183 | void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); | ||
184 | void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); | ||
185 | void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); | ||
186 | void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); | ||
170 | 187 | ||
171 | void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, | 188 | void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, |
172 | uint32_t , LPFC_MBOXQ_t *); | 189 | uint32_t , LPFC_MBOXQ_t *); |
173 | struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); | 190 | struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); |
174 | void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); | 191 | void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); |
192 | struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); | ||
193 | void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); | ||
194 | void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, | ||
195 | uint16_t); | ||
196 | void lpfc_unregister_unused_fcf(struct lpfc_hba *); | ||
175 | 197 | ||
176 | int lpfc_mem_alloc(struct lpfc_hba *); | 198 | int lpfc_mem_alloc(struct lpfc_hba *, int align); |
177 | void lpfc_mem_free(struct lpfc_hba *); | 199 | void lpfc_mem_free(struct lpfc_hba *); |
200 | void lpfc_mem_free_all(struct lpfc_hba *); | ||
178 | void lpfc_stop_vport_timers(struct lpfc_vport *); | 201 | void lpfc_stop_vport_timers(struct lpfc_vport *); |
179 | 202 | ||
180 | void lpfc_poll_timeout(unsigned long ptr); | 203 | void lpfc_poll_timeout(unsigned long ptr); |
@@ -198,12 +221,13 @@ int lpfc_sli_host_down(struct lpfc_vport *); | |||
198 | int lpfc_sli_hba_down(struct lpfc_hba *); | 221 | int lpfc_sli_hba_down(struct lpfc_hba *); |
199 | int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); | 222 | int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); |
200 | int lpfc_sli_handle_mb_event(struct lpfc_hba *); | 223 | int lpfc_sli_handle_mb_event(struct lpfc_hba *); |
201 | int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); | 224 | void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); |
202 | int lpfc_sli_check_eratt(struct lpfc_hba *); | 225 | int lpfc_sli_check_eratt(struct lpfc_hba *); |
203 | int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, | 226 | void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, |
204 | struct lpfc_sli_ring *, uint32_t); | 227 | struct lpfc_sli_ring *, uint32_t); |
228 | int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); | ||
205 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); | 229 | void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); |
206 | int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *, | 230 | int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, |
207 | struct lpfc_iocbq *, uint32_t); | 231 | struct lpfc_iocbq *, uint32_t); |
208 | void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); | 232 | void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); |
209 | void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); | 233 | void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); |
@@ -237,7 +261,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, | |||
237 | 261 | ||
238 | int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); | 262 | int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); |
239 | 263 | ||
240 | int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, | 264 | int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, |
241 | struct lpfc_iocbq *, struct lpfc_iocbq *, | 265 | struct lpfc_iocbq *, struct lpfc_iocbq *, |
242 | uint32_t); | 266 | uint32_t); |
243 | void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, | 267 | void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, |
@@ -254,6 +278,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); | |||
254 | const char* lpfc_info(struct Scsi_Host *); | 278 | const char* lpfc_info(struct Scsi_Host *); |
255 | int lpfc_scan_finished(struct Scsi_Host *, unsigned long); | 279 | int lpfc_scan_finished(struct Scsi_Host *, unsigned long); |
256 | 280 | ||
281 | int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t); | ||
282 | int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t); | ||
283 | int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t); | ||
284 | int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t); | ||
285 | int lpfc_api_table_setup(struct lpfc_hba *, uint8_t); | ||
286 | |||
257 | void lpfc_get_cfgparam(struct lpfc_hba *); | 287 | void lpfc_get_cfgparam(struct lpfc_hba *); |
258 | void lpfc_get_vport_cfgparam(struct lpfc_vport *); | 288 | void lpfc_get_vport_cfgparam(struct lpfc_vport *); |
259 | int lpfc_alloc_sysfs_attr(struct lpfc_vport *); | 289 | int lpfc_alloc_sysfs_attr(struct lpfc_vport *); |
@@ -314,8 +344,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, | |||
314 | struct lpfc_iocbq *); | 344 | struct lpfc_iocbq *); |
315 | struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); | 345 | struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); |
316 | void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); | 346 | void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); |
347 | void lpfc_create_static_vport(struct lpfc_hba *); | ||
348 | void lpfc_stop_hba_timers(struct lpfc_hba *); | ||
349 | void lpfc_stop_port(struct lpfc_hba *); | ||
350 | void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); | ||
351 | int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); | ||
352 | void lpfc_start_fdiscs(struct lpfc_hba *phba); | ||
317 | 353 | ||
318 | #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) | 354 | #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) |
319 | #define HBA_EVENT_RSCN 5 | 355 | #define HBA_EVENT_RSCN 5 |
320 | #define HBA_EVENT_LINK_UP 2 | 356 | #define HBA_EVENT_LINK_UP 2 |
321 | #define HBA_EVENT_LINK_DOWN 3 | 357 | #define HBA_EVENT_LINK_DOWN 3 |
358 | |||
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 4164b935ea9f..51990787796f 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -32,8 +32,10 @@ | |||
32 | #include <scsi/scsi_host.h> | 32 | #include <scsi/scsi_host.h> |
33 | #include <scsi/scsi_transport_fc.h> | 33 | #include <scsi/scsi_transport_fc.h> |
34 | 34 | ||
35 | #include "lpfc_hw4.h" | ||
35 | #include "lpfc_hw.h" | 36 | #include "lpfc_hw.h" |
36 | #include "lpfc_sli.h" | 37 | #include "lpfc_sli.h" |
38 | #include "lpfc_sli4.h" | ||
37 | #include "lpfc_nl.h" | 39 | #include "lpfc_nl.h" |
38 | #include "lpfc_disc.h" | 40 | #include "lpfc_disc.h" |
39 | #include "lpfc_scsi.h" | 41 | #include "lpfc_scsi.h" |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 5dd66925f4ca..42ef258c7d52 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c | |||
@@ -33,8 +33,10 @@ | |||
33 | #include <scsi/scsi_host.h> | 33 | #include <scsi/scsi_host.h> |
34 | #include <scsi/scsi_transport_fc.h> | 34 | #include <scsi/scsi_transport_fc.h> |
35 | 35 | ||
36 | #include "lpfc_hw4.h" | ||
36 | #include "lpfc_hw.h" | 37 | #include "lpfc_hw.h" |
37 | #include "lpfc_sli.h" | 38 | #include "lpfc_sli.h" |
39 | #include "lpfc_sli4.h" | ||
38 | #include "lpfc_nl.h" | 40 | #include "lpfc_nl.h" |
39 | #include "lpfc_disc.h" | 41 | #include "lpfc_disc.h" |
40 | #include "lpfc_scsi.h" | 42 | #include "lpfc_scsi.h" |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 8c5c3aea4a19..9fe36bf6fd14 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -28,8 +28,10 @@ | |||
28 | #include <scsi/scsi_host.h> | 28 | #include <scsi/scsi_host.h> |
29 | #include <scsi/scsi_transport_fc.h> | 29 | #include <scsi/scsi_transport_fc.h> |
30 | 30 | ||
31 | #include "lpfc_hw4.h" | ||
31 | #include "lpfc_hw.h" | 32 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 33 | #include "lpfc_sli.h" |
34 | #include "lpfc_sli4.h" | ||
33 | #include "lpfc_nl.h" | 35 | #include "lpfc_nl.h" |
34 | #include "lpfc_disc.h" | 36 | #include "lpfc_disc.h" |
35 | #include "lpfc_scsi.h" | 37 | #include "lpfc_scsi.h" |
@@ -220,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, | |||
220 | icmd->un.elsreq64.myID = vport->fc_myDID; | 222 | icmd->un.elsreq64.myID = vport->fc_myDID; |
221 | 223 | ||
222 | /* For ELS_REQUEST64_CR, use the VPI by default */ | 224 | /* For ELS_REQUEST64_CR, use the VPI by default */ |
223 | icmd->ulpContext = vport->vpi; | 225 | icmd->ulpContext = vport->vpi + phba->vpi_base; |
224 | icmd->ulpCt_h = 0; | 226 | icmd->ulpCt_h = 0; |
225 | /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ | 227 | /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ |
226 | if (elscmd == ELS_CMD_ECHO) | 228 | if (elscmd == ELS_CMD_ECHO) |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 25fc96c9081f..0fc66005d545 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -29,10 +29,12 @@ | |||
29 | #include <scsi/scsi_host.h> | 29 | #include <scsi/scsi_host.h> |
30 | #include <scsi/scsi_transport_fc.h> | 30 | #include <scsi/scsi_transport_fc.h> |
31 | 31 | ||
32 | #include "lpfc_hw4.h" | ||
32 | #include "lpfc_hw.h" | 33 | #include "lpfc_hw.h" |
33 | #include "lpfc_nl.h" | 34 | #include "lpfc_nl.h" |
34 | #include "lpfc_disc.h" | 35 | #include "lpfc_disc.h" |
35 | #include "lpfc_sli.h" | 36 | #include "lpfc_sli.h" |
37 | #include "lpfc_sli4.h" | ||
36 | #include "lpfc_scsi.h" | 38 | #include "lpfc_scsi.h" |
37 | #include "lpfc.h" | 39 | #include "lpfc.h" |
38 | #include "lpfc_logmsg.h" | 40 | #include "lpfc_logmsg.h" |
@@ -491,6 +493,10 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
491 | phba->work_ha = 0; | 493 | phba->work_ha = 0; |
492 | spin_unlock_irq(&phba->hbalock); | 494 | spin_unlock_irq(&phba->hbalock); |
493 | 495 | ||
496 | /* First, try to post the next mailbox command to SLI4 device */ | ||
497 | if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) | ||
498 | lpfc_sli4_post_async_mbox(phba); | ||
499 | |||
494 | if (ha_copy & HA_ERATT) | 500 | if (ha_copy & HA_ERATT) |
495 | /* Handle the error attention event */ | 501 | /* Handle the error attention event */ |
496 | lpfc_handle_eratt(phba); | 502 | lpfc_handle_eratt(phba); |
@@ -501,9 +507,27 @@ lpfc_work_done(struct lpfc_hba *phba) | |||
501 | if (ha_copy & HA_LATT) | 507 | if (ha_copy & HA_LATT) |
502 | lpfc_handle_latt(phba); | 508 | lpfc_handle_latt(phba); |
503 | 509 | ||
510 | /* Process SLI4 events */ | ||
511 | if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { | ||
512 | if (phba->hba_flag & FCP_XRI_ABORT_EVENT) | ||
513 | lpfc_sli4_fcp_xri_abort_event_proc(phba); | ||
514 | if (phba->hba_flag & ELS_XRI_ABORT_EVENT) | ||
515 | lpfc_sli4_els_xri_abort_event_proc(phba); | ||
516 | if (phba->hba_flag & ASYNC_EVENT) | ||
517 | lpfc_sli4_async_event_proc(phba); | ||
518 | if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { | ||
519 | spin_lock_irq(&phba->hbalock); | ||
520 | phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; | ||
521 | spin_unlock_irq(&phba->hbalock); | ||
522 | lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); | ||
523 | } | ||
524 | if (phba->hba_flag & HBA_RECEIVE_BUFFER) | ||
525 | lpfc_sli4_handle_received_buffer(phba); | ||
526 | } | ||
527 | |||
504 | vports = lpfc_create_vport_work_array(phba); | 528 | vports = lpfc_create_vport_work_array(phba); |
505 | if (vports != NULL) | 529 | if (vports != NULL) |
506 | for(i = 0; i <= phba->max_vpi; i++) { | 530 | for (i = 0; i <= phba->max_vports; i++) { |
507 | /* | 531 | /* |
508 | * We could have no vports in array if unloading, so if | 532 | * We could have no vports in array if unloading, so if |
509 | * this happens then just use the pport | 533 | * this happens then just use the pport |
@@ -2556,7 +2580,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
2556 | * clear_la then don't send it. | 2580 | * clear_la then don't send it. |
2557 | */ | 2581 | */ |
2558 | if ((phba->link_state >= LPFC_CLEAR_LA) || | 2582 | if ((phba->link_state >= LPFC_CLEAR_LA) || |
2559 | (vport->port_type != LPFC_PHYSICAL_PORT)) | 2583 | (vport->port_type != LPFC_PHYSICAL_PORT) || |
2584 | (phba->sli_rev == LPFC_SLI_REV4)) | ||
2560 | return; | 2585 | return; |
2561 | 2586 | ||
2562 | /* Link up discovery */ | 2587 | /* Link up discovery */ |
@@ -2585,7 +2610,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) | |||
2585 | 2610 | ||
2586 | regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 2611 | regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
2587 | if (regvpimbox) { | 2612 | if (regvpimbox) { |
2588 | lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); | 2613 | lpfc_reg_vpi(vport, regvpimbox); |
2589 | regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; | 2614 | regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; |
2590 | regvpimbox->vport = vport; | 2615 | regvpimbox->vport = vport; |
2591 | if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) | 2616 | if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) |
@@ -2645,7 +2670,8 @@ lpfc_disc_start(struct lpfc_vport *vport) | |||
2645 | */ | 2670 | */ |
2646 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && | 2671 | if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && |
2647 | !(vport->fc_flag & FC_PT2PT) && | 2672 | !(vport->fc_flag & FC_PT2PT) && |
2648 | !(vport->fc_flag & FC_RSCN_MODE)) { | 2673 | !(vport->fc_flag & FC_RSCN_MODE) && |
2674 | (phba->sli_rev < LPFC_SLI_REV4)) { | ||
2649 | lpfc_issue_reg_vpi(phba, vport); | 2675 | lpfc_issue_reg_vpi(phba, vport); |
2650 | return; | 2676 | return; |
2651 | } | 2677 | } |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 4168c7b498b8..a9d64cfbe5cc 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -471,6 +471,35 @@ struct serv_parm { /* Structure is in Big Endian format */ | |||
471 | }; | 471 | }; |
472 | 472 | ||
473 | /* | 473 | /* |
474 | * Virtual Fabric Tagging Header | ||
475 | */ | ||
476 | struct fc_vft_header { | ||
477 | uint32_t word0; | ||
478 | #define fc_vft_hdr_r_ctl_SHIFT 24 | ||
479 | #define fc_vft_hdr_r_ctl_MASK 0xFF | ||
480 | #define fc_vft_hdr_r_ctl_WORD word0 | ||
481 | #define fc_vft_hdr_ver_SHIFT 22 | ||
482 | #define fc_vft_hdr_ver_MASK 0x3 | ||
483 | #define fc_vft_hdr_ver_WORD word0 | ||
484 | #define fc_vft_hdr_type_SHIFT 18 | ||
485 | #define fc_vft_hdr_type_MASK 0xF | ||
486 | #define fc_vft_hdr_type_WORD word0 | ||
487 | #define fc_vft_hdr_e_SHIFT 16 | ||
488 | #define fc_vft_hdr_e_MASK 0x1 | ||
489 | #define fc_vft_hdr_e_WORD word0 | ||
490 | #define fc_vft_hdr_priority_SHIFT 13 | ||
491 | #define fc_vft_hdr_priority_MASK 0x7 | ||
492 | #define fc_vft_hdr_priority_WORD word0 | ||
493 | #define fc_vft_hdr_vf_id_SHIFT 1 | ||
494 | #define fc_vft_hdr_vf_id_MASK 0xFFF | ||
495 | #define fc_vft_hdr_vf_id_WORD word0 | ||
496 | uint32_t word1; | ||
497 | #define fc_vft_hdr_hopct_SHIFT 24 | ||
498 | #define fc_vft_hdr_hopct_MASK 0xFF | ||
499 | #define fc_vft_hdr_hopct_WORD word1 | ||
500 | }; | ||
501 | |||
502 | /* | ||
474 | * Extended Link Service LS_COMMAND codes (Payload Word 0) | 503 | * Extended Link Service LS_COMMAND codes (Payload Word 0) |
475 | */ | 504 | */ |
476 | #ifdef __BIG_ENDIAN_BITFIELD | 505 | #ifdef __BIG_ENDIAN_BITFIELD |
@@ -1152,6 +1181,9 @@ typedef struct { | |||
1152 | #define PCI_DEVICE_ID_HORNET 0xfe05 | 1181 | #define PCI_DEVICE_ID_HORNET 0xfe05 |
1153 | #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 | 1182 | #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 |
1154 | #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 | 1183 | #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 |
1184 | #define PCI_VENDOR_ID_SERVERENGINE 0x19a2 | ||
1185 | #define PCI_DEVICE_ID_TIGERSHARK 0x0704 | ||
1186 | #define PCI_DEVICE_ID_TIGERSHARK_S 0x0705 | ||
1155 | 1187 | ||
1156 | #define JEDEC_ID_ADDRESS 0x0080001c | 1188 | #define JEDEC_ID_ADDRESS 0x0080001c |
1157 | #define FIREFLY_JEDEC_ID 0x1ACC | 1189 | #define FIREFLY_JEDEC_ID 0x1ACC |
@@ -1342,15 +1374,21 @@ typedef struct { /* FireFly BIU registers */ | |||
1342 | #define MBX_READ_LA64 0x95 | 1374 | #define MBX_READ_LA64 0x95 |
1343 | #define MBX_REG_VPI 0x96 | 1375 | #define MBX_REG_VPI 0x96 |
1344 | #define MBX_UNREG_VPI 0x97 | 1376 | #define MBX_UNREG_VPI 0x97 |
1345 | #define MBX_REG_VNPID 0x96 | ||
1346 | #define MBX_UNREG_VNPID 0x97 | ||
1347 | 1377 | ||
1348 | #define MBX_WRITE_WWN 0x98 | 1378 | #define MBX_WRITE_WWN 0x98 |
1349 | #define MBX_SET_DEBUG 0x99 | 1379 | #define MBX_SET_DEBUG 0x99 |
1350 | #define MBX_LOAD_EXP_ROM 0x9C | 1380 | #define MBX_LOAD_EXP_ROM 0x9C |
1351 | 1381 | #define MBX_SLI4_CONFIG 0x9B | |
1352 | #define MBX_MAX_CMDS 0x9D | 1382 | #define MBX_SLI4_REQ_FTRS 0x9D |
1383 | #define MBX_MAX_CMDS 0x9E | ||
1384 | #define MBX_RESUME_RPI 0x9E | ||
1353 | #define MBX_SLI2_CMD_MASK 0x80 | 1385 | #define MBX_SLI2_CMD_MASK 0x80 |
1386 | #define MBX_REG_VFI 0x9F | ||
1387 | #define MBX_REG_FCFI 0xA0 | ||
1388 | #define MBX_UNREG_VFI 0xA1 | ||
1389 | #define MBX_UNREG_FCFI 0xA2 | ||
1390 | #define MBX_INIT_VFI 0xA3 | ||
1391 | #define MBX_INIT_VPI 0xA4 | ||
1354 | 1392 | ||
1355 | /* IOCB Commands */ | 1393 | /* IOCB Commands */ |
1356 | 1394 | ||
@@ -1440,6 +1478,16 @@ typedef struct { /* FireFly BIU registers */ | |||
1440 | #define CMD_IOCB_LOGENTRY_CN 0x94 | 1478 | #define CMD_IOCB_LOGENTRY_CN 0x94 |
1441 | #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 | 1479 | #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 |
1442 | 1480 | ||
1481 | /* Unhandled Data Security SLI Commands */ | ||
1482 | #define DSSCMD_IWRITE64_CR 0xD8 | ||
1483 | #define DSSCMD_IWRITE64_CX 0xD9 | ||
1484 | #define DSSCMD_IREAD64_CR 0xDA | ||
1485 | #define DSSCMD_IREAD64_CX 0xDB | ||
1486 | #define DSSCMD_INVALIDATE_DEK 0xDC | ||
1487 | #define DSSCMD_SET_KEK 0xDD | ||
1488 | #define DSSCMD_GET_KEK_ID 0xDE | ||
1489 | #define DSSCMD_GEN_XFER 0xDF | ||
1490 | |||
1443 | #define CMD_MAX_IOCB_CMD 0xE6 | 1491 | #define CMD_MAX_IOCB_CMD 0xE6 |
1444 | #define CMD_IOCB_MASK 0xff | 1492 | #define CMD_IOCB_MASK 0xff |
1445 | 1493 | ||
@@ -1466,6 +1514,7 @@ typedef struct { /* FireFly BIU registers */ | |||
1466 | #define MBXERR_BAD_RCV_LENGTH 14 | 1514 | #define MBXERR_BAD_RCV_LENGTH 14 |
1467 | #define MBXERR_DMA_ERROR 15 | 1515 | #define MBXERR_DMA_ERROR 15 |
1468 | #define MBXERR_ERROR 16 | 1516 | #define MBXERR_ERROR 16 |
1517 | #define MBXERR_LINK_DOWN 0x33 | ||
1469 | #define MBX_NOT_FINISHED 255 | 1518 | #define MBX_NOT_FINISHED 255 |
1470 | 1519 | ||
1471 | #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ | 1520 | #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ |
@@ -1504,32 +1553,6 @@ struct ulp_bde { | |||
1504 | #endif | 1553 | #endif |
1505 | }; | 1554 | }; |
1506 | 1555 | ||
1507 | struct ulp_bde64 { /* SLI-2 */ | ||
1508 | union ULP_BDE_TUS { | ||
1509 | uint32_t w; | ||
1510 | struct { | ||
1511 | #ifdef __BIG_ENDIAN_BITFIELD | ||
1512 | uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED | ||
1513 | VALUE !! */ | ||
1514 | uint32_t bdeSize:24; /* Size of buffer (in bytes) */ | ||
1515 | #else /* __LITTLE_ENDIAN_BITFIELD */ | ||
1516 | uint32_t bdeSize:24; /* Size of buffer (in bytes) */ | ||
1517 | uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED | ||
1518 | VALUE !! */ | ||
1519 | #endif | ||
1520 | #define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ | ||
1521 | #define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ | ||
1522 | #define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ | ||
1523 | #define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ | ||
1524 | #define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ | ||
1525 | #define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ | ||
1526 | #define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ | ||
1527 | } f; | ||
1528 | } tus; | ||
1529 | uint32_t addrLow; | ||
1530 | uint32_t addrHigh; | ||
1531 | }; | ||
1532 | |||
1533 | typedef struct ULP_BDL { /* SLI-2 */ | 1556 | typedef struct ULP_BDL { /* SLI-2 */ |
1534 | #ifdef __BIG_ENDIAN_BITFIELD | 1557 | #ifdef __BIG_ENDIAN_BITFIELD |
1535 | uint32_t bdeFlags:8; /* BDL Flags */ | 1558 | uint32_t bdeFlags:8; /* BDL Flags */ |
@@ -2287,7 +2310,7 @@ typedef struct { | |||
2287 | uint32_t rsvd3; | 2310 | uint32_t rsvd3; |
2288 | uint32_t rsvd4; | 2311 | uint32_t rsvd4; |
2289 | uint32_t rsvd5; | 2312 | uint32_t rsvd5; |
2290 | uint16_t rsvd6; | 2313 | uint16_t vfi; |
2291 | uint16_t vpi; | 2314 | uint16_t vpi; |
2292 | #else /* __LITTLE_ENDIAN */ | 2315 | #else /* __LITTLE_ENDIAN */ |
2293 | uint32_t rsvd1; | 2316 | uint32_t rsvd1; |
@@ -2297,7 +2320,7 @@ typedef struct { | |||
2297 | uint32_t rsvd4; | 2320 | uint32_t rsvd4; |
2298 | uint32_t rsvd5; | 2321 | uint32_t rsvd5; |
2299 | uint16_t vpi; | 2322 | uint16_t vpi; |
2300 | uint16_t rsvd6; | 2323 | uint16_t vfi; |
2301 | #endif | 2324 | #endif |
2302 | } REG_VPI_VAR; | 2325 | } REG_VPI_VAR; |
2303 | 2326 | ||
@@ -2457,7 +2480,7 @@ typedef struct { | |||
2457 | uint32_t entry_index:16; | 2480 | uint32_t entry_index:16; |
2458 | #endif | 2481 | #endif |
2459 | 2482 | ||
2460 | uint32_t rsvd1; | 2483 | uint32_t sli4_length; |
2461 | uint32_t word_cnt; | 2484 | uint32_t word_cnt; |
2462 | uint32_t resp_offset; | 2485 | uint32_t resp_offset; |
2463 | } DUMP_VAR; | 2486 | } DUMP_VAR; |
@@ -2470,9 +2493,32 @@ typedef struct { | |||
2470 | #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ | 2493 | #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ |
2471 | #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ | 2494 | #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ |
2472 | 2495 | ||
2496 | #define DMP_REGION_VPORT 0x16 /* VPort info region */ | ||
2497 | #define DMP_VPORT_REGION_SIZE 0x200 | ||
2498 | #define DMP_MBOX_OFFSET_WORD 0x5 | ||
2499 | |||
2500 | #define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */ | ||
2501 | #define DMP_FCOEPARAM_RGN_SIZE 0x400 | ||
2502 | |||
2473 | #define WAKE_UP_PARMS_REGION_ID 4 | 2503 | #define WAKE_UP_PARMS_REGION_ID 4 |
2474 | #define WAKE_UP_PARMS_WORD_SIZE 15 | 2504 | #define WAKE_UP_PARMS_WORD_SIZE 15 |
2475 | 2505 | ||
2506 | struct vport_rec { | ||
2507 | uint8_t wwpn[8]; | ||
2508 | uint8_t wwnn[8]; | ||
2509 | }; | ||
2510 | |||
2511 | #define VPORT_INFO_SIG 0x32324752 | ||
2512 | #define VPORT_INFO_REV_MASK 0xff | ||
2513 | #define VPORT_INFO_REV 0x1 | ||
2514 | #define MAX_STATIC_VPORT_COUNT 16 | ||
2515 | struct static_vport_info { | ||
2516 | uint32_t signature; | ||
2517 | uint32_t rev; | ||
2518 | struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; | ||
2519 | uint32_t resvd[66]; | ||
2520 | }; | ||
2521 | |||
2476 | /* Option rom version structure */ | 2522 | /* Option rom version structure */ |
2477 | struct prog_id { | 2523 | struct prog_id { |
2478 | #ifdef __BIG_ENDIAN_BITFIELD | 2524 | #ifdef __BIG_ENDIAN_BITFIELD |
@@ -2697,7 +2743,9 @@ typedef struct { | |||
2697 | #endif | 2743 | #endif |
2698 | 2744 | ||
2699 | #ifdef __BIG_ENDIAN_BITFIELD | 2745 | #ifdef __BIG_ENDIAN_BITFIELD |
2700 | uint32_t rsvd1 : 23; /* Reserved */ | 2746 | uint32_t rsvd1 : 19; /* Reserved */ |
2747 | uint32_t cdss : 1; /* Configure Data Security SLI */ | ||
2748 | uint32_t rsvd2 : 3; /* Reserved */ | ||
2701 | uint32_t cbg : 1; /* Configure BlockGuard */ | 2749 | uint32_t cbg : 1; /* Configure BlockGuard */ |
2702 | uint32_t cmv : 1; /* Configure Max VPIs */ | 2750 | uint32_t cmv : 1; /* Configure Max VPIs */ |
2703 | uint32_t ccrp : 1; /* Config Command Ring Polling */ | 2751 | uint32_t ccrp : 1; /* Config Command Ring Polling */ |
@@ -2717,10 +2765,14 @@ typedef struct { | |||
2717 | uint32_t ccrp : 1; /* Config Command Ring Polling */ | 2765 | uint32_t ccrp : 1; /* Config Command Ring Polling */ |
2718 | uint32_t cmv : 1; /* Configure Max VPIs */ | 2766 | uint32_t cmv : 1; /* Configure Max VPIs */ |
2719 | uint32_t cbg : 1; /* Configure BlockGuard */ | 2767 | uint32_t cbg : 1; /* Configure BlockGuard */ |
2720 | uint32_t rsvd1 : 23; /* Reserved */ | 2768 | uint32_t rsvd2 : 3; /* Reserved */ |
2769 | uint32_t cdss : 1; /* Configure Data Security SLI */ | ||
2770 | uint32_t rsvd1 : 19; /* Reserved */ | ||
2721 | #endif | 2771 | #endif |
2722 | #ifdef __BIG_ENDIAN_BITFIELD | 2772 | #ifdef __BIG_ENDIAN_BITFIELD |
2723 | uint32_t rsvd2 : 23; /* Reserved */ | 2773 | uint32_t rsvd3 : 19; /* Reserved */ |
2774 | uint32_t gdss : 1; /* Configure Data Security SLI */ | ||
2775 | uint32_t rsvd4 : 3; /* Reserved */ | ||
2724 | uint32_t gbg : 1; /* Grant BlockGuard */ | 2776 | uint32_t gbg : 1; /* Grant BlockGuard */ |
2725 | uint32_t gmv : 1; /* Grant Max VPIs */ | 2777 | uint32_t gmv : 1; /* Grant Max VPIs */ |
2726 | uint32_t gcrp : 1; /* Grant Command Ring Polling */ | 2778 | uint32_t gcrp : 1; /* Grant Command Ring Polling */ |
@@ -2740,7 +2792,9 @@ typedef struct { | |||
2740 | uint32_t gcrp : 1; /* Grant Command Ring Polling */ | 2792 | uint32_t gcrp : 1; /* Grant Command Ring Polling */ |
2741 | uint32_t gmv : 1; /* Grant Max VPIs */ | 2793 | uint32_t gmv : 1; /* Grant Max VPIs */ |
2742 | uint32_t gbg : 1; /* Grant BlockGuard */ | 2794 | uint32_t gbg : 1; /* Grant BlockGuard */ |
2743 | uint32_t rsvd2 : 23; /* Reserved */ | 2795 | uint32_t rsvd4 : 3; /* Reserved */ |
2796 | uint32_t gdss : 1; /* Configure Data Security SLI */ | ||
2797 | uint32_t rsvd3 : 19; /* Reserved */ | ||
2744 | #endif | 2798 | #endif |
2745 | 2799 | ||
2746 | #ifdef __BIG_ENDIAN_BITFIELD | 2800 | #ifdef __BIG_ENDIAN_BITFIELD |
@@ -2753,20 +2807,20 @@ typedef struct { | |||
2753 | 2807 | ||
2754 | #ifdef __BIG_ENDIAN_BITFIELD | 2808 | #ifdef __BIG_ENDIAN_BITFIELD |
2755 | uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ | 2809 | uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ |
2756 | uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ | 2810 | uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ |
2757 | #else /* __LITTLE_ENDIAN */ | 2811 | #else /* __LITTLE_ENDIAN */ |
2758 | uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ | 2812 | uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ |
2759 | uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ | 2813 | uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ |
2760 | #endif | 2814 | #endif |
2761 | 2815 | ||
2762 | uint32_t rsvd4; /* Reserved */ | 2816 | uint32_t rsvd6; /* Reserved */ |
2763 | 2817 | ||
2764 | #ifdef __BIG_ENDIAN_BITFIELD | 2818 | #ifdef __BIG_ENDIAN_BITFIELD |
2765 | uint32_t rsvd5 : 16; /* Reserved */ | 2819 | uint32_t rsvd7 : 16; /* Reserved */ |
2766 | uint32_t max_vpi : 16; /* Max number of virt N-Ports */ | 2820 | uint32_t max_vpi : 16; /* Max number of virt N-Ports */ |
2767 | #else /* __LITTLE_ENDIAN */ | 2821 | #else /* __LITTLE_ENDIAN */ |
2768 | uint32_t max_vpi : 16; /* Max number of virt N-Ports */ | 2822 | uint32_t max_vpi : 16; /* Max number of virt N-Ports */ |
2769 | uint32_t rsvd5 : 16; /* Reserved */ | 2823 | uint32_t rsvd7 : 16; /* Reserved */ |
2770 | #endif | 2824 | #endif |
2771 | 2825 | ||
2772 | } CONFIG_PORT_VAR; | 2826 | } CONFIG_PORT_VAR; |
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp) | |||
3666 | #define MENLO_TIMEOUT 30 | 3720 | #define MENLO_TIMEOUT 30 |
3667 | #define SETVAR_MLOMNT 0x103107 | 3721 | #define SETVAR_MLOMNT 0x103107 |
3668 | #define SETVAR_MLORST 0x103007 | 3722 | #define SETVAR_MLORST 0x103007 |
3723 | |||
3724 | #define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */ | ||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h new file mode 100644 index 000000000000..39c34b3ad29d --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -0,0 +1,2141 @@ | |||
1 | /******************************************************************* | ||
2 | * This file is part of the Emulex Linux Device Driver for * | ||
3 | * Fibre Channel Host Bus Adapters. * | ||
4 | * Copyright (C) 2009 Emulex. All rights reserved. * | ||
5 | * EMULEX and SLI are trademarks of Emulex. * | ||
6 | * www.emulex.com * | ||
7 | * * | ||
8 | * This program is free software; you can redistribute it and/or * | ||
9 | * modify it under the terms of version 2 of the GNU General * | ||
10 | * Public License as published by the Free Software Foundation. * | ||
11 | * This program is distributed in the hope that it will be useful. * | ||
12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
17 | * more details, a copy of which can be found in the file COPYING * | ||
18 | * included with this package. * | ||
19 | *******************************************************************/ | ||
20 | |||
21 | /* Macros to deal with bit fields. Each bit field must have 3 #defines | ||
22 | * associated with it (_SHIFT, _MASK, and _WORD). | ||
23 | * EG. For a bit field that is in the 7th bit of the "field4" field of a | ||
24 | * structure and is 2 bits in size the following #defines must exist: | ||
25 | * struct temp { | ||
26 | * uint32_t field1; | ||
27 | * uint32_t field2; | ||
28 | * uint32_t field3; | ||
29 | * uint32_t field4; | ||
30 | * #define example_bit_field_SHIFT 7 | ||
31 | * #define example_bit_field_MASK 0x03 | ||
32 | * #define example_bit_field_WORD field4 | ||
33 | * uint32_t field5; | ||
34 | * }; | ||
35 | * Then the macros below may be used to get or set the value of that field. | ||
36 | * EG. To get the value of the bit field from the above example: | ||
37 | * struct temp t1; | ||
38 | * value = bf_get(example_bit_field, &t1); | ||
39 | * And then to set that bit field: | ||
40 | * bf_set(example_bit_field, &t1, 2); | ||
41 | * Or clear that bit field: | ||
42 | * bf_set(example_bit_field, &t1, 0); | ||
43 | */ | ||
44 | #define bf_get(name, ptr) \ | ||
45 | (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) | ||
46 | #define bf_set(name, ptr, value) \ | ||
47 | ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ | ||
48 | ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) | ||
49 | |||
50 | struct dma_address { | ||
51 | uint32_t addr_lo; | ||
52 | uint32_t addr_hi; | ||
53 | }; | ||
54 | |||
55 | #define LPFC_SLI4_BAR0 1 | ||
56 | #define LPFC_SLI4_BAR1 2 | ||
57 | #define LPFC_SLI4_BAR2 4 | ||
58 | |||
59 | #define LPFC_SLI4_MBX_EMBED true | ||
60 | #define LPFC_SLI4_MBX_NEMBED false | ||
61 | |||
62 | #define LPFC_SLI4_MB_WORD_COUNT 64 | ||
63 | #define LPFC_MAX_MQ_PAGE 8 | ||
64 | #define LPFC_MAX_WQ_PAGE 8 | ||
65 | #define LPFC_MAX_CQ_PAGE 4 | ||
66 | #define LPFC_MAX_EQ_PAGE 8 | ||
67 | |||
68 | #define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */ | ||
69 | #define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */ | ||
70 | #define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */ | ||
71 | |||
72 | /* Define SLI4 Alignment requirements. */ | ||
73 | #define LPFC_ALIGN_16_BYTE 16 | ||
74 | #define LPFC_ALIGN_64_BYTE 64 | ||
75 | |||
76 | /* Define SLI4 specific definitions. */ | ||
77 | #define LPFC_MQ_CQE_BYTE_OFFSET 256 | ||
78 | #define LPFC_MBX_CMD_HDR_LENGTH 16 | ||
79 | #define LPFC_MBX_ERROR_RANGE 0x4000 | ||
80 | #define LPFC_BMBX_BIT1_ADDR_HI 0x2 | ||
81 | #define LPFC_BMBX_BIT1_ADDR_LO 0 | ||
82 | #define LPFC_RPI_HDR_COUNT 64 | ||
83 | #define LPFC_HDR_TEMPLATE_SIZE 4096 | ||
84 | #define LPFC_RPI_ALLOC_ERROR 0xFFFF | ||
85 | #define LPFC_FCF_RECORD_WD_CNT 132 | ||
86 | #define LPFC_ENTIRE_FCF_DATABASE 0 | ||
87 | #define LPFC_DFLT_FCF_INDEX 0 | ||
88 | |||
89 | /* Virtual function numbers */ | ||
90 | #define LPFC_VF0 0 | ||
91 | #define LPFC_VF1 1 | ||
92 | #define LPFC_VF2 2 | ||
93 | #define LPFC_VF3 3 | ||
94 | #define LPFC_VF4 4 | ||
95 | #define LPFC_VF5 5 | ||
96 | #define LPFC_VF6 6 | ||
97 | #define LPFC_VF7 7 | ||
98 | #define LPFC_VF8 8 | ||
99 | #define LPFC_VF9 9 | ||
100 | #define LPFC_VF10 10 | ||
101 | #define LPFC_VF11 11 | ||
102 | #define LPFC_VF12 12 | ||
103 | #define LPFC_VF13 13 | ||
104 | #define LPFC_VF14 14 | ||
105 | #define LPFC_VF15 15 | ||
106 | #define LPFC_VF16 16 | ||
107 | #define LPFC_VF17 17 | ||
108 | #define LPFC_VF18 18 | ||
109 | #define LPFC_VF19 19 | ||
110 | #define LPFC_VF20 20 | ||
111 | #define LPFC_VF21 21 | ||
112 | #define LPFC_VF22 22 | ||
113 | #define LPFC_VF23 23 | ||
114 | #define LPFC_VF24 24 | ||
115 | #define LPFC_VF25 25 | ||
116 | #define LPFC_VF26 26 | ||
117 | #define LPFC_VF27 27 | ||
118 | #define LPFC_VF28 28 | ||
119 | #define LPFC_VF29 29 | ||
120 | #define LPFC_VF30 30 | ||
121 | #define LPFC_VF31 31 | ||
122 | |||
123 | /* PCI function numbers */ | ||
124 | #define LPFC_PCI_FUNC0 0 | ||
125 | #define LPFC_PCI_FUNC1 1 | ||
126 | #define LPFC_PCI_FUNC2 2 | ||
127 | #define LPFC_PCI_FUNC3 3 | ||
128 | #define LPFC_PCI_FUNC4 4 | ||
129 | |||
130 | /* Active interrupt test count */ | ||
131 | #define LPFC_ACT_INTR_CNT 4 | ||
132 | |||
133 | /* Delay Multiplier constant */ | ||
134 | #define LPFC_DMULT_CONST 651042 | ||
135 | #define LPFC_MIM_IMAX 636 | ||
136 | #define LPFC_FP_DEF_IMAX 10000 | ||
137 | #define LPFC_SP_DEF_IMAX 10000 | ||
138 | |||
139 | struct ulp_bde64 { | ||
140 | union ULP_BDE_TUS { | ||
141 | uint32_t w; | ||
142 | struct { | ||
143 | #ifdef __BIG_ENDIAN_BITFIELD | ||
144 | uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED | ||
145 | VALUE !! */ | ||
146 | uint32_t bdeSize:24; /* Size of buffer (in bytes) */ | ||
147 | #else /* __LITTLE_ENDIAN_BITFIELD */ | ||
148 | uint32_t bdeSize:24; /* Size of buffer (in bytes) */ | ||
149 | uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED | ||
150 | VALUE !! */ | ||
151 | #endif | ||
152 | #define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ | ||
153 | #define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ | ||
154 | #define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ | ||
155 | #define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ | ||
156 | #define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ | ||
157 | #define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ | ||
158 | #define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ | ||
159 | } f; | ||
160 | } tus; | ||
161 | uint32_t addrLow; | ||
162 | uint32_t addrHigh; | ||
163 | }; | ||
164 | |||
165 | struct lpfc_sli4_flags { | ||
166 | uint32_t word0; | ||
167 | #define lpfc_fip_flag_SHIFT 0 | ||
168 | #define lpfc_fip_flag_MASK 0x00000001 | ||
169 | #define lpfc_fip_flag_WORD word0 | ||
170 | }; | ||
171 | |||
172 | /* event queue entry structure */ | ||
173 | struct lpfc_eqe { | ||
174 | uint32_t word0; | ||
175 | #define lpfc_eqe_resource_id_SHIFT 16 | ||
176 | #define lpfc_eqe_resource_id_MASK 0x000000FF | ||
177 | #define lpfc_eqe_resource_id_WORD word0 | ||
178 | #define lpfc_eqe_minor_code_SHIFT 4 | ||
179 | #define lpfc_eqe_minor_code_MASK 0x00000FFF | ||
180 | #define lpfc_eqe_minor_code_WORD word0 | ||
181 | #define lpfc_eqe_major_code_SHIFT 1 | ||
182 | #define lpfc_eqe_major_code_MASK 0x00000007 | ||
183 | #define lpfc_eqe_major_code_WORD word0 | ||
184 | #define lpfc_eqe_valid_SHIFT 0 | ||
185 | #define lpfc_eqe_valid_MASK 0x00000001 | ||
186 | #define lpfc_eqe_valid_WORD word0 | ||
187 | }; | ||
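Each of the queue entry and register structures below exposes its fields through *_SHIFT/*_MASK/*_WORD triplets rather than C bitfields; the driver reads and writes them with token-pasting helpers (bf_get()/bf_set(), presumably defined earlier in this header). The copies below are only a sketch of their assumed shape, shown once so the later examples can refer to them; these sketches assume <linux/types.h> for the fixed-width integer types and omit endian handling throughout.

/* Assumed shape of the driver's accessor helpers; for illustration only. */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Example: an event queue entry is ready to consume once its valid bit is set. */
static int eqe_pending(struct lpfc_eqe *eqe)
{
	return bf_get(lpfc_eqe_valid, eqe);
}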
188 | |||
189 | /* completion queue entry structure (common fields for all cqe types) */ | ||
190 | struct lpfc_cqe { | ||
191 | uint32_t reserved0; | ||
192 | uint32_t reserved1; | ||
193 | uint32_t reserved2; | ||
194 | uint32_t word3; | ||
195 | #define lpfc_cqe_valid_SHIFT 31 | ||
196 | #define lpfc_cqe_valid_MASK 0x00000001 | ||
197 | #define lpfc_cqe_valid_WORD word3 | ||
198 | #define lpfc_cqe_code_SHIFT 16 | ||
199 | #define lpfc_cqe_code_MASK 0x000000FF | ||
200 | #define lpfc_cqe_code_WORD word3 | ||
201 | }; | ||
202 | |||
203 | /* Completion Queue Entry Status Codes */ | ||
204 | #define CQE_STATUS_SUCCESS 0x0 | ||
205 | #define CQE_STATUS_FCP_RSP_FAILURE 0x1 | ||
206 | #define CQE_STATUS_REMOTE_STOP 0x2 | ||
207 | #define CQE_STATUS_LOCAL_REJECT 0x3 | ||
208 | #define CQE_STATUS_NPORT_RJT 0x4 | ||
209 | #define CQE_STATUS_FABRIC_RJT 0x5 | ||
210 | #define CQE_STATUS_NPORT_BSY 0x6 | ||
211 | #define CQE_STATUS_FABRIC_BSY 0x7 | ||
212 | #define CQE_STATUS_INTERMED_RSP 0x8 | ||
213 | #define CQE_STATUS_LS_RJT 0x9 | ||
214 | #define CQE_STATUS_CMD_REJECT 0xb | ||
215 | #define CQE_STATUS_FCP_TGT_LENCHECK 0xc | ||
216 | #define CQE_STATUS_NEED_BUFF_ENTRY 0xf | ||
217 | |||
218 | /* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */ | ||
219 | #define CQE_HW_STATUS_NO_ERR 0x0 | ||
220 | #define CQE_HW_STATUS_UNDERRUN 0x1 | ||
221 | #define CQE_HW_STATUS_OVERRUN 0x2 | ||
222 | |||
223 | /* Completion Queue Entry Codes */ | ||
224 | #define CQE_CODE_COMPL_WQE 0x1 | ||
225 | #define CQE_CODE_RELEASE_WQE 0x2 | ||
226 | #define CQE_CODE_RECEIVE 0x4 | ||
227 | #define CQE_CODE_XRI_ABORTED 0x5 | ||
228 | |||
229 | /* completion queue entry for wqe completions */ | ||
230 | struct lpfc_wcqe_complete { | ||
231 | uint32_t word0; | ||
232 | #define lpfc_wcqe_c_request_tag_SHIFT 16 | ||
233 | #define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF | ||
234 | #define lpfc_wcqe_c_request_tag_WORD word0 | ||
235 | #define lpfc_wcqe_c_status_SHIFT 8 | ||
236 | #define lpfc_wcqe_c_status_MASK 0x000000FF | ||
237 | #define lpfc_wcqe_c_status_WORD word0 | ||
238 | #define lpfc_wcqe_c_hw_status_SHIFT 0 | ||
239 | #define lpfc_wcqe_c_hw_status_MASK 0x000000FF | ||
240 | #define lpfc_wcqe_c_hw_status_WORD word0 | ||
241 | uint32_t total_data_placed; | ||
242 | uint32_t parameter; | ||
243 | uint32_t word3; | ||
244 | #define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT | ||
245 | #define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK | ||
246 | #define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD | ||
247 | #define lpfc_wcqe_c_xb_SHIFT 28 | ||
248 | #define lpfc_wcqe_c_xb_MASK 0x00000001 | ||
249 | #define lpfc_wcqe_c_xb_WORD word3 | ||
250 | #define lpfc_wcqe_c_pv_SHIFT 27 | ||
251 | #define lpfc_wcqe_c_pv_MASK 0x00000001 | ||
252 | #define lpfc_wcqe_c_pv_WORD word3 | ||
253 | #define lpfc_wcqe_c_priority_SHIFT 24 | ||
254 | #define lpfc_wcqe_c_priority_MASK 0x00000007 | ||
255 | #define lpfc_wcqe_c_priority_WORD word3 | ||
256 | #define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT | ||
257 | #define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK | ||
258 | #define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD | ||
259 | }; | ||
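A WQE completion is matched back to its request through the request tag, and total_data_placed is typically only meaningful when the status reports success; otherwise the parameter word carries the reject or abort detail. A sketch, again assuming the bf_get() helper from above:

static uint32_t wcqe_bytes_placed(struct lpfc_wcqe_complete *wcqe)
{
	/* bf_get(lpfc_wcqe_c_request_tag, wcqe) recovers the original iotag */
	if (bf_get(lpfc_wcqe_c_status, wcqe) != CQE_STATUS_SUCCESS)
		return 0;	/* wcqe->parameter then holds the failure detail */
	return wcqe->total_data_placed;
}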
260 | |||
261 | /* completion queue entry for wqe release */ | ||
262 | struct lpfc_wcqe_release { | ||
263 | uint32_t reserved0; | ||
264 | uint32_t reserved1; | ||
265 | uint32_t word2; | ||
266 | #define lpfc_wcqe_r_wq_id_SHIFT 16 | ||
267 | #define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF | ||
268 | #define lpfc_wcqe_r_wq_id_WORD word2 | ||
269 | #define lpfc_wcqe_r_wqe_index_SHIFT 0 | ||
270 | #define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF | ||
271 | #define lpfc_wcqe_r_wqe_index_WORD word2 | ||
272 | uint32_t word3; | ||
273 | #define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT | ||
274 | #define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK | ||
275 | #define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD | ||
276 | #define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT | ||
277 | #define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK | ||
278 | #define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD | ||
279 | }; | ||
280 | |||
281 | struct sli4_wcqe_xri_aborted { | ||
282 | uint32_t word0; | ||
283 | #define lpfc_wcqe_xa_status_SHIFT 8 | ||
284 | #define lpfc_wcqe_xa_status_MASK 0x000000FF | ||
285 | #define lpfc_wcqe_xa_status_WORD word0 | ||
286 | uint32_t parameter; | ||
287 | uint32_t word2; | ||
288 | #define lpfc_wcqe_xa_remote_xid_SHIFT 16 | ||
289 | #define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF | ||
290 | #define lpfc_wcqe_xa_remote_xid_WORD word2 | ||
291 | #define lpfc_wcqe_xa_xri_SHIFT 0 | ||
292 | #define lpfc_wcqe_xa_xri_MASK 0x0000FFFF | ||
293 | #define lpfc_wcqe_xa_xri_WORD word2 | ||
294 | uint32_t word3; | ||
295 | #define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT | ||
296 | #define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK | ||
297 | #define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD | ||
298 | #define lpfc_wcqe_xa_ia_SHIFT 30 | ||
299 | #define lpfc_wcqe_xa_ia_MASK 0x00000001 | ||
300 | #define lpfc_wcqe_xa_ia_WORD word3 | ||
301 | #define CQE_XRI_ABORTED_IA_REMOTE 0 | ||
302 | #define CQE_XRI_ABORTED_IA_LOCAL 1 | ||
303 | #define lpfc_wcqe_xa_br_SHIFT 29 | ||
304 | #define lpfc_wcqe_xa_br_MASK 0x00000001 | ||
305 | #define lpfc_wcqe_xa_br_WORD word3 | ||
306 | #define CQE_XRI_ABORTED_BR_BA_ACC 0 | ||
307 | #define CQE_XRI_ABORTED_BR_BA_RJT 1 | ||
308 | #define lpfc_wcqe_xa_eo_SHIFT 28 | ||
309 | #define lpfc_wcqe_xa_eo_MASK 0x00000001 | ||
310 | #define lpfc_wcqe_xa_eo_WORD word3 | ||
311 | #define CQE_XRI_ABORTED_EO_REMOTE 0 | ||
312 | #define CQE_XRI_ABORTED_EO_LOCAL 1 | ||
313 | #define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT | ||
314 | #define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK | ||
315 | #define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD | ||
316 | }; | ||
317 | |||
318 | /* completion queue entry structure for rqe completion */ | ||
319 | struct lpfc_rcqe { | ||
320 | uint32_t word0; | ||
321 | #define lpfc_rcqe_bindex_SHIFT 16 | ||
322 | #define lpfc_rcqe_bindex_MASK 0x00000FFF | ||
323 | #define lpfc_rcqe_bindex_WORD word0 | ||
324 | #define lpfc_rcqe_status_SHIFT 8 | ||
325 | #define lpfc_rcqe_status_MASK 0x000000FF | ||
326 | #define lpfc_rcqe_status_WORD word0 | ||
327 | #define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */ | ||
328 | #define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ | ||
329 | #define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ | ||
330 | #define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ | ||
331 | uint32_t reserved1; | ||
332 | uint32_t word2; | ||
333 | #define lpfc_rcqe_length_SHIFT 16 | ||
334 | #define lpfc_rcqe_length_MASK 0x0000FFFF | ||
335 | #define lpfc_rcqe_length_WORD word2 | ||
336 | #define lpfc_rcqe_rq_id_SHIFT 6 | ||
337 | #define lpfc_rcqe_rq_id_MASK 0x000003FF | ||
338 | #define lpfc_rcqe_rq_id_WORD word2 | ||
339 | #define lpfc_rcqe_fcf_id_SHIFT 0 | ||
340 | #define lpfc_rcqe_fcf_id_MASK 0x0000003F | ||
341 | #define lpfc_rcqe_fcf_id_WORD word2 | ||
342 | uint32_t word3; | ||
343 | #define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT | ||
344 | #define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK | ||
345 | #define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD | ||
346 | #define lpfc_rcqe_port_SHIFT 30 | ||
347 | #define lpfc_rcqe_port_MASK 0x00000001 | ||
348 | #define lpfc_rcqe_port_WORD word3 | ||
349 | #define lpfc_rcqe_hdr_length_SHIFT 24 | ||
350 | #define lpfc_rcqe_hdr_length_MASK 0x0000001F | ||
351 | #define lpfc_rcqe_hdr_length_WORD word3 | ||
352 | #define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT | ||
353 | #define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK | ||
354 | #define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD | ||
355 | #define lpfc_rcqe_eof_SHIFT 8 | ||
356 | #define lpfc_rcqe_eof_MASK 0x000000FF | ||
357 | #define lpfc_rcqe_eof_WORD word3 | ||
358 | #define FCOE_EOFn 0x41 | ||
359 | #define FCOE_EOFt 0x42 | ||
360 | #define FCOE_EOFni 0x49 | ||
361 | #define FCOE_EOFa 0x50 | ||
362 | #define lpfc_rcqe_sof_SHIFT 0 | ||
363 | #define lpfc_rcqe_sof_MASK 0x000000FF | ||
364 | #define lpfc_rcqe_sof_WORD word3 | ||
365 | #define FCOE_SOFi2 0x2d | ||
366 | #define FCOE_SOFi3 0x2e | ||
367 | #define FCOE_SOFn2 0x35 | ||
368 | #define FCOE_SOFn3 0x36 | ||
369 | }; | ||
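For an unsolicited receive completion, the status, receive queue id and payload length all come out of the rcqe words defined above; anything other than FC_STATUS_RQ_SUCCESS means the frame was truncated or discarded for lack of buffers. A sketch using the assumed bf_get() helper:

static int rcqe_frame_len(struct lpfc_rcqe *rcqe)
{
	if (bf_get(lpfc_rcqe_status, rcqe) != FC_STATUS_RQ_SUCCESS)
		return -1;		/* truncated, discarded, or buffers exhausted */
	/* lpfc_rcqe_rq_id identifies which receive queue the buffer came from */
	return (int)bf_get(lpfc_rcqe_length, rcqe);
}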
370 | |||
371 | struct lpfc_wqe_generic { | ||
372 | struct ulp_bde64 bde; | ||
373 | uint32_t word3; | ||
374 | uint32_t word4; | ||
375 | uint32_t word5; | ||
376 | uint32_t word6; | ||
377 | #define lpfc_wqe_gen_context_SHIFT 16 | ||
378 | #define lpfc_wqe_gen_context_MASK 0x0000FFFF | ||
379 | #define lpfc_wqe_gen_context_WORD word6 | ||
380 | #define lpfc_wqe_gen_xri_SHIFT 0 | ||
381 | #define lpfc_wqe_gen_xri_MASK 0x0000FFFF | ||
382 | #define lpfc_wqe_gen_xri_WORD word6 | ||
383 | uint32_t word7; | ||
384 | #define lpfc_wqe_gen_lnk_SHIFT 23 | ||
385 | #define lpfc_wqe_gen_lnk_MASK 0x00000001 | ||
386 | #define lpfc_wqe_gen_lnk_WORD word7 | ||
387 | #define lpfc_wqe_gen_erp_SHIFT 22 | ||
388 | #define lpfc_wqe_gen_erp_MASK 0x00000001 | ||
389 | #define lpfc_wqe_gen_erp_WORD word7 | ||
390 | #define lpfc_wqe_gen_pu_SHIFT 20 | ||
391 | #define lpfc_wqe_gen_pu_MASK 0x00000003 | ||
392 | #define lpfc_wqe_gen_pu_WORD word7 | ||
393 | #define lpfc_wqe_gen_class_SHIFT 16 | ||
394 | #define lpfc_wqe_gen_class_MASK 0x00000007 | ||
395 | #define lpfc_wqe_gen_class_WORD word7 | ||
396 | #define lpfc_wqe_gen_command_SHIFT 8 | ||
397 | #define lpfc_wqe_gen_command_MASK 0x000000FF | ||
398 | #define lpfc_wqe_gen_command_WORD word7 | ||
399 | #define lpfc_wqe_gen_status_SHIFT 4 | ||
400 | #define lpfc_wqe_gen_status_MASK 0x0000000F | ||
401 | #define lpfc_wqe_gen_status_WORD word7 | ||
402 | #define lpfc_wqe_gen_ct_SHIFT 2 | ||
403 | #define lpfc_wqe_gen_ct_MASK 0x00000007 | ||
404 | #define lpfc_wqe_gen_ct_WORD word7 | ||
405 | uint32_t abort_tag; | ||
406 | uint32_t word9; | ||
407 | #define lpfc_wqe_gen_request_tag_SHIFT 0 | ||
408 | #define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF | ||
409 | #define lpfc_wqe_gen_request_tag_WORD word9 | ||
410 | uint32_t word10; | ||
411 | #define lpfc_wqe_gen_ccp_SHIFT 24 | ||
412 | #define lpfc_wqe_gen_ccp_MASK 0x000000FF | ||
413 | #define lpfc_wqe_gen_ccp_WORD word10 | ||
414 | #define lpfc_wqe_gen_ccpe_SHIFT 23 | ||
415 | #define lpfc_wqe_gen_ccpe_MASK 0x00000001 | ||
416 | #define lpfc_wqe_gen_ccpe_WORD word10 | ||
417 | #define lpfc_wqe_gen_pv_SHIFT 19 | ||
418 | #define lpfc_wqe_gen_pv_MASK 0x00000001 | ||
419 | #define lpfc_wqe_gen_pv_WORD word10 | ||
420 | #define lpfc_wqe_gen_pri_SHIFT 16 | ||
421 | #define lpfc_wqe_gen_pri_MASK 0x00000007 | ||
422 | #define lpfc_wqe_gen_pri_WORD word10 | ||
423 | uint32_t word11; | ||
424 | #define lpfc_wqe_gen_cq_id_SHIFT 16 | ||
425 | #define lpfc_wqe_gen_cq_id_MASK 0x000003FF | ||
426 | #define lpfc_wqe_gen_cq_id_WORD word11 | ||
427 | #define LPFC_WQE_CQ_ID_DEFAULT 0x3ff | ||
428 | #define lpfc_wqe_gen_wqec_SHIFT 7 | ||
429 | #define lpfc_wqe_gen_wqec_MASK 0x00000001 | ||
430 | #define lpfc_wqe_gen_wqec_WORD word11 | ||
431 | #define lpfc_wqe_gen_cmd_type_SHIFT 0 | ||
432 | #define lpfc_wqe_gen_cmd_type_MASK 0x0000000F | ||
433 | #define lpfc_wqe_gen_cmd_type_WORD word11 | ||
434 | uint32_t payload[4]; | ||
435 | }; | ||
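A work queue entry is built with the same accessors; the request tag written here is what the port echoes back in lpfc_wcqe_c_request_tag of the matching completion. A sketch, assuming bf_set() from earlier (the fields shown are illustrative, not a complete WQE):

static void prep_gen_wqe(struct lpfc_wqe_generic *wqe, uint8_t cmnd,
			 uint16_t xri, uint16_t iotag)
{
	bf_set(lpfc_wqe_gen_command, wqe, cmnd);
	bf_set(lpfc_wqe_gen_xri, wqe, xri);
	bf_set(lpfc_wqe_gen_request_tag, wqe, iotag);
	bf_set(lpfc_wqe_gen_cq_id, wqe, LPFC_WQE_CQ_ID_DEFAULT);
}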
436 | |||
437 | struct lpfc_rqe { | ||
438 | uint32_t address_hi; | ||
439 | uint32_t address_lo; | ||
440 | }; | ||
441 | |||
442 | /* buffer descriptors */ | ||
443 | struct lpfc_bde4 { | ||
444 | uint32_t addr_hi; | ||
445 | uint32_t addr_lo; | ||
446 | uint32_t word2; | ||
447 | #define lpfc_bde4_last_SHIFT 31 | ||
448 | #define lpfc_bde4_last_MASK 0x00000001 | ||
449 | #define lpfc_bde4_last_WORD word2 | ||
450 | #define lpfc_bde4_sge_offset_SHIFT 0 | ||
451 | #define lpfc_bde4_sge_offset_MASK 0x000003FF | ||
452 | #define lpfc_bde4_sge_offset_WORD word2 | ||
453 | uint32_t word3; | ||
454 | #define lpfc_bde4_length_SHIFT 0 | ||
455 | #define lpfc_bde4_length_MASK 0x000000FF | ||
456 | #define lpfc_bde4_length_WORD word3 | ||
457 | }; | ||
458 | |||
459 | struct lpfc_register { | ||
460 | uint32_t word0; | ||
461 | }; | ||
462 | |||
463 | #define LPFC_UERR_STATUS_HI 0x00A4 | ||
464 | #define LPFC_UERR_STATUS_LO 0x00A0 | ||
465 | #define LPFC_ONLINE0 0x00B0 | ||
466 | #define LPFC_ONLINE1 0x00B4 | ||
467 | #define LPFC_SCRATCHPAD 0x0058 | ||
468 | |||
469 | /* BAR0 Registers */ | ||
470 | #define LPFC_HST_STATE 0x00AC | ||
471 | #define lpfc_hst_state_perr_SHIFT 31 | ||
472 | #define lpfc_hst_state_perr_MASK 0x1 | ||
473 | #define lpfc_hst_state_perr_WORD word0 | ||
474 | #define lpfc_hst_state_sfi_SHIFT 30 | ||
475 | #define lpfc_hst_state_sfi_MASK 0x1 | ||
476 | #define lpfc_hst_state_sfi_WORD word0 | ||
477 | #define lpfc_hst_state_nip_SHIFT 29 | ||
478 | #define lpfc_hst_state_nip_MASK 0x1 | ||
479 | #define lpfc_hst_state_nip_WORD word0 | ||
480 | #define lpfc_hst_state_ipc_SHIFT 28 | ||
481 | #define lpfc_hst_state_ipc_MASK 0x1 | ||
482 | #define lpfc_hst_state_ipc_WORD word0 | ||
483 | #define lpfc_hst_state_xrom_SHIFT 27 | ||
484 | #define lpfc_hst_state_xrom_MASK 0x1 | ||
485 | #define lpfc_hst_state_xrom_WORD word0 | ||
486 | #define lpfc_hst_state_dl_SHIFT 26 | ||
487 | #define lpfc_hst_state_dl_MASK 0x1 | ||
488 | #define lpfc_hst_state_dl_WORD word0 | ||
489 | #define lpfc_hst_state_port_status_SHIFT 0 | ||
490 | #define lpfc_hst_state_port_status_MASK 0xFFFF | ||
491 | #define lpfc_hst_state_port_status_WORD word0 | ||
492 | |||
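struct lpfc_register is a one-word overlay, which lets the same accessor convention apply to memory-mapped registers: read the 32-bit register into word0, then decode the named fields. A sketch, assuming <linux/io.h> and a hypothetical, already-ioremapped BAR0 base:

static uint16_t read_port_status(void __iomem *bar0_virt)
{
	struct lpfc_register reg;

	reg.word0 = readl(bar0_virt + LPFC_HST_STATE);
	/* compared against the LPFC_POST_STAGE_* values below */
	return bf_get(lpfc_hst_state_port_status, &reg);
}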
493 | #define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 | ||
494 | #define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 | ||
495 | #define LPFC_POST_STAGE_HOST_RDY 0x0002 | ||
496 | #define LPFC_POST_STAGE_BE_RESET 0x0003 | ||
497 | #define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100 | ||
498 | #define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101 | ||
499 | #define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200 | ||
500 | #define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201 | ||
501 | #define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300 | ||
502 | #define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301 | ||
503 | #define LPFC_POST_STAGE_DDR_TEST_START 0x0400 | ||
504 | #define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401 | ||
505 | #define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600 | ||
506 | #define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601 | ||
507 | #define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700 | ||
508 | #define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701 | ||
509 | #define LPFC_POST_STAGE_ARMFW_START 0x0800 | ||
510 | #define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900 | ||
511 | #define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901 | ||
512 | #define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00 | ||
513 | #define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01 | ||
514 | #define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00 | ||
515 | #define LPFC_POST_STAGE_SWITCH_LINK 0x0B01 | ||
516 | #define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02 | ||
517 | #define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03 | ||
518 | #define LPFC_POST_STAGE_PARSE_XML 0x0B04 | ||
519 | #define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05 | ||
520 | #define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06 | ||
521 | #define LPFC_POST_STAGE_RC_DONE 0x0B07 | ||
522 | #define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 | ||
523 | #define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 | ||
524 | #define LPFC_POST_STAGE_ARMFW_READY 0xC000 | ||
525 | #define LPFC_POST_STAGE_ARMFW_UE 0xF000 | ||
526 | |||
527 | #define lpfc_scratchpad_slirev_SHIFT 4 | ||
528 | #define lpfc_scratchpad_slirev_MASK 0xF | ||
529 | #define lpfc_scratchpad_slirev_WORD word0 | ||
530 | #define lpfc_scratchpad_chiptype_SHIFT 8 | ||
531 | #define lpfc_scratchpad_chiptype_MASK 0xFF | ||
532 | #define lpfc_scratchpad_chiptype_WORD word0 | ||
533 | #define lpfc_scratchpad_featurelevel1_SHIFT 16 | ||
534 | #define lpfc_scratchpad_featurelevel1_MASK 0xFF | ||
535 | #define lpfc_scratchpad_featurelevel1_WORD word0 | ||
536 | #define lpfc_scratchpad_featurelevel2_SHIFT 24 | ||
537 | #define lpfc_scratchpad_featurelevel2_MASK 0xFF | ||
538 | #define lpfc_scratchpad_featurelevel2_WORD word0 | ||
539 | |||
540 | /* BAR1 Registers */ | ||
541 | #define LPFC_IMR_MASK_ALL 0xFFFFFFFF | ||
542 | #define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF | ||
543 | |||
544 | #define LPFC_HST_ISR0 0x0C18 | ||
545 | #define LPFC_HST_ISR1 0x0C1C | ||
546 | #define LPFC_HST_ISR2 0x0C20 | ||
547 | #define LPFC_HST_ISR3 0x0C24 | ||
548 | #define LPFC_HST_ISR4 0x0C28 | ||
549 | |||
550 | #define LPFC_HST_IMR0 0x0C48 | ||
551 | #define LPFC_HST_IMR1 0x0C4C | ||
552 | #define LPFC_HST_IMR2 0x0C50 | ||
553 | #define LPFC_HST_IMR3 0x0C54 | ||
554 | #define LPFC_HST_IMR4 0x0C58 | ||
555 | |||
556 | #define LPFC_HST_ISCR0 0x0C78 | ||
557 | #define LPFC_HST_ISCR1 0x0C7C | ||
558 | #define LPFC_HST_ISCR2 0x0C80 | ||
559 | #define LPFC_HST_ISCR3 0x0C84 | ||
560 | #define LPFC_HST_ISCR4 0x0C88 | ||
561 | |||
562 | #define LPFC_SLI4_INTR0 BIT0 | ||
563 | #define LPFC_SLI4_INTR1 BIT1 | ||
564 | #define LPFC_SLI4_INTR2 BIT2 | ||
565 | #define LPFC_SLI4_INTR3 BIT3 | ||
566 | #define LPFC_SLI4_INTR4 BIT4 | ||
567 | #define LPFC_SLI4_INTR5 BIT5 | ||
568 | #define LPFC_SLI4_INTR6 BIT6 | ||
569 | #define LPFC_SLI4_INTR7 BIT7 | ||
570 | #define LPFC_SLI4_INTR8 BIT8 | ||
571 | #define LPFC_SLI4_INTR9 BIT9 | ||
572 | #define LPFC_SLI4_INTR10 BIT10 | ||
573 | #define LPFC_SLI4_INTR11 BIT11 | ||
574 | #define LPFC_SLI4_INTR12 BIT12 | ||
575 | #define LPFC_SLI4_INTR13 BIT13 | ||
576 | #define LPFC_SLI4_INTR14 BIT14 | ||
577 | #define LPFC_SLI4_INTR15 BIT15 | ||
578 | #define LPFC_SLI4_INTR16 BIT16 | ||
579 | #define LPFC_SLI4_INTR17 BIT17 | ||
580 | #define LPFC_SLI4_INTR18 BIT18 | ||
581 | #define LPFC_SLI4_INTR19 BIT19 | ||
582 | #define LPFC_SLI4_INTR20 BIT20 | ||
583 | #define LPFC_SLI4_INTR21 BIT21 | ||
584 | #define LPFC_SLI4_INTR22 BIT22 | ||
585 | #define LPFC_SLI4_INTR23 BIT23 | ||
586 | #define LPFC_SLI4_INTR24 BIT24 | ||
587 | #define LPFC_SLI4_INTR25 BIT25 | ||
588 | #define LPFC_SLI4_INTR26 BIT26 | ||
589 | #define LPFC_SLI4_INTR27 BIT27 | ||
590 | #define LPFC_SLI4_INTR28 BIT28 | ||
591 | #define LPFC_SLI4_INTR29 BIT29 | ||
592 | #define LPFC_SLI4_INTR30 BIT30 | ||
593 | #define LPFC_SLI4_INTR31 BIT31 | ||
594 | |||
595 | /* BAR2 Registers */ | ||
596 | #define LPFC_RQ_DOORBELL 0x00A0 | ||
597 | #define lpfc_rq_doorbell_num_posted_SHIFT 16 | ||
598 | #define lpfc_rq_doorbell_num_posted_MASK 0x3FFF | ||
599 | #define lpfc_rq_doorbell_num_posted_WORD word0 | ||
600 | #define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ | ||
601 | #define lpfc_rq_doorbell_id_SHIFT 0 | ||
602 | #define lpfc_rq_doorbell_id_MASK 0x03FF | ||
603 | #define lpfc_rq_doorbell_id_WORD word0 | ||
604 | |||
605 | #define LPFC_WQ_DOORBELL 0x0040 | ||
606 | #define lpfc_wq_doorbell_num_posted_SHIFT 24 | ||
607 | #define lpfc_wq_doorbell_num_posted_MASK 0x00FF | ||
608 | #define lpfc_wq_doorbell_num_posted_WORD word0 | ||
609 | #define lpfc_wq_doorbell_index_SHIFT 16 | ||
610 | #define lpfc_wq_doorbell_index_MASK 0x00FF | ||
611 | #define lpfc_wq_doorbell_index_WORD word0 | ||
612 | #define lpfc_wq_doorbell_id_SHIFT 0 | ||
613 | #define lpfc_wq_doorbell_id_MASK 0xFFFF | ||
614 | #define lpfc_wq_doorbell_id_WORD word0 | ||
615 | |||
616 | #define LPFC_EQCQ_DOORBELL 0x0120 | ||
617 | #define lpfc_eqcq_doorbell_arm_SHIFT 29 | ||
618 | #define lpfc_eqcq_doorbell_arm_MASK 0x0001 | ||
619 | #define lpfc_eqcq_doorbell_arm_WORD word0 | ||
620 | #define lpfc_eqcq_doorbell_num_released_SHIFT 16 | ||
621 | #define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF | ||
622 | #define lpfc_eqcq_doorbell_num_released_WORD word0 | ||
623 | #define lpfc_eqcq_doorbell_qt_SHIFT 10 | ||
624 | #define lpfc_eqcq_doorbell_qt_MASK 0x0001 | ||
625 | #define lpfc_eqcq_doorbell_qt_WORD word0 | ||
626 | #define LPFC_QUEUE_TYPE_COMPLETION 0 | ||
627 | #define LPFC_QUEUE_TYPE_EVENT 1 | ||
628 | #define lpfc_eqcq_doorbell_eqci_SHIFT 9 | ||
629 | #define lpfc_eqcq_doorbell_eqci_MASK 0x0001 | ||
630 | #define lpfc_eqcq_doorbell_eqci_WORD word0 | ||
631 | #define lpfc_eqcq_doorbell_cqid_SHIFT 0 | ||
632 | #define lpfc_eqcq_doorbell_cqid_MASK 0x03FF | ||
633 | #define lpfc_eqcq_doorbell_cqid_WORD word0 | ||
634 | #define lpfc_eqcq_doorbell_eqid_SHIFT 0 | ||
635 | #define lpfc_eqcq_doorbell_eqid_MASK 0x01FF | ||
636 | #define lpfc_eqcq_doorbell_eqid_WORD word0 | ||
637 | |||
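Re-arming an event queue is a single doorbell write composed from the fields above: the queue type, the count of consumed entries being released, and optionally the arm bit. A sketch, assuming the bf_set() helper from earlier, <linux/io.h>, and a hypothetical ioremapped base for the doorbell BAR:

static void eq_release(void __iomem *db_base, uint16_t eqid,
		       uint32_t released, int arm)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, eqid);
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	writel(doorbell.word0, db_base + LPFC_EQCQ_DOORBELL);
}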
638 | #define LPFC_BMBX 0x0160 | ||
639 | #define lpfc_bmbx_addr_SHIFT 2 | ||
640 | #define lpfc_bmbx_addr_MASK 0x3FFFFFFF | ||
641 | #define lpfc_bmbx_addr_WORD word0 | ||
642 | #define lpfc_bmbx_hi_SHIFT 1 | ||
643 | #define lpfc_bmbx_hi_MASK 0x0001 | ||
644 | #define lpfc_bmbx_hi_WORD word0 | ||
645 | #define lpfc_bmbx_rdy_SHIFT 0 | ||
646 | #define lpfc_bmbx_rdy_MASK 0x0001 | ||
647 | #define lpfc_bmbx_rdy_WORD word0 | ||
648 | |||
649 | #define LPFC_MQ_DOORBELL 0x0140 | ||
650 | #define lpfc_mq_doorbell_num_posted_SHIFT 16 | ||
651 | #define lpfc_mq_doorbell_num_posted_MASK 0x3FFF | ||
652 | #define lpfc_mq_doorbell_num_posted_WORD word0 | ||
653 | #define lpfc_mq_doorbell_id_SHIFT 0 | ||
654 | #define lpfc_mq_doorbell_id_MASK 0x03FF | ||
655 | #define lpfc_mq_doorbell_id_WORD word0 | ||
656 | |||
657 | struct lpfc_sli4_cfg_mhdr { | ||
658 | uint32_t word1; | ||
659 | #define lpfc_mbox_hdr_emb_SHIFT 0 | ||
660 | #define lpfc_mbox_hdr_emb_MASK 0x00000001 | ||
661 | #define lpfc_mbox_hdr_emb_WORD word1 | ||
662 | #define lpfc_mbox_hdr_sge_cnt_SHIFT 3 | ||
663 | #define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F | ||
664 | #define lpfc_mbox_hdr_sge_cnt_WORD word1 | ||
665 | uint32_t payload_length; | ||
666 | uint32_t tag_lo; | ||
667 | uint32_t tag_hi; | ||
668 | uint32_t reserved5; | ||
669 | }; | ||
670 | |||
671 | union lpfc_sli4_cfg_shdr { | ||
672 | struct { | ||
673 | uint32_t word6; | ||
674 | #define lpfc_mbox_hdr_opcode_SHIFT 0 | ||
675 | #define lpfc_mbox_hdr_opcode_MASK 0x000000FF | ||
676 | #define lpfc_mbox_hdr_opcode_WORD word6 | ||
677 | #define lpfc_mbox_hdr_subsystem_SHIFT 8 | ||
678 | #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF | ||
679 | #define lpfc_mbox_hdr_subsystem_WORD word6 | ||
680 | #define lpfc_mbox_hdr_port_number_SHIFT 16 | ||
681 | #define lpfc_mbox_hdr_port_number_MASK 0x000000FF | ||
682 | #define lpfc_mbox_hdr_port_number_WORD word6 | ||
683 | #define lpfc_mbox_hdr_domain_SHIFT 24 | ||
684 | #define lpfc_mbox_hdr_domain_MASK 0x000000FF | ||
685 | #define lpfc_mbox_hdr_domain_WORD word6 | ||
686 | uint32_t timeout; | ||
687 | uint32_t request_length; | ||
688 | uint32_t reserved9; | ||
689 | } request; | ||
690 | struct { | ||
691 | uint32_t word6; | ||
692 | #define lpfc_mbox_hdr_opcode_SHIFT 0 | ||
693 | #define lpfc_mbox_hdr_opcode_MASK 0x000000FF | ||
694 | #define lpfc_mbox_hdr_opcode_WORD word6 | ||
695 | #define lpfc_mbox_hdr_subsystem_SHIFT 8 | ||
696 | #define lpfc_mbox_hdr_subsystem_MASK 0x000000FF | ||
697 | #define lpfc_mbox_hdr_subsystem_WORD word6 | ||
698 | #define lpfc_mbox_hdr_domain_SHIFT 24 | ||
699 | #define lpfc_mbox_hdr_domain_MASK 0x000000FF | ||
700 | #define lpfc_mbox_hdr_domain_WORD word6 | ||
701 | uint32_t word7; | ||
702 | #define lpfc_mbox_hdr_status_SHIFT 0 | ||
703 | #define lpfc_mbox_hdr_status_MASK 0x000000FF | ||
704 | #define lpfc_mbox_hdr_status_WORD word7 | ||
705 | #define lpfc_mbox_hdr_add_status_SHIFT 8 | ||
706 | #define lpfc_mbox_hdr_add_status_MASK 0x000000FF | ||
707 | #define lpfc_mbox_hdr_add_status_WORD word7 | ||
708 | uint32_t response_length; | ||
709 | uint32_t actual_response_length; | ||
710 | } response; | ||
711 | }; | ||
712 | |||
713 | /* Mailbox structures */ | ||
714 | struct mbox_header { | ||
715 | struct lpfc_sli4_cfg_mhdr cfg_mhdr; | ||
716 | union lpfc_sli4_cfg_shdr cfg_shdr; | ||
717 | }; | ||
718 | |||
719 | /* Subsystem Definitions */ | ||
720 | #define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 | ||
721 | #define LPFC_MBOX_SUBSYSTEM_FCOE 0xC | ||
722 | |||
723 | /* Device Specific Definitions */ | ||
724 | |||
725 | /* The HOST ENDIAN defines are in Big Endian format. */ | ||
726 | #define HOST_ENDIAN_LOW_WORD0 0xFF3412FF | ||
727 | #define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF | ||
728 | |||
729 | /* Common Opcodes */ | ||
730 | #define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C | ||
731 | #define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D | ||
732 | #define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 | ||
733 | #define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 | ||
734 | #define LPFC_MBOX_OPCODE_NOP 0x21 | ||
735 | #define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 | ||
736 | #define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 | ||
737 | #define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 | ||
738 | #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D | ||
739 | |||
740 | /* FCoE Opcodes */ | ||
741 | #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 | ||
742 | #define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02 | ||
743 | #define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03 | ||
744 | #define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04 | ||
745 | #define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05 | ||
746 | #define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06 | ||
747 | #define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08 | ||
748 | #define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 | ||
749 | #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A | ||
750 | #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B | ||
751 | |||
752 | /* Mailbox command structures */ | ||
753 | struct eq_context { | ||
754 | uint32_t word0; | ||
755 | #define lpfc_eq_context_size_SHIFT 31 | ||
756 | #define lpfc_eq_context_size_MASK 0x00000001 | ||
757 | #define lpfc_eq_context_size_WORD word0 | ||
758 | #define LPFC_EQE_SIZE_4 0x0 | ||
759 | #define LPFC_EQE_SIZE_16 0x1 | ||
760 | #define lpfc_eq_context_valid_SHIFT 29 | ||
761 | #define lpfc_eq_context_valid_MASK 0x00000001 | ||
762 | #define lpfc_eq_context_valid_WORD word0 | ||
763 | uint32_t word1; | ||
764 | #define lpfc_eq_context_count_SHIFT 26 | ||
765 | #define lpfc_eq_context_count_MASK 0x00000003 | ||
766 | #define lpfc_eq_context_count_WORD word1 | ||
767 | #define LPFC_EQ_CNT_256 0x0 | ||
768 | #define LPFC_EQ_CNT_512 0x1 | ||
769 | #define LPFC_EQ_CNT_1024 0x2 | ||
770 | #define LPFC_EQ_CNT_2048 0x3 | ||
771 | #define LPFC_EQ_CNT_4096 0x4 | ||
772 | uint32_t word2; | ||
773 | #define lpfc_eq_context_delay_multi_SHIFT 13 | ||
774 | #define lpfc_eq_context_delay_multi_MASK 0x000003FF | ||
775 | #define lpfc_eq_context_delay_multi_WORD word2 | ||
776 | uint32_t reserved3; | ||
777 | }; | ||
778 | |||
779 | struct sgl_page_pairs { | ||
780 | uint32_t sgl_pg0_addr_lo; | ||
781 | uint32_t sgl_pg0_addr_hi; | ||
782 | uint32_t sgl_pg1_addr_lo; | ||
783 | uint32_t sgl_pg1_addr_hi; | ||
784 | }; | ||
785 | |||
786 | struct lpfc_mbx_post_sgl_pages { | ||
787 | struct mbox_header header; | ||
788 | uint32_t word0; | ||
789 | #define lpfc_post_sgl_pages_xri_SHIFT 0 | ||
790 | #define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF | ||
791 | #define lpfc_post_sgl_pages_xri_WORD word0 | ||
792 | #define lpfc_post_sgl_pages_xricnt_SHIFT 16 | ||
793 | #define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF | ||
794 | #define lpfc_post_sgl_pages_xricnt_WORD word0 | ||
795 | struct sgl_page_pairs sgl_pg_pairs[1]; | ||
796 | }; | ||
797 | |||
798 | /* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */ | ||
799 | struct lpfc_mbx_post_uembed_sgl_page1 { | ||
800 | union lpfc_sli4_cfg_shdr cfg_shdr; | ||
801 | uint32_t word0; | ||
802 | struct sgl_page_pairs sgl_pg_pairs; | ||
803 | }; | ||
804 | |||
805 | struct lpfc_mbx_sge { | ||
806 | uint32_t pa_lo; | ||
807 | uint32_t pa_hi; | ||
808 | uint32_t length; | ||
809 | }; | ||
810 | |||
811 | struct lpfc_mbx_nembed_cmd { | ||
812 | struct lpfc_sli4_cfg_mhdr cfg_mhdr; | ||
813 | #define LPFC_SLI4_MBX_SGE_MAX_PAGES 19 | ||
814 | struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES]; | ||
815 | }; | ||
816 | |||
817 | struct lpfc_mbx_nembed_sge_virt { | ||
818 | void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES]; | ||
819 | }; | ||
820 | |||
821 | struct lpfc_mbx_eq_create { | ||
822 | struct mbox_header header; | ||
823 | union { | ||
824 | struct { | ||
825 | uint32_t word0; | ||
826 | #define lpfc_mbx_eq_create_num_pages_SHIFT 0 | ||
827 | #define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF | ||
828 | #define lpfc_mbx_eq_create_num_pages_WORD word0 | ||
829 | struct eq_context context; | ||
830 | struct dma_address page[LPFC_MAX_EQ_PAGE]; | ||
831 | } request; | ||
832 | struct { | ||
833 | uint32_t word0; | ||
834 | #define lpfc_mbx_eq_create_q_id_SHIFT 0 | ||
835 | #define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF | ||
836 | #define lpfc_mbx_eq_create_q_id_WORD word0 | ||
837 | } response; | ||
838 | } u; | ||
839 | }; | ||
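Filling the request side of an EQ_CREATE command is mostly bf_set() calls on the embedded eq_context plus the list of page addresses. A sketch under the same helper assumption ("dma" is a hypothetical array of already-mapped page addresses):

static void prep_eq_create(struct lpfc_mbx_eq_create *eq_create,
			   struct dma_address *dma, int num_pages)
{
	int i;

	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, num_pages);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
	bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
	       LPFC_EQ_CNT_1024);		/* 1024-entry queue */
	for (i = 0; i < num_pages; i++)		/* num_pages <= LPFC_MAX_EQ_PAGE */
		eq_create->u.request.page[i] = dma[i];
}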
840 | |||
841 | struct lpfc_mbx_eq_destroy { | ||
842 | struct mbox_header header; | ||
843 | union { | ||
844 | struct { | ||
845 | uint32_t word0; | ||
846 | #define lpfc_mbx_eq_destroy_q_id_SHIFT 0 | ||
847 | #define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF | ||
848 | #define lpfc_mbx_eq_destroy_q_id_WORD word0 | ||
849 | } request; | ||
850 | struct { | ||
851 | uint32_t word0; | ||
852 | } response; | ||
853 | } u; | ||
854 | }; | ||
855 | |||
856 | struct lpfc_mbx_nop { | ||
857 | struct mbox_header header; | ||
858 | uint32_t context[2]; | ||
859 | }; | ||
860 | |||
861 | struct cq_context { | ||
862 | uint32_t word0; | ||
863 | #define lpfc_cq_context_event_SHIFT 31 | ||
864 | #define lpfc_cq_context_event_MASK 0x00000001 | ||
865 | #define lpfc_cq_context_event_WORD word0 | ||
866 | #define lpfc_cq_context_valid_SHIFT 29 | ||
867 | #define lpfc_cq_context_valid_MASK 0x00000001 | ||
868 | #define lpfc_cq_context_valid_WORD word0 | ||
869 | #define lpfc_cq_context_count_SHIFT 27 | ||
870 | #define lpfc_cq_context_count_MASK 0x00000003 | ||
871 | #define lpfc_cq_context_count_WORD word0 | ||
872 | #define LPFC_CQ_CNT_256 0x0 | ||
873 | #define LPFC_CQ_CNT_512 0x1 | ||
874 | #define LPFC_CQ_CNT_1024 0x2 | ||
875 | uint32_t word1; | ||
876 | #define lpfc_cq_eq_id_SHIFT 22 | ||
877 | #define lpfc_cq_eq_id_MASK 0x000000FF | ||
878 | #define lpfc_cq_eq_id_WORD word1 | ||
879 | uint32_t reserved0; | ||
880 | uint32_t reserved1; | ||
881 | }; | ||
882 | |||
883 | struct lpfc_mbx_cq_create { | ||
884 | struct mbox_header header; | ||
885 | union { | ||
886 | struct { | ||
887 | uint32_t word0; | ||
888 | #define lpfc_mbx_cq_create_num_pages_SHIFT 0 | ||
889 | #define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF | ||
890 | #define lpfc_mbx_cq_create_num_pages_WORD word0 | ||
891 | struct cq_context context; | ||
892 | struct dma_address page[LPFC_MAX_CQ_PAGE]; | ||
893 | } request; | ||
894 | struct { | ||
895 | uint32_t word0; | ||
896 | #define lpfc_mbx_cq_create_q_id_SHIFT 0 | ||
897 | #define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF | ||
898 | #define lpfc_mbx_cq_create_q_id_WORD word0 | ||
899 | } response; | ||
900 | } u; | ||
901 | }; | ||
902 | |||
903 | struct lpfc_mbx_cq_destroy { | ||
904 | struct mbox_header header; | ||
905 | union { | ||
906 | struct { | ||
907 | uint32_t word0; | ||
908 | #define lpfc_mbx_cq_destroy_q_id_SHIFT 0 | ||
909 | #define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF | ||
910 | #define lpfc_mbx_cq_destroy_q_id_WORD word0 | ||
911 | } request; | ||
912 | struct { | ||
913 | uint32_t word0; | ||
914 | } response; | ||
915 | } u; | ||
916 | }; | ||
917 | |||
918 | struct wq_context { | ||
919 | uint32_t reserved0; | ||
920 | uint32_t reserved1; | ||
921 | uint32_t reserved2; | ||
922 | uint32_t reserved3; | ||
923 | }; | ||
924 | |||
925 | struct lpfc_mbx_wq_create { | ||
926 | struct mbox_header header; | ||
927 | union { | ||
928 | struct { | ||
929 | uint32_t word0; | ||
930 | #define lpfc_mbx_wq_create_num_pages_SHIFT 0 | ||
931 | #define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF | ||
932 | #define lpfc_mbx_wq_create_num_pages_WORD word0 | ||
933 | #define lpfc_mbx_wq_create_cq_id_SHIFT 16 | ||
934 | #define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF | ||
935 | #define lpfc_mbx_wq_create_cq_id_WORD word0 | ||
936 | struct dma_address page[LPFC_MAX_WQ_PAGE]; | ||
937 | } request; | ||
938 | struct { | ||
939 | uint32_t word0; | ||
940 | #define lpfc_mbx_wq_create_q_id_SHIFT 0 | ||
941 | #define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF | ||
942 | #define lpfc_mbx_wq_create_q_id_WORD word0 | ||
943 | } response; | ||
944 | } u; | ||
945 | }; | ||
946 | |||
947 | struct lpfc_mbx_wq_destroy { | ||
948 | struct mbox_header header; | ||
949 | union { | ||
950 | struct { | ||
951 | uint32_t word0; | ||
952 | #define lpfc_mbx_wq_destroy_q_id_SHIFT 0 | ||
953 | #define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF | ||
954 | #define lpfc_mbx_wq_destroy_q_id_WORD word0 | ||
955 | } request; | ||
956 | struct { | ||
957 | uint32_t word0; | ||
958 | } response; | ||
959 | } u; | ||
960 | }; | ||
961 | |||
962 | #define LPFC_HDR_BUF_SIZE 128 | ||
963 | #define LPFC_DATA_BUF_SIZE 4096 | ||
964 | struct rq_context { | ||
965 | uint32_t word0; | ||
966 | #define lpfc_rq_context_rq_size_SHIFT 16 | ||
967 | #define lpfc_rq_context_rq_size_MASK 0x0000000F | ||
968 | #define lpfc_rq_context_rq_size_WORD word0 | ||
969 | #define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ | ||
970 | #define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ | ||
971 | #define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ | ||
972 | #define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ | ||
973 | uint32_t reserved1; | ||
974 | uint32_t word2; | ||
975 | #define lpfc_rq_context_cq_id_SHIFT 16 | ||
976 | #define lpfc_rq_context_cq_id_MASK 0x000003FF | ||
977 | #define lpfc_rq_context_cq_id_WORD word2 | ||
978 | #define lpfc_rq_context_buf_size_SHIFT 0 | ||
979 | #define lpfc_rq_context_buf_size_MASK 0x0000FFFF | ||
980 | #define lpfc_rq_context_buf_size_WORD word2 | ||
981 | uint32_t reserved3; | ||
982 | }; | ||
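An unsolicited-receive setup typically pairs two RQs on the same CQ, one sized for frame headers (LPFC_HDR_BUF_SIZE buffers) and one for payloads (LPFC_DATA_BUF_SIZE buffers); the ring depth is encoded with the LPFC_RQ_RING_SIZE_* values rather than a raw entry count. A sketch, assuming bf_set() from earlier:

static void prep_rq_context(struct rq_context *ctx, uint16_t cq_id, int hdr_rq)
{
	bf_set(lpfc_rq_context_rq_size, ctx, LPFC_RQ_RING_SIZE_512);
	bf_set(lpfc_rq_context_cq_id, ctx, cq_id);
	bf_set(lpfc_rq_context_buf_size, ctx,
	       hdr_rq ? LPFC_HDR_BUF_SIZE : LPFC_DATA_BUF_SIZE);
}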
983 | |||
984 | struct lpfc_mbx_rq_create { | ||
985 | struct mbox_header header; | ||
986 | union { | ||
987 | struct { | ||
988 | uint32_t word0; | ||
989 | #define lpfc_mbx_rq_create_num_pages_SHIFT 0 | ||
990 | #define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF | ||
991 | #define lpfc_mbx_rq_create_num_pages_WORD word0 | ||
992 | struct rq_context context; | ||
993 | struct dma_address page[LPFC_MAX_WQ_PAGE]; | ||
994 | } request; | ||
995 | struct { | ||
996 | uint32_t word0; | ||
997 | #define lpfc_mbx_rq_create_q_id_SHIFT 0 | ||
998 | #define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF | ||
999 | #define lpfc_mbx_rq_create_q_id_WORD word0 | ||
1000 | } response; | ||
1001 | } u; | ||
1002 | }; | ||
1003 | |||
1004 | struct lpfc_mbx_rq_destroy { | ||
1005 | struct mbox_header header; | ||
1006 | union { | ||
1007 | struct { | ||
1008 | uint32_t word0; | ||
1009 | #define lpfc_mbx_rq_destroy_q_id_SHIFT 0 | ||
1010 | #define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF | ||
1011 | #define lpfc_mbx_rq_destroy_q_id_WORD word0 | ||
1012 | } request; | ||
1013 | struct { | ||
1014 | uint32_t word0; | ||
1015 | } response; | ||
1016 | } u; | ||
1017 | }; | ||
1018 | |||
1019 | struct mq_context { | ||
1020 | uint32_t word0; | ||
1021 | #define lpfc_mq_context_cq_id_SHIFT 22 | ||
1022 | #define lpfc_mq_context_cq_id_MASK 0x000003FF | ||
1023 | #define lpfc_mq_context_cq_id_WORD word0 | ||
1024 | #define lpfc_mq_context_count_SHIFT 16 | ||
1025 | #define lpfc_mq_context_count_MASK 0x0000000F | ||
1026 | #define lpfc_mq_context_count_WORD word0 | ||
1027 | #define LPFC_MQ_CNT_16 0x5 | ||
1028 | #define LPFC_MQ_CNT_32 0x6 | ||
1029 | #define LPFC_MQ_CNT_64 0x7 | ||
1030 | #define LPFC_MQ_CNT_128 0x8 | ||
1031 | uint32_t word1; | ||
1032 | #define lpfc_mq_context_valid_SHIFT 31 | ||
1033 | #define lpfc_mq_context_valid_MASK 0x00000001 | ||
1034 | #define lpfc_mq_context_valid_WORD word1 | ||
1035 | uint32_t reserved2; | ||
1036 | uint32_t reserved3; | ||
1037 | }; | ||
1038 | |||
1039 | struct lpfc_mbx_mq_create { | ||
1040 | struct mbox_header header; | ||
1041 | union { | ||
1042 | struct { | ||
1043 | uint32_t word0; | ||
1044 | #define lpfc_mbx_mq_create_num_pages_SHIFT 0 | ||
1045 | #define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF | ||
1046 | #define lpfc_mbx_mq_create_num_pages_WORD word0 | ||
1047 | struct mq_context context; | ||
1048 | struct dma_address page[LPFC_MAX_MQ_PAGE]; | ||
1049 | } request; | ||
1050 | struct { | ||
1051 | uint32_t word0; | ||
1052 | #define lpfc_mbx_mq_create_q_id_SHIFT 0 | ||
1053 | #define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF | ||
1054 | #define lpfc_mbx_mq_create_q_id_WORD word0 | ||
1055 | } response; | ||
1056 | } u; | ||
1057 | }; | ||
1058 | |||
1059 | struct lpfc_mbx_mq_destroy { | ||
1060 | struct mbox_header header; | ||
1061 | union { | ||
1062 | struct { | ||
1063 | uint32_t word0; | ||
1064 | #define lpfc_mbx_mq_destroy_q_id_SHIFT 0 | ||
1065 | #define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF | ||
1066 | #define lpfc_mbx_mq_destroy_q_id_WORD word0 | ||
1067 | } request; | ||
1068 | struct { | ||
1069 | uint32_t word0; | ||
1070 | } response; | ||
1071 | } u; | ||
1072 | }; | ||
1073 | |||
1074 | struct lpfc_mbx_post_hdr_tmpl { | ||
1075 | struct mbox_header header; | ||
1076 | uint32_t word10; | ||
1077 | #define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0 | ||
1078 | #define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF | ||
1079 | #define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10 | ||
1080 | #define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16 | ||
1081 | #define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF | ||
1082 | #define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10 | ||
1083 | uint32_t rpi_paddr_lo; | ||
1084 | uint32_t rpi_paddr_hi; | ||
1085 | }; | ||
1086 | |||
1087 | struct sli4_sge { /* SLI-4 */ | ||
1088 | uint32_t addr_hi; | ||
1089 | uint32_t addr_lo; | ||
1090 | |||
1091 | uint32_t word2; | ||
1092 | #define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used */ | ||
1093 | #define lpfc_sli4_sge_offset_MASK 0x00FFFFFF | ||
1094 | #define lpfc_sli4_sge_offset_WORD word2 | ||
1095 | #define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets | ||
1096 | this flag !! */ | ||
1097 | #define lpfc_sli4_sge_last_MASK 0x00000001 | ||
1098 | #define lpfc_sli4_sge_last_WORD word2 | ||
1099 | uint32_t word3; | ||
1100 | #define lpfc_sli4_sge_len_SHIFT 0 | ||
1101 | #define lpfc_sli4_sge_len_MASK 0x0001FFFF | ||
1102 | #define lpfc_sli4_sge_len_WORD word3 | ||
1103 | }; | ||
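Building a scatter-gather list is direct: the 64-bit DMA address is split across addr_hi/addr_lo, the length goes through the len field, and the final entry sets the last flag. A sketch, assuming bf_set() from earlier (byte-swapping of the words is omitted):

static void fill_sge(struct sli4_sge *sge, uint64_t paddr, uint32_t len, int last)
{
	sge->addr_hi = (uint32_t)(paddr >> 32);
	sge->addr_lo = (uint32_t)(paddr & 0xffffffffUL);
	sge->word2 = 0;
	bf_set(lpfc_sli4_sge_last, sge, last ? 1 : 0);
	sge->word3 = 0;
	bf_set(lpfc_sli4_sge_len, sge, len);
}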
1104 | |||
1105 | struct fcf_record { | ||
1106 | uint32_t max_rcv_size; | ||
1107 | uint32_t fka_adv_period; | ||
1108 | uint32_t fip_priority; | ||
1109 | uint32_t word3; | ||
1110 | #define lpfc_fcf_record_mac_0_SHIFT 0 | ||
1111 | #define lpfc_fcf_record_mac_0_MASK 0x000000FF | ||
1112 | #define lpfc_fcf_record_mac_0_WORD word3 | ||
1113 | #define lpfc_fcf_record_mac_1_SHIFT 8 | ||
1114 | #define lpfc_fcf_record_mac_1_MASK 0x000000FF | ||
1115 | #define lpfc_fcf_record_mac_1_WORD word3 | ||
1116 | #define lpfc_fcf_record_mac_2_SHIFT 16 | ||
1117 | #define lpfc_fcf_record_mac_2_MASK 0x000000FF | ||
1118 | #define lpfc_fcf_record_mac_2_WORD word3 | ||
1119 | #define lpfc_fcf_record_mac_3_SHIFT 24 | ||
1120 | #define lpfc_fcf_record_mac_3_MASK 0x000000FF | ||
1121 | #define lpfc_fcf_record_mac_3_WORD word3 | ||
1122 | uint32_t word4; | ||
1123 | #define lpfc_fcf_record_mac_4_SHIFT 0 | ||
1124 | #define lpfc_fcf_record_mac_4_MASK 0x000000FF | ||
1125 | #define lpfc_fcf_record_mac_4_WORD word4 | ||
1126 | #define lpfc_fcf_record_mac_5_SHIFT 8 | ||
1127 | #define lpfc_fcf_record_mac_5_MASK 0x000000FF | ||
1128 | #define lpfc_fcf_record_mac_5_WORD word4 | ||
1129 | #define lpfc_fcf_record_fcf_avail_SHIFT 16 | ||
1130 | #define lpfc_fcf_record_fcf_avail_MASK 0x000000FF | ||
1131 | #define lpfc_fcf_record_fcf_avail_WORD word4 | ||
1132 | #define lpfc_fcf_record_mac_addr_prov_SHIFT 24 | ||
1133 | #define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF | ||
1134 | #define lpfc_fcf_record_mac_addr_prov_WORD word4 | ||
1135 | #define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */ | ||
1136 | #define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */ | ||
1137 | uint32_t word5; | ||
1138 | #define lpfc_fcf_record_fab_name_0_SHIFT 0 | ||
1139 | #define lpfc_fcf_record_fab_name_0_MASK 0x000000FF | ||
1140 | #define lpfc_fcf_record_fab_name_0_WORD word5 | ||
1141 | #define lpfc_fcf_record_fab_name_1_SHIFT 8 | ||
1142 | #define lpfc_fcf_record_fab_name_1_MASK 0x000000FF | ||
1143 | #define lpfc_fcf_record_fab_name_1_WORD word5 | ||
1144 | #define lpfc_fcf_record_fab_name_2_SHIFT 16 | ||
1145 | #define lpfc_fcf_record_fab_name_2_MASK 0x000000FF | ||
1146 | #define lpfc_fcf_record_fab_name_2_WORD word5 | ||
1147 | #define lpfc_fcf_record_fab_name_3_SHIFT 24 | ||
1148 | #define lpfc_fcf_record_fab_name_3_MASK 0x000000FF | ||
1149 | #define lpfc_fcf_record_fab_name_3_WORD word5 | ||
1150 | uint32_t word6; | ||
1151 | #define lpfc_fcf_record_fab_name_4_SHIFT 0 | ||
1152 | #define lpfc_fcf_record_fab_name_4_MASK 0x000000FF | ||
1153 | #define lpfc_fcf_record_fab_name_4_WORD word6 | ||
1154 | #define lpfc_fcf_record_fab_name_5_SHIFT 8 | ||
1155 | #define lpfc_fcf_record_fab_name_5_MASK 0x000000FF | ||
1156 | #define lpfc_fcf_record_fab_name_5_WORD word6 | ||
1157 | #define lpfc_fcf_record_fab_name_6_SHIFT 16 | ||
1158 | #define lpfc_fcf_record_fab_name_6_MASK 0x000000FF | ||
1159 | #define lpfc_fcf_record_fab_name_6_WORD word6 | ||
1160 | #define lpfc_fcf_record_fab_name_7_SHIFT 24 | ||
1161 | #define lpfc_fcf_record_fab_name_7_MASK 0x000000FF | ||
1162 | #define lpfc_fcf_record_fab_name_7_WORD word6 | ||
1163 | uint32_t word7; | ||
1164 | #define lpfc_fcf_record_fc_map_0_SHIFT 0 | ||
1165 | #define lpfc_fcf_record_fc_map_0_MASK 0x000000FF | ||
1166 | #define lpfc_fcf_record_fc_map_0_WORD word7 | ||
1167 | #define lpfc_fcf_record_fc_map_1_SHIFT 8 | ||
1168 | #define lpfc_fcf_record_fc_map_1_MASK 0x000000FF | ||
1169 | #define lpfc_fcf_record_fc_map_1_WORD word7 | ||
1170 | #define lpfc_fcf_record_fc_map_2_SHIFT 16 | ||
1171 | #define lpfc_fcf_record_fc_map_2_MASK 0x000000FF | ||
1172 | #define lpfc_fcf_record_fc_map_2_WORD word7 | ||
1173 | #define lpfc_fcf_record_fcf_valid_SHIFT 24 | ||
1174 | #define lpfc_fcf_record_fcf_valid_MASK 0x000000FF | ||
1175 | #define lpfc_fcf_record_fcf_valid_WORD word7 | ||
1176 | uint32_t word8; | ||
1177 | #define lpfc_fcf_record_fcf_index_SHIFT 0 | ||
1178 | #define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF | ||
1179 | #define lpfc_fcf_record_fcf_index_WORD word8 | ||
1180 | #define lpfc_fcf_record_fcf_state_SHIFT 16 | ||
1181 | #define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF | ||
1182 | #define lpfc_fcf_record_fcf_state_WORD word8 | ||
1183 | uint8_t vlan_bitmap[512]; | ||
1184 | }; | ||
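The FCF record stores its MAC address and fabric name a byte per field, so recovering the MAC address is six bf_get() calls. A sketch under the same helper assumption:

static void fcf_record_mac(struct fcf_record *fcf, uint8_t mac[6])
{
	mac[0] = bf_get(lpfc_fcf_record_mac_0, fcf);
	mac[1] = bf_get(lpfc_fcf_record_mac_1, fcf);
	mac[2] = bf_get(lpfc_fcf_record_mac_2, fcf);
	mac[3] = bf_get(lpfc_fcf_record_mac_3, fcf);
	mac[4] = bf_get(lpfc_fcf_record_mac_4, fcf);
	mac[5] = bf_get(lpfc_fcf_record_mac_5, fcf);
}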
1185 | |||
1186 | struct lpfc_mbx_read_fcf_tbl { | ||
1187 | union lpfc_sli4_cfg_shdr cfg_shdr; | ||
1188 | union { | ||
1189 | struct { | ||
1190 | uint32_t word10; | ||
1191 | #define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0 | ||
1192 | #define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF | ||
1193 | #define lpfc_mbx_read_fcf_tbl_indx_WORD word10 | ||
1194 | } request; | ||
1195 | struct { | ||
1196 | uint32_t eventag; | ||
1197 | } response; | ||
1198 | } u; | ||
1199 | uint32_t word11; | ||
1200 | #define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0 | ||
1201 | #define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF | ||
1202 | #define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11 | ||
1203 | }; | ||
1204 | |||
1205 | struct lpfc_mbx_add_fcf_tbl_entry { | ||
1206 | union lpfc_sli4_cfg_shdr cfg_shdr; | ||
1207 | uint32_t word10; | ||
1208 | #define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0 | ||
1209 | #define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF | ||
1210 | #define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10 | ||
1211 | struct lpfc_mbx_sge fcf_sge; | ||
1212 | }; | ||
1213 | |||
1214 | struct lpfc_mbx_del_fcf_tbl_entry { | ||
1215 | struct mbox_header header; | ||
1216 | uint32_t word10; | ||
1217 | #define lpfc_mbx_del_fcf_tbl_count_SHIFT 0 | ||
1218 | #define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF | ||
1219 | #define lpfc_mbx_del_fcf_tbl_count_WORD word10 | ||
1220 | #define lpfc_mbx_del_fcf_tbl_index_SHIFT 16 | ||
1221 | #define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF | ||
1222 | #define lpfc_mbx_del_fcf_tbl_index_WORD word10 | ||
1223 | }; | ||
1224 | |||
1225 | /* Status field for embedded SLI_CONFIG mailbox command */ | ||
1226 | #define STATUS_SUCCESS 0x0 | ||
1227 | #define STATUS_FAILED 0x1 | ||
1228 | #define STATUS_ILLEGAL_REQUEST 0x2 | ||
1229 | #define STATUS_ILLEGAL_FIELD 0x3 | ||
1230 | #define STATUS_INSUFFICIENT_BUFFER 0x4 | ||
1231 | #define STATUS_UNAUTHORIZED_REQUEST 0x5 | ||
1232 | #define STATUS_FLASHROM_SAVE_FAILED 0x17 | ||
1233 | #define STATUS_FLASHROM_RESTORE_FAILED 0x18 | ||
1234 | #define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a | ||
1235 | #define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b | ||
1236 | #define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c | ||
1237 | #define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d | ||
1238 | #define STATUS_ASSERT_FAILED 0x1e | ||
1239 | #define STATUS_INVALID_SESSION 0x1f | ||
1240 | #define STATUS_INVALID_CONNECTION 0x20 | ||
1241 | #define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21 | ||
1242 | #define STATUS_BTL_NO_FREE_SLOT_PATH 0x24 | ||
1243 | #define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25 | ||
1244 | #define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26 | ||
1245 | #define STATUS_FLASHROM_READ_FAILED 0x27 | ||
1246 | #define STATUS_POLL_IOCTL_TIMEOUT 0x28 | ||
1247 | #define STATUS_ERROR_ACITMAIN 0x2a | ||
1248 | #define STATUS_REBOOT_REQUIRED 0x2c | ||
1249 | #define STATUS_FCF_IN_USE 0x3a | ||
1250 | |||
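After an embedded SLI_CONFIG command completes, the generic outcome is read from the response form of the shared header: a zero status and add_status mean success, anything else maps to one of the codes above. A sketch, assuming bf_get() from earlier:

static int cfg_shdr_status_ok(union lpfc_sli4_cfg_shdr *shdr)
{
	uint32_t status     = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	uint32_t add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	return (status == STATUS_SUCCESS && add_status == STATUS_SUCCESS);
}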
1251 | struct lpfc_mbx_sli4_config { | ||
1252 | struct mbox_header header; | ||
1253 | }; | ||
1254 | |||
1255 | struct lpfc_mbx_init_vfi { | ||
1256 | uint32_t word1; | ||
1257 | #define lpfc_init_vfi_vr_SHIFT 31 | ||
1258 | #define lpfc_init_vfi_vr_MASK 0x00000001 | ||
1259 | #define lpfc_init_vfi_vr_WORD word1 | ||
1260 | #define lpfc_init_vfi_vt_SHIFT 30 | ||
1261 | #define lpfc_init_vfi_vt_MASK 0x00000001 | ||
1262 | #define lpfc_init_vfi_vt_WORD word1 | ||
1263 | #define lpfc_init_vfi_vf_SHIFT 29 | ||
1264 | #define lpfc_init_vfi_vf_MASK 0x00000001 | ||
1265 | #define lpfc_init_vfi_vf_WORD word1 | ||
1266 | #define lpfc_init_vfi_vfi_SHIFT 0 | ||
1267 | #define lpfc_init_vfi_vfi_MASK 0x0000FFFF | ||
1268 | #define lpfc_init_vfi_vfi_WORD word1 | ||
1269 | uint32_t word2; | ||
1270 | #define lpfc_init_vfi_fcfi_SHIFT 0 | ||
1271 | #define lpfc_init_vfi_fcfi_MASK 0x0000FFFF | ||
1272 | #define lpfc_init_vfi_fcfi_WORD word2 | ||
1273 | uint32_t word3; | ||
1274 | #define lpfc_init_vfi_pri_SHIFT 13 | ||
1275 | #define lpfc_init_vfi_pri_MASK 0x00000007 | ||
1276 | #define lpfc_init_vfi_pri_WORD word3 | ||
1277 | #define lpfc_init_vfi_vf_id_SHIFT 1 | ||
1278 | #define lpfc_init_vfi_vf_id_MASK 0x00000FFF | ||
1279 | #define lpfc_init_vfi_vf_id_WORD word3 | ||
1280 | uint32_t word4; | ||
1281 | #define lpfc_init_vfi_hop_count_SHIFT 24 | ||
1282 | #define lpfc_init_vfi_hop_count_MASK 0x000000FF | ||
1283 | #define lpfc_init_vfi_hop_count_WORD word4 | ||
1284 | }; | ||
1285 | |||
1286 | struct lpfc_mbx_reg_vfi { | ||
1287 | uint32_t word1; | ||
1288 | #define lpfc_reg_vfi_vp_SHIFT 28 | ||
1289 | #define lpfc_reg_vfi_vp_MASK 0x00000001 | ||
1290 | #define lpfc_reg_vfi_vp_WORD word1 | ||
1291 | #define lpfc_reg_vfi_vfi_SHIFT 0 | ||
1292 | #define lpfc_reg_vfi_vfi_MASK 0x0000FFFF | ||
1293 | #define lpfc_reg_vfi_vfi_WORD word1 | ||
1294 | uint32_t word2; | ||
1295 | #define lpfc_reg_vfi_vpi_SHIFT 16 | ||
1296 | #define lpfc_reg_vfi_vpi_MASK 0x0000FFFF | ||
1297 | #define lpfc_reg_vfi_vpi_WORD word2 | ||
1298 | #define lpfc_reg_vfi_fcfi_SHIFT 0 | ||
1299 | #define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF | ||
1300 | #define lpfc_reg_vfi_fcfi_WORD word2 | ||
1301 | uint32_t word3_rsvd; | ||
1302 | uint32_t word4_rsvd; | ||
1303 | struct ulp_bde64 bde; | ||
1304 | uint32_t word8_rsvd; | ||
1305 | uint32_t word9_rsvd; | ||
1306 | uint32_t word10; | ||
1307 | #define lpfc_reg_vfi_nport_id_SHIFT 0 | ||
1308 | #define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF | ||
1309 | #define lpfc_reg_vfi_nport_id_WORD word10 | ||
1310 | }; | ||
1311 | |||
1312 | struct lpfc_mbx_init_vpi { | ||
1313 | uint32_t word1; | ||
1314 | #define lpfc_init_vpi_vfi_SHIFT 16 | ||
1315 | #define lpfc_init_vpi_vfi_MASK 0x0000FFFF | ||
1316 | #define lpfc_init_vpi_vfi_WORD word1 | ||
1317 | #define lpfc_init_vpi_vpi_SHIFT 0 | ||
1318 | #define lpfc_init_vpi_vpi_MASK 0x0000FFFF | ||
1319 | #define lpfc_init_vpi_vpi_WORD word1 | ||
1320 | }; | ||
1321 | |||
1322 | struct lpfc_mbx_read_vpi { | ||
1323 | uint32_t word1_rsvd; | ||
1324 | uint32_t word2; | ||
1325 | #define lpfc_mbx_read_vpi_vnportid_SHIFT 0 | ||
1326 | #define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF | ||
1327 | #define lpfc_mbx_read_vpi_vnportid_WORD word2 | ||
1328 | uint32_t word3_rsvd; | ||
1329 | uint32_t word4; | ||
1330 | #define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0 | ||
1331 | #define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF | ||
1332 | #define lpfc_mbx_read_vpi_acq_alpa_WORD word4 | ||
1333 | #define lpfc_mbx_read_vpi_pb_SHIFT 15 | ||
1334 | #define lpfc_mbx_read_vpi_pb_MASK 0x00000001 | ||
1335 | #define lpfc_mbx_read_vpi_pb_WORD word4 | ||
1336 | #define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16 | ||
1337 | #define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF | ||
1338 | #define lpfc_mbx_read_vpi_spec_alpa_WORD word4 | ||
1339 | #define lpfc_mbx_read_vpi_ns_SHIFT 30 | ||
1340 | #define lpfc_mbx_read_vpi_ns_MASK 0x00000001 | ||
1341 | #define lpfc_mbx_read_vpi_ns_WORD word4 | ||
1342 | #define lpfc_mbx_read_vpi_hl_SHIFT 31 | ||
1343 | #define lpfc_mbx_read_vpi_hl_MASK 0x00000001 | ||
1344 | #define lpfc_mbx_read_vpi_hl_WORD word4 | ||
1345 | uint32_t word5_rsvd; | ||
1346 | uint32_t word6; | ||
1347 | #define lpfc_mbx_read_vpi_vpi_SHIFT 0 | ||
1348 | #define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF | ||
1349 | #define lpfc_mbx_read_vpi_vpi_WORD word6 | ||
1350 | uint32_t word7; | ||
1351 | #define lpfc_mbx_read_vpi_mac_0_SHIFT 0 | ||
1352 | #define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF | ||
1353 | #define lpfc_mbx_read_vpi_mac_0_WORD word7 | ||
1354 | #define lpfc_mbx_read_vpi_mac_1_SHIFT 8 | ||
1355 | #define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF | ||
1356 | #define lpfc_mbx_read_vpi_mac_1_WORD word7 | ||
1357 | #define lpfc_mbx_read_vpi_mac_2_SHIFT 16 | ||
1358 | #define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF | ||
1359 | #define lpfc_mbx_read_vpi_mac_2_WORD word7 | ||
1360 | #define lpfc_mbx_read_vpi_mac_3_SHIFT 24 | ||
1361 | #define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF | ||
1362 | #define lpfc_mbx_read_vpi_mac_3_WORD word7 | ||
1363 | uint32_t word8; | ||
1364 | #define lpfc_mbx_read_vpi_mac_4_SHIFT 0 | ||
1365 | #define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF | ||
1366 | #define lpfc_mbx_read_vpi_mac_4_WORD word8 | ||
1367 | #define lpfc_mbx_read_vpi_mac_5_SHIFT 8 | ||
1368 | #define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF | ||
1369 | #define lpfc_mbx_read_vpi_mac_5_WORD word8 | ||
1370 | #define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16 | ||
1371 | #define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF | ||
1372 | #define lpfc_mbx_read_vpi_vlan_tag_WORD word8 | ||
1373 | #define lpfc_mbx_read_vpi_vv_SHIFT 28 | ||
1374 | #define lpfc_mbx_read_vpi_vv_MASK 0x00000001 | ||
1375 | #define lpfc_mbx_read_vpi_vv_WORD word8 | ||
1376 | }; | ||
1377 | |||
1378 | struct lpfc_mbx_unreg_vfi { | ||
1379 | uint32_t word1_rsvd; | ||
1380 | uint32_t word2; | ||
1381 | #define lpfc_unreg_vfi_vfi_SHIFT 0 | ||
1382 | #define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF | ||
1383 | #define lpfc_unreg_vfi_vfi_WORD word2 | ||
1384 | }; | ||
1385 | |||
1386 | struct lpfc_mbx_resume_rpi { | ||
1387 | uint32_t word1; | ||
1388 | #define lpfc_resume_rpi_rpi_SHIFT 0 | ||
1389 | #define lpfc_resume_rpi_rpi_MASK 0x0000FFFF | ||
1390 | #define lpfc_resume_rpi_rpi_WORD word1 | ||
1391 | uint32_t event_tag; | ||
1392 | uint32_t word3_rsvd; | ||
1393 | uint32_t word4_rsvd; | ||
1394 | uint32_t word5_rsvd; | ||
1395 | uint32_t word6; | ||
1396 | #define lpfc_resume_rpi_vpi_SHIFT 0 | ||
1397 | #define lpfc_resume_rpi_vpi_MASK 0x0000FFFF | ||
1398 | #define lpfc_resume_rpi_vpi_WORD word6 | ||
1399 | #define lpfc_resume_rpi_vfi_SHIFT 16 | ||
1400 | #define lpfc_resume_rpi_vfi_MASK 0x0000FFFF | ||
1401 | #define lpfc_resume_rpi_vfi_WORD word6 | ||
1402 | }; | ||
1403 | |||
1404 | #define REG_FCF_INVALID_QID 0xFFFF | ||
1405 | struct lpfc_mbx_reg_fcfi { | ||
1406 | uint32_t word1; | ||
1407 | #define lpfc_reg_fcfi_info_index_SHIFT 0 | ||
1408 | #define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF | ||
1409 | #define lpfc_reg_fcfi_info_index_WORD word1 | ||
1410 | #define lpfc_reg_fcfi_fcfi_SHIFT 16 | ||
1411 | #define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF | ||
1412 | #define lpfc_reg_fcfi_fcfi_WORD word1 | ||
1413 | uint32_t word2; | ||
1414 | #define lpfc_reg_fcfi_rq_id1_SHIFT 0 | ||
1415 | #define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF | ||
1416 | #define lpfc_reg_fcfi_rq_id1_WORD word2 | ||
1417 | #define lpfc_reg_fcfi_rq_id0_SHIFT 16 | ||
1418 | #define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF | ||
1419 | #define lpfc_reg_fcfi_rq_id0_WORD word2 | ||
1420 | uint32_t word3; | ||
1421 | #define lpfc_reg_fcfi_rq_id3_SHIFT 0 | ||
1422 | #define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF | ||
1423 | #define lpfc_reg_fcfi_rq_id3_WORD word3 | ||
1424 | #define lpfc_reg_fcfi_rq_id2_SHIFT 16 | ||
1425 | #define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF | ||
1426 | #define lpfc_reg_fcfi_rq_id2_WORD word3 | ||
1427 | uint32_t word4; | ||
1428 | #define lpfc_reg_fcfi_type_match0_SHIFT 24 | ||
1429 | #define lpfc_reg_fcfi_type_match0_MASK 0x000000FF | ||
1430 | #define lpfc_reg_fcfi_type_match0_WORD word4 | ||
1431 | #define lpfc_reg_fcfi_type_mask0_SHIFT 16 | ||
1432 | #define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF | ||
1433 | #define lpfc_reg_fcfi_type_mask0_WORD word4 | ||
1434 | #define lpfc_reg_fcfi_rctl_match0_SHIFT 8 | ||
1435 | #define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF | ||
1436 | #define lpfc_reg_fcfi_rctl_match0_WORD word4 | ||
1437 | #define lpfc_reg_fcfi_rctl_mask0_SHIFT 0 | ||
1438 | #define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF | ||
1439 | #define lpfc_reg_fcfi_rctl_mask0_WORD word4 | ||
1440 | uint32_t word5; | ||
1441 | #define lpfc_reg_fcfi_type_match1_SHIFT 24 | ||
1442 | #define lpfc_reg_fcfi_type_match1_MASK 0x000000FF | ||
1443 | #define lpfc_reg_fcfi_type_match1_WORD word5 | ||
1444 | #define lpfc_reg_fcfi_type_mask1_SHIFT 16 | ||
1445 | #define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF | ||
1446 | #define lpfc_reg_fcfi_type_mask1_WORD word5 | ||
1447 | #define lpfc_reg_fcfi_rctl_match1_SHIFT 8 | ||
1448 | #define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF | ||
1449 | #define lpfc_reg_fcfi_rctl_match1_WORD word5 | ||
1450 | #define lpfc_reg_fcfi_rctl_mask1_SHIFT 0 | ||
1451 | #define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF | ||
1452 | #define lpfc_reg_fcfi_rctl_mask1_WORD word5 | ||
1453 | uint32_t word6; | ||
1454 | #define lpfc_reg_fcfi_type_match2_SHIFT 24 | ||
1455 | #define lpfc_reg_fcfi_type_match2_MASK 0x000000FF | ||
1456 | #define lpfc_reg_fcfi_type_match2_WORD word6 | ||
1457 | #define lpfc_reg_fcfi_type_mask2_SHIFT 16 | ||
1458 | #define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF | ||
1459 | #define lpfc_reg_fcfi_type_mask2_WORD word6 | ||
1460 | #define lpfc_reg_fcfi_rctl_match2_SHIFT 8 | ||
1461 | #define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF | ||
1462 | #define lpfc_reg_fcfi_rctl_match2_WORD word6 | ||
1463 | #define lpfc_reg_fcfi_rctl_mask2_SHIFT 0 | ||
1464 | #define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF | ||
1465 | #define lpfc_reg_fcfi_rctl_mask2_WORD word6 | ||
1466 | uint32_t word7; | ||
1467 | #define lpfc_reg_fcfi_type_match3_SHIFT 24 | ||
1468 | #define lpfc_reg_fcfi_type_match3_MASK 0x000000FF | ||
1469 | #define lpfc_reg_fcfi_type_match3_WORD word7 | ||
1470 | #define lpfc_reg_fcfi_type_mask3_SHIFT 16 | ||
1471 | #define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF | ||
1472 | #define lpfc_reg_fcfi_type_mask3_WORD word7 | ||
1473 | #define lpfc_reg_fcfi_rctl_match3_SHIFT 8 | ||
1474 | #define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF | ||
1475 | #define lpfc_reg_fcfi_rctl_match3_WORD word7 | ||
1476 | #define lpfc_reg_fcfi_rctl_mask3_SHIFT 0 | ||
1477 | #define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF | ||
1478 | #define lpfc_reg_fcfi_rctl_mask3_WORD word7 | ||
1479 | uint32_t word8; | ||
1480 | #define lpfc_reg_fcfi_mam_SHIFT 13 | ||
1481 | #define lpfc_reg_fcfi_mam_MASK 0x00000003 | ||
1482 | #define lpfc_reg_fcfi_mam_WORD word8 | ||
1483 | #define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */ | ||
1484 | #define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */ | ||
1485 | #define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */ | ||
1486 | #define lpfc_reg_fcfi_vv_SHIFT 12 | ||
1487 | #define lpfc_reg_fcfi_vv_MASK 0x00000001 | ||
1488 | #define lpfc_reg_fcfi_vv_WORD word8 | ||
1489 | #define lpfc_reg_fcfi_vlan_tag_SHIFT 0 | ||
1490 | #define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF | ||
1491 | #define lpfc_reg_fcfi_vlan_tag_WORD word8 | ||
1492 | }; | ||
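Every field in these mailbox structures is described by a _SHIFT/_MASK/_WORD triplet rather than a C bitfield, and the driver manipulates them through token-pasting accessors defined earlier in lpfc_hw4.h. As a hedged sketch of that pattern (the exact macro bodies and the mboxq/LPFC_MBOXQ_t path are assumptions, not text from this hunk):

/* Sketch of the bf_set()/bf_get() accessor pattern the _SHIFT/_MASK/_WORD
 * triplets are written for; the real macros live earlier in lpfc_hw4.h and
 * may differ in detail.
 */
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* Example: request FPMA addressing plus a VLAN tag in a REG_FCFI command.
 * mboxq is an assumed LPFC_MBOXQ_t carrying the MQE union defined below.
 */
bf_set(lpfc_reg_fcfi_mam, &mboxq->u.mqe.un.reg_fcfi, LPFC_MAM_FPMA);
bf_set(lpfc_reg_fcfi_vv, &mboxq->u.mqe.un.reg_fcfi, 1);
bf_set(lpfc_reg_fcfi_vlan_tag, &mboxq->u.mqe.un.reg_fcfi, vlan_id);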
1493 | |||
1494 | struct lpfc_mbx_unreg_fcfi { | ||
1495 | uint32_t word1_rsv; | ||
1496 | uint32_t word2; | ||
1497 | #define lpfc_unreg_fcfi_SHIFT 0 | ||
1498 | #define lpfc_unreg_fcfi_MASK 0x0000FFFF | ||
1499 | #define lpfc_unreg_fcfi_WORD word2 | ||
1500 | }; | ||
1501 | |||
1502 | struct lpfc_mbx_read_rev { | ||
1503 | uint32_t word1; | ||
1504 | #define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16 | ||
1505 | #define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F | ||
1506 | #define lpfc_mbx_rd_rev_sli_lvl_WORD word1 | ||
1507 | #define lpfc_mbx_rd_rev_fcoe_SHIFT 20 | ||
1508 | #define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 | ||
1509 | #define lpfc_mbx_rd_rev_fcoe_WORD word1 | ||
1510 | #define lpfc_mbx_rd_rev_vpd_SHIFT 29 | ||
1511 | #define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 | ||
1512 | #define lpfc_mbx_rd_rev_vpd_WORD word1 | ||
1513 | uint32_t first_hw_rev; | ||
1514 | uint32_t second_hw_rev; | ||
1515 | uint32_t word4_rsvd; | ||
1516 | uint32_t third_hw_rev; | ||
1517 | uint32_t word6; | ||
1518 | #define lpfc_mbx_rd_rev_fcph_low_SHIFT 0 | ||
1519 | #define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF | ||
1520 | #define lpfc_mbx_rd_rev_fcph_low_WORD word6 | ||
1521 | #define lpfc_mbx_rd_rev_fcph_high_SHIFT 8 | ||
1522 | #define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF | ||
1523 | #define lpfc_mbx_rd_rev_fcph_high_WORD word6 | ||
1524 | #define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16 | ||
1525 | #define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF | ||
1526 | #define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6 | ||
1527 | #define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24 | ||
1528 | #define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF | ||
1529 | #define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6 | ||
1530 | uint32_t word7_rsvd; | ||
1531 | uint32_t fw_id_rev; | ||
1532 | uint8_t fw_name[16]; | ||
1533 | uint32_t ulp_fw_id_rev; | ||
1534 | uint8_t ulp_fw_name[16]; | ||
1535 | uint32_t word18_47_rsvd[30]; | ||
1536 | uint32_t word48; | ||
1537 | #define lpfc_mbx_rd_rev_avail_len_SHIFT 0 | ||
1538 | #define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF | ||
1539 | #define lpfc_mbx_rd_rev_avail_len_WORD word48 | ||
1540 | uint32_t vpd_paddr_low; | ||
1541 | uint32_t vpd_paddr_high; | ||
1542 | uint32_t avail_vpd_len; | ||
1543 | uint32_t rsvd_52_63[12]; | ||
1544 | }; | ||
1545 | |||
1546 | struct lpfc_mbx_read_config { | ||
1547 | uint32_t word1; | ||
1548 | #define lpfc_mbx_rd_conf_max_bbc_SHIFT 0 | ||
1549 | #define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF | ||
1550 | #define lpfc_mbx_rd_conf_max_bbc_WORD word1 | ||
1551 | #define lpfc_mbx_rd_conf_init_bbc_SHIFT 8 | ||
1552 | #define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF | ||
1553 | #define lpfc_mbx_rd_conf_init_bbc_WORD word1 | ||
1554 | uint32_t word2; | ||
1555 | #define lpfc_mbx_rd_conf_nport_did_SHIFT 0 | ||
1556 | #define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF | ||
1557 | #define lpfc_mbx_rd_conf_nport_did_WORD word2 | ||
1558 | #define lpfc_mbx_rd_conf_topology_SHIFT 24 | ||
1559 | #define lpfc_mbx_rd_conf_topology_MASK 0x000000FF | ||
1560 | #define lpfc_mbx_rd_conf_topology_WORD word2 | ||
1561 | uint32_t word3; | ||
1562 | #define lpfc_mbx_rd_conf_ao_SHIFT 0 | ||
1563 | #define lpfc_mbx_rd_conf_ao_MASK 0x00000001 | ||
1564 | #define lpfc_mbx_rd_conf_ao_WORD word3 | ||
1565 | #define lpfc_mbx_rd_conf_bb_scn_SHIFT 8 | ||
1566 | #define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F | ||
1567 | #define lpfc_mbx_rd_conf_bb_scn_WORD word3 | ||
1568 | #define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12 | ||
1569 | #define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F | ||
1570 | #define lpfc_mbx_rd_conf_cbb_scn_WORD word3 | ||
1571 | #define lpfc_mbx_rd_conf_mc_SHIFT 29 | ||
1572 | #define lpfc_mbx_rd_conf_mc_MASK 0x00000001 | ||
1573 | #define lpfc_mbx_rd_conf_mc_WORD word3 | ||
1574 | uint32_t word4; | ||
1575 | #define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 | ||
1576 | #define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF | ||
1577 | #define lpfc_mbx_rd_conf_e_d_tov_WORD word4 | ||
1578 | uint32_t word5; | ||
1579 | #define lpfc_mbx_rd_conf_lp_tov_SHIFT 0 | ||
1580 | #define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF | ||
1581 | #define lpfc_mbx_rd_conf_lp_tov_WORD word5 | ||
1582 | uint32_t word6; | ||
1583 | #define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 | ||
1584 | #define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF | ||
1585 | #define lpfc_mbx_rd_conf_r_a_tov_WORD word6 | ||
1586 | uint32_t word7; | ||
1587 | #define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0 | ||
1588 | #define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF | ||
1589 | #define lpfc_mbx_rd_conf_r_t_tov_WORD word7 | ||
1590 | uint32_t word8; | ||
1591 | #define lpfc_mbx_rd_conf_al_tov_SHIFT 0 | ||
1592 | #define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F | ||
1593 | #define lpfc_mbx_rd_conf_al_tov_WORD word8 | ||
1594 | uint32_t word9; | ||
1595 | #define lpfc_mbx_rd_conf_lmt_SHIFT 0 | ||
1596 | #define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF | ||
1597 | #define lpfc_mbx_rd_conf_lmt_WORD word9 | ||
1598 | uint32_t word10; | ||
1599 | #define lpfc_mbx_rd_conf_max_alpa_SHIFT 0 | ||
1600 | #define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF | ||
1601 | #define lpfc_mbx_rd_conf_max_alpa_WORD word10 | ||
1602 | uint32_t word11_rsvd; | ||
1603 | uint32_t word12; | ||
1604 | #define lpfc_mbx_rd_conf_xri_base_SHIFT 0 | ||
1605 | #define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF | ||
1606 | #define lpfc_mbx_rd_conf_xri_base_WORD word12 | ||
1607 | #define lpfc_mbx_rd_conf_xri_count_SHIFT 16 | ||
1608 | #define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF | ||
1609 | #define lpfc_mbx_rd_conf_xri_count_WORD word12 | ||
1610 | uint32_t word13; | ||
1611 | #define lpfc_mbx_rd_conf_rpi_base_SHIFT 0 | ||
1612 | #define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF | ||
1613 | #define lpfc_mbx_rd_conf_rpi_base_WORD word13 | ||
1614 | #define lpfc_mbx_rd_conf_rpi_count_SHIFT 16 | ||
1615 | #define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF | ||
1616 | #define lpfc_mbx_rd_conf_rpi_count_WORD word13 | ||
1617 | uint32_t word14; | ||
1618 | #define lpfc_mbx_rd_conf_vpi_base_SHIFT 0 | ||
1619 | #define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF | ||
1620 | #define lpfc_mbx_rd_conf_vpi_base_WORD word14 | ||
1621 | #define lpfc_mbx_rd_conf_vpi_count_SHIFT 16 | ||
1622 | #define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF | ||
1623 | #define lpfc_mbx_rd_conf_vpi_count_WORD word14 | ||
1624 | uint32_t word15; | ||
1625 | #define lpfc_mbx_rd_conf_vfi_base_SHIFT 0 | ||
1626 | #define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF | ||
1627 | #define lpfc_mbx_rd_conf_vfi_base_WORD word15 | ||
1628 | #define lpfc_mbx_rd_conf_vfi_count_SHIFT 16 | ||
1629 | #define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF | ||
1630 | #define lpfc_mbx_rd_conf_vfi_count_WORD word15 | ||
1631 | uint32_t word16; | ||
1632 | #define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0 | ||
1633 | #define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF | ||
1634 | #define lpfc_mbx_rd_conf_fcfi_base_WORD word16 | ||
1635 | #define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 | ||
1636 | #define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF | ||
1637 | #define lpfc_mbx_rd_conf_fcfi_count_WORD word16 | ||
1638 | uint32_t word17; | ||
1639 | #define lpfc_mbx_rd_conf_rq_count_SHIFT 0 | ||
1640 | #define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF | ||
1641 | #define lpfc_mbx_rd_conf_rq_count_WORD word17 | ||
1642 | #define lpfc_mbx_rd_conf_eq_count_SHIFT 16 | ||
1643 | #define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF | ||
1644 | #define lpfc_mbx_rd_conf_eq_count_WORD word17 | ||
1645 | uint32_t word18; | ||
1646 | #define lpfc_mbx_rd_conf_wq_count_SHIFT 0 | ||
1647 | #define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF | ||
1648 | #define lpfc_mbx_rd_conf_wq_count_WORD word18 | ||
1649 | #define lpfc_mbx_rd_conf_cq_count_SHIFT 16 | ||
1650 | #define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF | ||
1651 | #define lpfc_mbx_rd_conf_cq_count_WORD word18 | ||
1652 | }; | ||
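On the completion side, the same bf_get() accessors pull the per-resource base/count pairs out of a finished READ_CONFIG mailbox. A minimal sketch, assuming rd_config points at the returned lpfc_mbx_read_config payload (the helper name is illustrative):

/* Hedged sketch: extract a few resource extents from a completed READ_CONFIG. */
static void lpfc_example_parse_read_config(struct lpfc_mbx_read_config *rd_config)
{
	uint16_t xri_base  = bf_get(lpfc_mbx_rd_conf_xri_base,  rd_config);
	uint16_t xri_count = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
	uint16_t rpi_base  = bf_get(lpfc_mbx_rd_conf_rpi_base,  rd_config);
	uint16_t rpi_count = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
	uint16_t vpi_count = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);

	/* the driver would size its XRI/RPI/VPI pools from these values */
	(void)xri_base; (void)xri_count; (void)rpi_base;
	(void)rpi_count; (void)vpi_count;
}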
1653 | |||
1654 | struct lpfc_mbx_request_features { | ||
1655 | uint32_t word1; | ||
1656 | #define lpfc_mbx_rq_ftr_qry_SHIFT 0 | ||
1657 | #define lpfc_mbx_rq_ftr_qry_MASK 0x00000001 | ||
1658 | #define lpfc_mbx_rq_ftr_qry_WORD word1 | ||
1659 | uint32_t word2; | ||
1660 | #define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0 | ||
1661 | #define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001 | ||
1662 | #define lpfc_mbx_rq_ftr_rq_iaab_WORD word2 | ||
1663 | #define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1 | ||
1664 | #define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001 | ||
1665 | #define lpfc_mbx_rq_ftr_rq_npiv_WORD word2 | ||
1666 | #define lpfc_mbx_rq_ftr_rq_dif_SHIFT 2 | ||
1667 | #define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001 | ||
1668 | #define lpfc_mbx_rq_ftr_rq_dif_WORD word2 | ||
1669 | #define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3 | ||
1670 | #define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001 | ||
1671 | #define lpfc_mbx_rq_ftr_rq_vf_WORD word2 | ||
1672 | #define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4 | ||
1673 | #define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001 | ||
1674 | #define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2 | ||
1675 | #define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5 | ||
1676 | #define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001 | ||
1677 | #define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2 | ||
1678 | #define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6 | ||
1679 | #define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001 | ||
1680 | #define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2 | ||
1681 | #define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 | ||
1682 | #define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 | ||
1683 | #define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 | ||
1684 | uint32_t word3; | ||
1685 | #define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 | ||
1686 | #define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 | ||
1687 | #define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3 | ||
1688 | #define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1 | ||
1689 | #define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001 | ||
1690 | #define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3 | ||
1691 | #define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2 | ||
1692 | #define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001 | ||
1693 | #define lpfc_mbx_rq_ftr_rsp_dif_WORD word3 | ||
1694 | #define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3 | ||
1695 | #define lpfc_mbx_rq_ftr_rsp_vf_MASK 0x00000001 | ||
1696 | #define lpfc_mbx_rq_ftr_rsp_vf_WORD word3 | ||
1697 | #define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4 | ||
1698 | #define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001 | ||
1699 | #define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3 | ||
1700 | #define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5 | ||
1701 | #define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001 | ||
1702 | #define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3 | ||
1703 | #define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6 | ||
1704 | #define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001 | ||
1705 | #define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3 | ||
1706 | #define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 | ||
1707 | #define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 | ||
1708 | #define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 | ||
1709 | }; | ||
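word2 carries the features the driver asks for and word3 echoes what the port actually granted, so the request/response flow looks roughly like the sketch below (the helper name is illustrative, and the mailbox issue step is elided):

/* Hedged sketch: request FCP initiator + NPIV, then check what was granted. */
static int lpfc_example_check_features(struct lpfc_mbx_request_features *req_ftrs)
{
	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, req_ftrs, 1);
	bf_set(lpfc_mbx_rq_ftr_rq_npiv, req_ftrs, 1);
	/* ... mailbox is issued here; word3 now holds the port's response ... */
	if (!bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, req_ftrs))
		return -ENODEV;	/* port cannot run FCP initiator mode */
	return bf_get(lpfc_mbx_rq_ftr_rsp_npiv, req_ftrs) ? 1 : 0;
}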
1710 | |||
1711 | /* Mailbox Completion Queue Error Messages */ | ||
1712 | #define MB_CQE_STATUS_SUCCESS 0x0 | ||
1713 | #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 | ||
1714 | #define MB_CQE_STATUS_INVALID_PARAMETER 0x2 | ||
1715 | #define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3 | ||
1716 | #define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 | ||
1717 | #define MB_CQE_STATUS_DMA_FAILED 0x5 | ||
1718 | |||
1719 | /* mailbox queue entry structure */ | ||
1720 | struct lpfc_mqe { | ||
1721 | uint32_t word0; | ||
1722 | #define lpfc_mqe_status_SHIFT 16 | ||
1723 | #define lpfc_mqe_status_MASK 0x0000FFFF | ||
1724 | #define lpfc_mqe_status_WORD word0 | ||
1725 | #define lpfc_mqe_command_SHIFT 8 | ||
1726 | #define lpfc_mqe_command_MASK 0x000000FF | ||
1727 | #define lpfc_mqe_command_WORD word0 | ||
1728 | union { | ||
1729 | uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1]; | ||
1730 | /* sli4 mailbox commands */ | ||
1731 | struct lpfc_mbx_sli4_config sli4_config; | ||
1732 | struct lpfc_mbx_init_vfi init_vfi; | ||
1733 | struct lpfc_mbx_reg_vfi reg_vfi; | ||
1734 | struct lpfc_mbx_reg_vfi unreg_vfi; | ||
1735 | struct lpfc_mbx_init_vpi init_vpi; | ||
1736 | struct lpfc_mbx_resume_rpi resume_rpi; | ||
1737 | struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; | ||
1738 | struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; | ||
1739 | struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; | ||
1740 | struct lpfc_mbx_reg_fcfi reg_fcfi; | ||
1741 | struct lpfc_mbx_unreg_fcfi unreg_fcfi; | ||
1742 | struct lpfc_mbx_mq_create mq_create; | ||
1743 | struct lpfc_mbx_eq_create eq_create; | ||
1744 | struct lpfc_mbx_cq_create cq_create; | ||
1745 | struct lpfc_mbx_wq_create wq_create; | ||
1746 | struct lpfc_mbx_rq_create rq_create; | ||
1747 | struct lpfc_mbx_mq_destroy mq_destroy; | ||
1748 | struct lpfc_mbx_eq_destroy eq_destroy; | ||
1749 | struct lpfc_mbx_cq_destroy cq_destroy; | ||
1750 | struct lpfc_mbx_wq_destroy wq_destroy; | ||
1751 | struct lpfc_mbx_rq_destroy rq_destroy; | ||
1752 | struct lpfc_mbx_post_sgl_pages post_sgl_pages; | ||
1753 | struct lpfc_mbx_nembed_cmd nembed_cmd; | ||
1754 | struct lpfc_mbx_read_rev read_rev; | ||
1755 | struct lpfc_mbx_read_vpi read_vpi; | ||
1756 | struct lpfc_mbx_read_config rd_config; | ||
1757 | struct lpfc_mbx_request_features req_ftrs; | ||
1758 | struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; | ||
1759 | struct lpfc_mbx_nop nop; | ||
1760 | } un; | ||
1761 | }; | ||
1762 | |||
1763 | struct lpfc_mcqe { | ||
1764 | uint32_t word0; | ||
1765 | #define lpfc_mcqe_status_SHIFT 0 | ||
1766 | #define lpfc_mcqe_status_MASK 0x0000FFFF | ||
1767 | #define lpfc_mcqe_status_WORD word0 | ||
1768 | #define lpfc_mcqe_ext_status_SHIFT 16 | ||
1769 | #define lpfc_mcqe_ext_status_MASK 0x0000FFFF | ||
1770 | #define lpfc_mcqe_ext_status_WORD word0 | ||
1771 | uint32_t mcqe_tag0; | ||
1772 | uint32_t mcqe_tag1; | ||
1773 | uint32_t trailer; | ||
1774 | #define lpfc_trailer_valid_SHIFT 31 | ||
1775 | #define lpfc_trailer_valid_MASK 0x00000001 | ||
1776 | #define lpfc_trailer_valid_WORD trailer | ||
1777 | #define lpfc_trailer_async_SHIFT 30 | ||
1778 | #define lpfc_trailer_async_MASK 0x00000001 | ||
1779 | #define lpfc_trailer_async_WORD trailer | ||
1780 | #define lpfc_trailer_hpi_SHIFT 29 | ||
1781 | #define lpfc_trailer_hpi_MASK 0x00000001 | ||
1782 | #define lpfc_trailer_hpi_WORD trailer | ||
1783 | #define lpfc_trailer_completed_SHIFT 28 | ||
1784 | #define lpfc_trailer_completed_MASK 0x00000001 | ||
1785 | #define lpfc_trailer_completed_WORD trailer | ||
1786 | #define lpfc_trailer_consumed_SHIFT 27 | ||
1787 | #define lpfc_trailer_consumed_MASK 0x00000001 | ||
1788 | #define lpfc_trailer_consumed_WORD trailer | ||
1789 | #define lpfc_trailer_type_SHIFT 16 | ||
1790 | #define lpfc_trailer_type_MASK 0x000000FF | ||
1791 | #define lpfc_trailer_type_WORD trailer | ||
1792 | #define lpfc_trailer_code_SHIFT 8 | ||
1793 | #define lpfc_trailer_code_MASK 0x000000FF | ||
1794 | #define lpfc_trailer_code_WORD trailer | ||
1795 | #define LPFC_TRAILER_CODE_LINK 0x1 | ||
1796 | #define LPFC_TRAILER_CODE_FCOE 0x2 | ||
1797 | #define LPFC_TRAILER_CODE_DCBX 0x3 | ||
1798 | }; | ||
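A consumer of the mailbox completion queue would typically gate on the trailer bits before trusting the status words. A hedged sketch (the helper name is illustrative; the actual dispatch lives in the SLI layer elsewhere in the patch):

/* Hedged sketch: classify one mailbox CQE before acting on it. */
static void lpfc_example_handle_mcqe(struct lpfc_mcqe *mcqe)
{
	if (!bf_get(lpfc_trailer_valid, mcqe))
		return;
	if (bf_get(lpfc_trailer_async, mcqe)) {
		switch (bf_get(lpfc_trailer_code, mcqe)) {
		case LPFC_TRAILER_CODE_LINK:	/* link ACQE follows */
		case LPFC_TRAILER_CODE_FCOE:	/* FCoE/FCF ACQE follows */
		case LPFC_TRAILER_CODE_DCBX:	/* DCBX ACQE follows */
			break;
		}
	} else if (bf_get(lpfc_trailer_completed, mcqe) &&
		   bf_get(lpfc_mcqe_status, mcqe) != MB_CQE_STATUS_SUCCESS) {
		/* command failed; lpfc_mcqe_ext_status carries extra detail */
	}
}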
1799 | |||
1800 | struct lpfc_acqe_link { | ||
1801 | uint32_t word0; | ||
1802 | #define lpfc_acqe_link_speed_SHIFT 24 | ||
1803 | #define lpfc_acqe_link_speed_MASK 0x000000FF | ||
1804 | #define lpfc_acqe_link_speed_WORD word0 | ||
1805 | #define LPFC_ASYNC_LINK_SPEED_ZERO 0x0 | ||
1806 | #define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1 | ||
1807 | #define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2 | ||
1808 | #define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3 | ||
1809 | #define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4 | ||
1810 | #define lpfc_acqe_link_duplex_SHIFT 16 | ||
1811 | #define lpfc_acqe_link_duplex_MASK 0x000000FF | ||
1812 | #define lpfc_acqe_link_duplex_WORD word0 | ||
1813 | #define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0 | ||
1814 | #define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1 | ||
1815 | #define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2 | ||
1816 | #define lpfc_acqe_link_status_SHIFT 8 | ||
1817 | #define lpfc_acqe_link_status_MASK 0x000000FF | ||
1818 | #define lpfc_acqe_link_status_WORD word0 | ||
1819 | #define LPFC_ASYNC_LINK_STATUS_DOWN 0x0 | ||
1820 | #define LPFC_ASYNC_LINK_STATUS_UP 0x1 | ||
1821 | #define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 | ||
1822 | #define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 | ||
1823 | #define lpfc_acqe_link_physical_SHIFT 0 | ||
1824 | #define lpfc_acqe_link_physical_MASK 0x000000FF | ||
1825 | #define lpfc_acqe_link_physical_WORD word0 | ||
1826 | #define LPFC_ASYNC_LINK_PORT_A 0x0 | ||
1827 | #define LPFC_ASYNC_LINK_PORT_B 0x1 | ||
1828 | uint32_t word1; | ||
1829 | #define lpfc_acqe_link_fault_SHIFT 0 | ||
1830 | #define lpfc_acqe_link_fault_MASK 0x000000FF | ||
1831 | #define lpfc_acqe_link_fault_WORD word1 | ||
1832 | #define LPFC_ASYNC_LINK_FAULT_NONE 0x0 | ||
1833 | #define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 | ||
1834 | #define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 | ||
1835 | uint32_t event_tag; | ||
1836 | uint32_t trailer; | ||
1837 | }; | ||
1838 | |||
1839 | struct lpfc_acqe_fcoe { | ||
1840 | uint32_t fcf_index; | ||
1841 | uint32_t word1; | ||
1842 | #define lpfc_acqe_fcoe_fcf_count_SHIFT 0 | ||
1843 | #define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF | ||
1844 | #define lpfc_acqe_fcoe_fcf_count_WORD word1 | ||
1845 | #define lpfc_acqe_fcoe_event_type_SHIFT 16 | ||
1846 | #define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF | ||
1847 | #define lpfc_acqe_fcoe_event_type_WORD word1 | ||
1848 | #define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 | ||
1849 | #define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 | ||
1850 | #define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 | ||
1851 | uint32_t event_tag; | ||
1852 | uint32_t trailer; | ||
1853 | }; | ||
1854 | |||
1855 | struct lpfc_acqe_dcbx { | ||
1856 | uint32_t tlv_ttl; | ||
1857 | uint32_t reserved; | ||
1858 | uint32_t event_tag; | ||
1859 | uint32_t trailer; | ||
1860 | }; | ||
1861 | |||
1862 | /* | ||
1863 | * Define the bootstrap mailbox (bmbx) region used to communicate | ||
1864 | * mailbox commands between the host and port. The mailbox consists | ||
1865 | * of a payload area of 256 bytes and a completion queue of length | ||
1866 | * 16 bytes. | ||
1867 | */ | ||
1868 | struct lpfc_bmbx_create { | ||
1869 | struct lpfc_mqe mqe; | ||
1870 | struct lpfc_mcqe mcqe; | ||
1871 | }; | ||
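Because the port reads this region by physical address before any work/completion queues exist, the driver has to hand it over as a single coherent DMA buffer. A hedged sketch of the allocation only (the function name is illustrative; the patch's real lpfc_create_bootstrap_mbox() also over-allocates so the block can be realigned, which is omitted here):

/* Hedged sketch: one coherent allocation holding the 256-byte MQE payload
 * plus the 16-byte MCQE that make up struct lpfc_bmbx_create.
 */
static int lpfc_example_alloc_bmbx(struct lpfc_hba *phba)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_alloc_coherent(&phba->pcidev->dev,
				  sizeof(struct lpfc_bmbx_create),
				  &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;
	memset(virt, 0, sizeof(struct lpfc_bmbx_create));
	/* phys is what would be written to the port's BMBX register pair */
	return 0;
}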
1872 | |||
1873 | #define SGL_ALIGN_SZ 64 | ||
1874 | #define SGL_PAGE_SIZE 4096 | ||
1875 | /* align SGL addr on a size boundary - adjust address up */ | ||
1876 | #define NO_XRI ((uint16_t)-1) | ||
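The align-up adjustment the comment above refers to is plain power-of-two arithmetic; as an illustrative helper (not a macro defined in this header):

/* Illustrative only: round a DMA address up to the next SGL_ALIGN_SZ
 * boundary, as the comment above describes (SGL_ALIGN_SZ is 64, a power
 * of two, so masking works).
 */
static inline dma_addr_t lpfc_example_sgl_align_up(dma_addr_t addr)
{
	return (addr + SGL_ALIGN_SZ - 1) & ~((dma_addr_t)SGL_ALIGN_SZ - 1);
}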
1877 | struct wqe_common { | ||
1878 | uint32_t word6; | ||
1879 | #define wqe_xri_SHIFT 0 | ||
1880 | #define wqe_xri_MASK 0x0000FFFF | ||
1881 | #define wqe_xri_WORD word6 | ||
1882 | #define wqe_ctxt_tag_SHIFT 16 | ||
1883 | #define wqe_ctxt_tag_MASK 0x0000FFFF | ||
1884 | #define wqe_ctxt_tag_WORD word6 | ||
1885 | uint32_t word7; | ||
1886 | #define wqe_ct_SHIFT 2 | ||
1887 | #define wqe_ct_MASK 0x00000003 | ||
1888 | #define wqe_ct_WORD word7 | ||
1889 | #define wqe_status_SHIFT 4 | ||
1890 | #define wqe_status_MASK 0x0000000f | ||
1891 | #define wqe_status_WORD word7 | ||
1892 | #define wqe_cmnd_SHIFT 8 | ||
1893 | #define wqe_cmnd_MASK 0x000000ff | ||
1894 | #define wqe_cmnd_WORD word7 | ||
1895 | #define wqe_class_SHIFT 16 | ||
1896 | #define wqe_class_MASK 0x00000007 | ||
1897 | #define wqe_class_WORD word7 | ||
1898 | #define wqe_pu_SHIFT 20 | ||
1899 | #define wqe_pu_MASK 0x00000003 | ||
1900 | #define wqe_pu_WORD word7 | ||
1901 | #define wqe_erp_SHIFT 22 | ||
1902 | #define wqe_erp_MASK 0x00000001 | ||
1903 | #define wqe_erp_WORD word7 | ||
1904 | #define wqe_lnk_SHIFT 23 | ||
1905 | #define wqe_lnk_MASK 0x00000001 | ||
1906 | #define wqe_lnk_WORD word7 | ||
1907 | #define wqe_tmo_SHIFT 24 | ||
1908 | #define wqe_tmo_MASK 0x000000ff | ||
1909 | #define wqe_tmo_WORD word7 | ||
1910 | uint32_t abort_tag; /* word 8 in WQE */ | ||
1911 | uint32_t word9; | ||
1912 | #define wqe_reqtag_SHIFT 0 | ||
1913 | #define wqe_reqtag_MASK 0x0000FFFF | ||
1914 | #define wqe_reqtag_WORD word9 | ||
1915 | #define wqe_rcvoxid_SHIFT 16 | ||
1916 | #define wqe_rcvoxid_MASK 0x0000FFFF | ||
1917 | #define wqe_rcvoxid_WORD word9 | ||
1918 | uint32_t word10; | ||
1919 | #define wqe_pri_SHIFT 16 | ||
1920 | #define wqe_pri_MASK 0x00000007 | ||
1921 | #define wqe_pri_WORD word10 | ||
1922 | #define wqe_pv_SHIFT 19 | ||
1923 | #define wqe_pv_MASK 0x00000001 | ||
1924 | #define wqe_pv_WORD word10 | ||
1925 | #define wqe_xc_SHIFT 21 | ||
1926 | #define wqe_xc_MASK 0x00000001 | ||
1927 | #define wqe_xc_WORD word10 | ||
1928 | #define wqe_ccpe_SHIFT 23 | ||
1929 | #define wqe_ccpe_MASK 0x00000001 | ||
1930 | #define wqe_ccpe_WORD word10 | ||
1931 | #define wqe_ccp_SHIFT 24 | ||
1932 | #define wqe_ccp_MASK 0x000000ff | ||
1933 | #define wqe_ccp_WORD word10 | ||
1934 | uint32_t word11; | ||
1935 | #define wqe_cmd_type_SHIFT 0 | ||
1936 | #define wqe_cmd_type_MASK 0x0000000f | ||
1937 | #define wqe_cmd_type_WORD word11 | ||
1938 | #define wqe_wqec_SHIFT 7 | ||
1939 | #define wqe_wqec_MASK 0x00000001 | ||
1940 | #define wqe_wqec_WORD word11 | ||
1941 | #define wqe_cqid_SHIFT 16 | ||
1942 | #define wqe_cqid_MASK 0x000003ff | ||
1943 | #define wqe_cqid_WORD word11 | ||
1944 | }; | ||
1945 | |||
1946 | struct wqe_did { | ||
1947 | uint32_t word5; | ||
1948 | #define wqe_els_did_SHIFT 0 | ||
1949 | #define wqe_els_did_MASK 0x00FFFFFF | ||
1950 | #define wqe_els_did_WORD word5 | ||
1951 | #define wqe_xmit_bls_ar_SHIFT 30 | ||
1952 | #define wqe_xmit_bls_ar_MASK 0x00000001 | ||
1953 | #define wqe_xmit_bls_ar_WORD word5 | ||
1954 | #define wqe_xmit_bls_xo_SHIFT 31 | ||
1955 | #define wqe_xmit_bls_xo_MASK 0x00000001 | ||
1956 | #define wqe_xmit_bls_xo_WORD word5 | ||
1957 | }; | ||
1958 | |||
1959 | struct els_request64_wqe { | ||
1960 | struct ulp_bde64 bde; | ||
1961 | uint32_t payload_len; | ||
1962 | uint32_t word4; | ||
1963 | #define els_req64_sid_SHIFT 0 | ||
1964 | #define els_req64_sid_MASK 0x00FFFFFF | ||
1965 | #define els_req64_sid_WORD word4 | ||
1966 | #define els_req64_sp_SHIFT 24 | ||
1967 | #define els_req64_sp_MASK 0x00000001 | ||
1968 | #define els_req64_sp_WORD word4 | ||
1969 | #define els_req64_vf_SHIFT 25 | ||
1970 | #define els_req64_vf_MASK 0x00000001 | ||
1971 | #define els_req64_vf_WORD word4 | ||
1972 | struct wqe_did wqe_dest; | ||
1973 | struct wqe_common wqe_com; /* words 6-11 */ | ||
1974 | uint32_t word12; | ||
1975 | #define els_req64_vfid_SHIFT 1 | ||
1976 | #define els_req64_vfid_MASK 0x00000FFF | ||
1977 | #define els_req64_vfid_WORD word12 | ||
1978 | #define els_req64_pri_SHIFT 13 | ||
1979 | #define els_req64_pri_MASK 0x00000007 | ||
1980 | #define els_req64_pri_WORD word12 | ||
1981 | uint32_t word13; | ||
1982 | #define els_req64_hopcnt_SHIFT 24 | ||
1983 | #define els_req64_hopcnt_MASK 0x000000ff | ||
1984 | #define els_req64_hopcnt_WORD word13 | ||
1985 | uint32_t reserved[2]; | ||
1986 | }; | ||
1987 | |||
1988 | struct xmit_els_rsp64_wqe { | ||
1989 | struct ulp_bde64 bde; | ||
1990 | uint32_t rsvd3; | ||
1991 | uint32_t rsvd4; | ||
1992 | struct wqe_did wqe_dest; | ||
1993 | struct wqe_common wqe_com; /* words 6-11 */ | ||
1994 | uint32_t rsvd_12_15[4]; | ||
1995 | }; | ||
1996 | |||
1997 | struct xmit_bls_rsp64_wqe { | ||
1998 | uint32_t payload0; | ||
1999 | uint32_t word1; | ||
2000 | #define xmit_bls_rsp64_rxid_SHIFT 0 | ||
2001 | #define xmit_bls_rsp64_rxid_MASK 0x0000ffff | ||
2002 | #define xmit_bls_rsp64_rxid_WORD word1 | ||
2003 | #define xmit_bls_rsp64_oxid_SHIFT 16 | ||
2004 | #define xmit_bls_rsp64_oxid_MASK 0x0000ffff | ||
2005 | #define xmit_bls_rsp64_oxid_WORD word1 | ||
2006 | uint32_t word2; | ||
2007 | #define xmit_bls_rsp64_seqcntlo_SHIFT 0 | ||
2008 | #define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff | ||
2009 | #define xmit_bls_rsp64_seqcntlo_WORD word2 | ||
2010 | #define xmit_bls_rsp64_seqcnthi_SHIFT 16 | ||
2011 | #define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff | ||
2012 | #define xmit_bls_rsp64_seqcnthi_WORD word2 | ||
2013 | uint32_t rsrvd3; | ||
2014 | uint32_t rsrvd4; | ||
2015 | struct wqe_did wqe_dest; | ||
2016 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2017 | uint32_t rsvd_12_15[4]; | ||
2018 | }; | ||
2019 | struct wqe_rctl_dfctl { | ||
2020 | uint32_t word5; | ||
2021 | #define wqe_si_SHIFT 2 | ||
2022 | #define wqe_si_MASK 0x000000001 | ||
2023 | #define wqe_si_WORD word5 | ||
2024 | #define wqe_la_SHIFT 3 | ||
2025 | #define wqe_la_MASK 0x000000001 | ||
2026 | #define wqe_la_WORD word5 | ||
2027 | #define wqe_ls_SHIFT 7 | ||
2028 | #define wqe_ls_MASK 0x000000001 | ||
2029 | #define wqe_ls_WORD word5 | ||
2030 | #define wqe_dfctl_SHIFT 8 | ||
2031 | #define wqe_dfctl_MASK 0x0000000ff | ||
2032 | #define wqe_dfctl_WORD word5 | ||
2033 | #define wqe_type_SHIFT 16 | ||
2034 | #define wqe_type_MASK 0x0000000ff | ||
2035 | #define wqe_type_WORD word5 | ||
2036 | #define wqe_rctl_SHIFT 24 | ||
2037 | #define wqe_rctl_MASK 0x0000000ff | ||
2038 | #define wqe_rctl_WORD word5 | ||
2039 | }; | ||
2040 | |||
2041 | struct xmit_seq64_wqe { | ||
2042 | struct ulp_bde64 bde; | ||
2043 | uint32_t payload_offset; | ||
2044 | uint32_t relative_offset; | ||
2045 | struct wqe_rctl_dfctl wge_ctl; | ||
2046 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2047 | /* Note: word10 different REVISIT */ | ||
2048 | uint32_t xmit_len; | ||
2049 | uint32_t rsvd_12_15[3]; | ||
2050 | }; | ||
2051 | struct xmit_bcast64_wqe { | ||
2052 | struct ulp_bde64 bde; | ||
2053 | uint32_t payload_len; | ||
2054 | uint32_t rsvd4; | ||
2055 | struct wqe_rctl_dfctl wge_ctl; /* word 5 */ | ||
2056 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2057 | uint32_t rsvd_12_15[4]; | ||
2058 | }; | ||
2059 | |||
2060 | struct gen_req64_wqe { | ||
2061 | struct ulp_bde64 bde; | ||
2062 | uint32_t command_len; | ||
2063 | uint32_t payload_len; | ||
2064 | struct wqe_rctl_dfctl wge_ctl; /* word 5 */ | ||
2065 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2066 | uint32_t rsvd_12_15[4]; | ||
2067 | }; | ||
2068 | |||
2069 | struct create_xri_wqe { | ||
2070 | uint32_t rsrvd[5]; /* words 0-4 */ | ||
2071 | struct wqe_did wqe_dest; /* word 5 */ | ||
2072 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2073 | uint32_t rsvd_12_15[4]; /* word 12-15 */ | ||
2074 | }; | ||
2075 | |||
2076 | #define T_REQUEST_TAG 3 | ||
2077 | #define T_XRI_TAG 1 | ||
2078 | |||
2079 | struct abort_cmd_wqe { | ||
2080 | uint32_t rsrvd[3]; | ||
2081 | uint32_t word3; | ||
2082 | #define abort_cmd_ia_SHIFT 0 | ||
2083 | #define abort_cmd_ia_MASK 0x000000001 | ||
2084 | #define abort_cmd_ia_WORD word3 | ||
2085 | #define abort_cmd_criteria_SHIFT 8 | ||
2086 | #define abort_cmd_criteria_MASK 0x0000000ff | ||
2087 | #define abort_cmd_criteria_WORD word3 | ||
2088 | uint32_t rsrvd4; | ||
2089 | uint32_t rsrvd5; | ||
2090 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2091 | uint32_t rsvd_12_15[4]; /* word 12-15 */ | ||
2092 | }; | ||
2093 | |||
2094 | struct fcp_iwrite64_wqe { | ||
2095 | struct ulp_bde64 bde; | ||
2096 | uint32_t payload_len; | ||
2097 | uint32_t total_xfer_len; | ||
2098 | uint32_t initial_xfer_len; | ||
2099 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2100 | uint32_t rsvd_12_15[4]; /* word 12-15 */ | ||
2101 | }; | ||
2102 | |||
2103 | struct fcp_iread64_wqe { | ||
2104 | struct ulp_bde64 bde; | ||
2105 | uint32_t payload_len; /* word 3 */ | ||
2106 | uint32_t total_xfer_len; /* word 4 */ | ||
2107 | uint32_t rsrvd5; /* word 5 */ | ||
2108 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2109 | uint32_t rsvd_12_15[4]; /* word 12-15 */ | ||
2110 | }; | ||
2111 | |||
2112 | struct fcp_icmnd64_wqe { | ||
2113 | struct ulp_bde64 bde; /* words 0-2 */ | ||
2114 | uint32_t rsrvd[3]; /* words 3-5 */ | ||
2115 | struct wqe_common wqe_com; /* words 6-11 */ | ||
2116 | uint32_t rsvd_12_15[4]; /* word 12-15 */ | ||
2117 | }; | ||
2118 | |||
2119 | |||
2120 | union lpfc_wqe { | ||
2121 | uint32_t words[16]; | ||
2122 | struct lpfc_wqe_generic generic; | ||
2123 | struct fcp_icmnd64_wqe fcp_icmd; | ||
2124 | struct fcp_iread64_wqe fcp_iread; | ||
2125 | struct fcp_iwrite64_wqe fcp_iwrite; | ||
2126 | struct abort_cmd_wqe abort_cmd; | ||
2127 | struct create_xri_wqe create_xri; | ||
2128 | struct xmit_bcast64_wqe xmit_bcast64; | ||
2129 | struct xmit_seq64_wqe xmit_sequence; | ||
2130 | struct xmit_bls_rsp64_wqe xmit_bls_rsp; | ||
2131 | struct xmit_els_rsp64_wqe xmit_els_rsp; | ||
2132 | struct els_request64_wqe els_req; | ||
2133 | struct gen_req64_wqe gen_req; | ||
2134 | }; | ||
2135 | |||
2136 | #define FCP_COMMAND 0x0 | ||
2137 | #define FCP_COMMAND_DATA_OUT 0x1 | ||
2138 | #define ELS_COMMAND_NON_FIP 0xC | ||
2139 | #define ELS_COMMAND_FIP 0xD | ||
2140 | #define OTHER_COMMAND 0x8 | ||
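Putting the pieces together, building a WQE means picking the right member of union lpfc_wqe, filling its command-specific words, then setting the shared words 6-11 and the command type above. A hedged sketch for an ELS request (the helper name is illustrative; the BDE in words 0-2 and the actual posting to the work queue are omitted):

/* Hedged sketch: minimal assembly of an ELS request WQE; xritag, iotag,
 * did, timeout and cmd_len are caller-supplied.
 */
static void lpfc_example_prep_els_wqe(union lpfc_wqe *wqe, uint16_t xritag,
				      uint16_t iotag, uint32_t did,
				      uint8_t timeout, uint32_t cmd_len)
{
	struct wqe_common *com = &wqe->els_req.wqe_com;	/* words 6-11 */

	memset(wqe, 0, sizeof(*wqe));
	wqe->els_req.payload_len = cmd_len;
	bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
	bf_set(wqe_xri, com, xritag);
	bf_set(wqe_reqtag, com, iotag);
	bf_set(wqe_tmo, com, timeout);
	bf_set(wqe_cmd_type, com, ELS_COMMAND_NON_FIP);
}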
2141 | |||
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 3f06ce2becf5..e9e4a1df8989 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -34,8 +34,10 @@ | |||
34 | #include <scsi/scsi_host.h> | 34 | #include <scsi/scsi_host.h> |
35 | #include <scsi/scsi_transport_fc.h> | 35 | #include <scsi/scsi_transport_fc.h> |
36 | 36 | ||
37 | #include "lpfc_hw4.h" | ||
37 | #include "lpfc_hw.h" | 38 | #include "lpfc_hw.h" |
38 | #include "lpfc_sli.h" | 39 | #include "lpfc_sli.h" |
40 | #include "lpfc_sli4.h" | ||
39 | #include "lpfc_nl.h" | 41 | #include "lpfc_nl.h" |
40 | #include "lpfc_disc.h" | 42 | #include "lpfc_disc.h" |
41 | #include "lpfc_scsi.h" | 43 | #include "lpfc_scsi.h" |
@@ -51,9 +53,23 @@ char *_dump_buf_dif; | |||
51 | unsigned long _dump_buf_dif_order; | 53 | unsigned long _dump_buf_dif_order; |
52 | spinlock_t _dump_buf_lock; | 54 | spinlock_t _dump_buf_lock; |
53 | 55 | ||
54 | static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); | ||
55 | static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); | 56 | static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); |
56 | static int lpfc_post_rcv_buf(struct lpfc_hba *); | 57 | static int lpfc_post_rcv_buf(struct lpfc_hba *); |
58 | static int lpfc_sli4_queue_create(struct lpfc_hba *); | ||
59 | static void lpfc_sli4_queue_destroy(struct lpfc_hba *); | ||
60 | static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); | ||
61 | static int lpfc_setup_endian_order(struct lpfc_hba *); | ||
62 | static int lpfc_sli4_read_config(struct lpfc_hba *); | ||
63 | static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); | ||
64 | static void lpfc_free_sgl_list(struct lpfc_hba *); | ||
65 | static int lpfc_init_sgl_list(struct lpfc_hba *); | ||
66 | static int lpfc_init_active_sgl_array(struct lpfc_hba *); | ||
67 | static void lpfc_free_active_sgl(struct lpfc_hba *); | ||
68 | static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); | ||
69 | static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); | ||
70 | static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); | ||
71 | static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); | ||
72 | static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); | ||
57 | 73 | ||
58 | static struct scsi_transport_template *lpfc_transport_template = NULL; | 74 | static struct scsi_transport_template *lpfc_transport_template = NULL; |
59 | static struct scsi_transport_template *lpfc_vport_transport_template = NULL; | 75 | static struct scsi_transport_template *lpfc_vport_transport_template = NULL; |
@@ -646,6 +662,77 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) | |||
646 | 662 | ||
647 | return 0; | 663 | return 0; |
648 | } | 664 | } |
665 | /** | ||
666 | * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset | ||
667 | * @phba: pointer to lpfc HBA data structure. | ||
668 | * | ||
669 | * This routine will do uninitialization after the HBA is reset when bringing | ||
670 | * down the SLI Layer. | ||
671 | * | ||
672 | * Return codes | ||
673 | * 0 - success. | ||
674 | * Any other value - error. | ||
675 | **/ | ||
676 | static int | ||
677 | lpfc_hba_down_post_s4(struct lpfc_hba *phba) | ||
678 | { | ||
679 | struct lpfc_scsi_buf *psb, *psb_next; | ||
680 | LIST_HEAD(aborts); | ||
681 | int ret; | ||
682 | unsigned long iflag = 0; | ||
683 | ret = lpfc_hba_down_post_s3(phba); | ||
684 | if (ret) | ||
685 | return ret; | ||
686 | /* At this point in time the HBA is either reset or DOA. Either | ||
687 | * way, nothing should be on lpfc_abts_els_sgl_list; it needs to be | ||
688 | * on the lpfc_sgl_list so that it can either be freed if the | ||
689 | * driver is unloading or reposted if the driver is restarting | ||
690 | * the port. | ||
691 | */ | ||
692 | spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ | ||
693 | /* scsi_buf_list */ | ||
694 | /* abts_sgl_list_lock required because worker thread uses this | ||
695 | * list. | ||
696 | */ | ||
697 | spin_lock(&phba->sli4_hba.abts_sgl_list_lock); | ||
698 | list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, | ||
699 | &phba->sli4_hba.lpfc_sgl_list); | ||
700 | spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); | ||
701 | /* abts_scsi_buf_list_lock required because worker thread uses this | ||
702 | * list. | ||
703 | */ | ||
704 | spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); | ||
705 | list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, | ||
706 | &aborts); | ||
707 | spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); | ||
708 | spin_unlock_irq(&phba->hbalock); | ||
709 | |||
710 | list_for_each_entry_safe(psb, psb_next, &aborts, list) { | ||
711 | psb->pCmd = NULL; | ||
712 | psb->status = IOSTAT_SUCCESS; | ||
713 | } | ||
714 | spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); | ||
715 | list_splice(&aborts, &phba->lpfc_scsi_buf_list); | ||
716 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); | ||
717 | return 0; | ||
718 | } | ||
719 | |||
720 | /** | ||
721 | * lpfc_hba_down_post - Wrapper func for hba down post routine | ||
722 | * @phba: pointer to lpfc HBA data structure. | ||
723 | * | ||
724 | * This routine wraps the actual SLI3 or SLI4 routine for performing | ||
725 | * uninitialization after the HBA is reset when bringing down the SLI Layer. | ||
726 | * | ||
727 | * Return codes | ||
728 | * 0 - success. | ||
729 | * Any other value - error. | ||
730 | **/ | ||
731 | int | ||
732 | lpfc_hba_down_post(struct lpfc_hba *phba) | ||
733 | { | ||
734 | return (*phba->lpfc_hba_down_post)(phba); | ||
735 | } | ||
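These thin wrappers only work because the per-SLI-rev routines were stashed in the lpfc_hba jump table when the PCI device group was identified; a hedged sketch of that wiring follows (LPFC_PCI_DEV_LP/LPFC_PCI_DEV_OC and the function itself are illustrative names based on the rest of the patch, not code shown in this hunk):

/* Hedged sketch of the per-SLI-rev jump-table setup the wrappers rely on. */
static int lpfc_example_setup_init_api(struct lpfc_hba *phba, int dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI2/SLI3 HBAs */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:		/* SLI4 / OneConnect HBAs */
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}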
649 | 736 | ||
650 | /** | 737 | /** |
651 | * lpfc_hb_timeout - The HBA-timer timeout handler | 738 | * lpfc_hb_timeout - The HBA-timer timeout handler |
@@ -853,6 +940,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba) | |||
853 | } | 940 | } |
854 | 941 | ||
855 | /** | 942 | /** |
943 | * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention | ||
944 | * @phba: pointer to lpfc hba data structure. | ||
945 | * | ||
946 | * This routine is called to bring a SLI4 HBA offline when HBA hardware error | ||
947 | * other than Port Error 6 has been detected. | ||
948 | **/ | ||
949 | static void | ||
950 | lpfc_sli4_offline_eratt(struct lpfc_hba *phba) | ||
951 | { | ||
952 | lpfc_offline_prep(phba); | ||
953 | lpfc_offline(phba); | ||
954 | lpfc_sli4_brdreset(phba); | ||
955 | lpfc_hba_down_post(phba); | ||
956 | lpfc_sli4_post_status_check(phba); | ||
957 | lpfc_unblock_mgmt_io(phba); | ||
958 | phba->link_state = LPFC_HBA_ERROR; | ||
959 | } | ||
960 | |||
961 | /** | ||
856 | * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler | 962 | * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler |
857 | * @phba: pointer to lpfc hba data structure. | 963 | * @phba: pointer to lpfc hba data structure. |
858 | * | 964 | * |
@@ -1057,6 +1163,65 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) | |||
1057 | } | 1163 | } |
1058 | 1164 | ||
1059 | /** | 1165 | /** |
1166 | * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler | ||
1167 | * @phba: pointer to lpfc hba data structure. | ||
1168 | * | ||
1169 | * This routine is invoked to handle the SLI4 HBA hardware error attention | ||
1170 | * conditions. | ||
1171 | **/ | ||
1172 | static void | ||
1173 | lpfc_handle_eratt_s4(struct lpfc_hba *phba) | ||
1174 | { | ||
1175 | struct lpfc_vport *vport = phba->pport; | ||
1176 | uint32_t event_data; | ||
1177 | struct Scsi_Host *shost; | ||
1178 | |||
1179 | /* If the pci channel is offline, ignore possible errors, since | ||
1180 | * we cannot communicate with the pci card anyway. | ||
1181 | */ | ||
1182 | if (pci_channel_offline(phba->pcidev)) | ||
1183 | return; | ||
1184 | /* If resets are disabled then leave the HBA alone and return */ | ||
1185 | if (!phba->cfg_enable_hba_reset) | ||
1186 | return; | ||
1187 | |||
1188 | /* Send an internal error event to mgmt application */ | ||
1189 | lpfc_board_errevt_to_mgmt(phba); | ||
1190 | |||
1191 | /* For now, the actual action for SLI4 device handling is not | ||
1192 | * specified yet; just treat it as an adapter hardware failure | ||
1193 | */ | ||
1194 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
1195 | "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", | ||
1196 | phba->work_status[0], phba->work_status[1]); | ||
1197 | |||
1198 | event_data = FC_REG_DUMP_EVENT; | ||
1199 | shost = lpfc_shost_from_vport(vport); | ||
1200 | fc_host_post_vendor_event(shost, fc_get_event_number(), | ||
1201 | sizeof(event_data), (char *) &event_data, | ||
1202 | SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); | ||
1203 | |||
1204 | lpfc_sli4_offline_eratt(phba); | ||
1205 | } | ||
1206 | |||
1207 | /** | ||
1208 | * lpfc_handle_eratt - Wrapper func for handling hba error attention | ||
1209 | * @phba: pointer to lpfc HBA data structure. | ||
1210 | * | ||
1211 | * This routine wraps the actual SLI3 or SLI4 hba error attention handling | ||
1212 | * routine from the API jump table function pointer from the lpfc_hba struct. | ||
1213 | * | ||
1214 | * This routine has no return value. | ||
1217 | **/ | ||
1218 | void | ||
1219 | lpfc_handle_eratt(struct lpfc_hba *phba) | ||
1220 | { | ||
1221 | (*phba->lpfc_handle_eratt)(phba); | ||
1222 | } | ||
1223 | |||
1224 | /** | ||
1060 | * lpfc_handle_latt - The HBA link event handler | 1225 | * lpfc_handle_latt - The HBA link event handler |
1061 | * @phba: pointer to lpfc hba data structure. | 1226 | * @phba: pointer to lpfc hba data structure. |
1062 | * | 1227 | * |
@@ -1312,6 +1477,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
1312 | uint16_t dev_id = phba->pcidev->device; | 1477 | uint16_t dev_id = phba->pcidev->device; |
1313 | int max_speed; | 1478 | int max_speed; |
1314 | int GE = 0; | 1479 | int GE = 0; |
1480 | int oneConnect = 0; /* default is not a oneConnect */ | ||
1315 | struct { | 1481 | struct { |
1316 | char * name; | 1482 | char * name; |
1317 | int max_speed; | 1483 | int max_speed; |
@@ -1457,6 +1623,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
1457 | case PCI_DEVICE_ID_PROTEUS_S: | 1623 | case PCI_DEVICE_ID_PROTEUS_S: |
1458 | m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; | 1624 | m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; |
1459 | break; | 1625 | break; |
1626 | case PCI_DEVICE_ID_TIGERSHARK: | ||
1627 | oneConnect = 1; | ||
1628 | m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; | ||
1629 | break; | ||
1630 | case PCI_DEVICE_ID_TIGERSHARK_S: | ||
1631 | oneConnect = 1; | ||
1632 | m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"}; | ||
1633 | break; | ||
1460 | default: | 1634 | default: |
1461 | m = (typeof(m)){ NULL }; | 1635 | m = (typeof(m)){ NULL }; |
1462 | break; | 1636 | break; |
@@ -1464,13 +1638,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) | |||
1464 | 1638 | ||
1465 | if (mdp && mdp[0] == '\0') | 1639 | if (mdp && mdp[0] == '\0') |
1466 | snprintf(mdp, 79,"%s", m.name); | 1640 | snprintf(mdp, 79,"%s", m.name); |
1467 | if (descp && descp[0] == '\0') | 1641 | /* oneConnect HBAs require special processing; they are all initiators
1468 | snprintf(descp, 255, | 1642 | * and we put the port number on the end |
1469 | "Emulex %s %d%s %s %s", | 1643 | */ |
1470 | m.name, m.max_speed, | 1644 | if (descp && descp[0] == '\0') { |
1471 | (GE) ? "GE" : "Gb", | 1645 | if (oneConnect) |
1472 | m.bus, | 1646 | snprintf(descp, 255, |
1473 | (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); | 1647 | "Emulex OneConnect %s, FCoE Initiator, Port %s", |
1648 | m.name, | ||
1649 | phba->Port); | ||
1650 | else | ||
1651 | snprintf(descp, 255, | ||
1652 | "Emulex %s %d%s %s %s", | ||
1653 | m.name, m.max_speed, | ||
1654 | (GE) ? "GE" : "Gb", | ||
1655 | m.bus, | ||
1656 | (GE) ? "FCoE Adapter" : | ||
1657 | "Fibre Channel Adapter"); | ||
1658 | } | ||
1474 | } | 1659 | } |
1475 | 1660 | ||
1476 | /** | 1661 | /** |
@@ -1911,14 +2096,21 @@ lpfc_online(struct lpfc_hba *phba) | |||
1911 | return 1; | 2096 | return 1; |
1912 | } | 2097 | } |
1913 | 2098 | ||
1914 | if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ | 2099 | if (phba->sli_rev == LPFC_SLI_REV4) { |
1915 | lpfc_unblock_mgmt_io(phba); | 2100 | if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ |
1916 | return 1; | 2101 | lpfc_unblock_mgmt_io(phba); |
2102 | return 1; | ||
2103 | } | ||
2104 | } else { | ||
2105 | if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ | ||
2106 | lpfc_unblock_mgmt_io(phba); | ||
2107 | return 1; | ||
2108 | } | ||
1917 | } | 2109 | } |
1918 | 2110 | ||
1919 | vports = lpfc_create_vport_work_array(phba); | 2111 | vports = lpfc_create_vport_work_array(phba); |
1920 | if (vports != NULL) | 2112 | if (vports != NULL) |
1921 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 2113 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
1922 | struct Scsi_Host *shost; | 2114 | struct Scsi_Host *shost; |
1923 | shost = lpfc_shost_from_vport(vports[i]); | 2115 | shost = lpfc_shost_from_vport(vports[i]); |
1924 | spin_lock_irq(shost->host_lock); | 2116 | spin_lock_irq(shost->host_lock); |
@@ -1980,11 +2172,12 @@ lpfc_offline_prep(struct lpfc_hba * phba) | |||
1980 | /* Issue an unreg_login to all nodes on all vports */ | 2172 | /* Issue an unreg_login to all nodes on all vports */ |
1981 | vports = lpfc_create_vport_work_array(phba); | 2173 | vports = lpfc_create_vport_work_array(phba); |
1982 | if (vports != NULL) { | 2174 | if (vports != NULL) { |
1983 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 2175 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
1984 | struct Scsi_Host *shost; | 2176 | struct Scsi_Host *shost; |
1985 | 2177 | ||
1986 | if (vports[i]->load_flag & FC_UNLOADING) | 2178 | if (vports[i]->load_flag & FC_UNLOADING) |
1987 | continue; | 2179 | continue; |
2180 | vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; | ||
1988 | shost = lpfc_shost_from_vport(vports[i]); | 2181 | shost = lpfc_shost_from_vport(vports[i]); |
1989 | list_for_each_entry_safe(ndlp, next_ndlp, | 2182 | list_for_each_entry_safe(ndlp, next_ndlp, |
1990 | &vports[i]->fc_nodes, | 2183 | &vports[i]->fc_nodes, |
@@ -2029,11 +2222,11 @@ lpfc_offline(struct lpfc_hba *phba) | |||
2029 | if (phba->pport->fc_flag & FC_OFFLINE_MODE) | 2222 | if (phba->pport->fc_flag & FC_OFFLINE_MODE) |
2030 | return; | 2223 | return; |
2031 | 2224 | ||
2032 | /* stop all timers associated with this hba */ | 2225 | /* stop port and all timers associated with this hba */ |
2033 | lpfc_stop_phba_timers(phba); | 2226 | lpfc_stop_port(phba); |
2034 | vports = lpfc_create_vport_work_array(phba); | 2227 | vports = lpfc_create_vport_work_array(phba); |
2035 | if (vports != NULL) | 2228 | if (vports != NULL) |
2036 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) | 2229 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) |
2037 | lpfc_stop_vport_timers(vports[i]); | 2230 | lpfc_stop_vport_timers(vports[i]); |
2038 | lpfc_destroy_vport_work_array(phba, vports); | 2231 | lpfc_destroy_vport_work_array(phba, vports); |
2039 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 2232 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, |
@@ -2046,7 +2239,7 @@ lpfc_offline(struct lpfc_hba *phba) | |||
2046 | spin_unlock_irq(&phba->hbalock); | 2239 | spin_unlock_irq(&phba->hbalock); |
2047 | vports = lpfc_create_vport_work_array(phba); | 2240 | vports = lpfc_create_vport_work_array(phba); |
2048 | if (vports != NULL) | 2241 | if (vports != NULL) |
2049 | for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { | 2242 | for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { |
2050 | shost = lpfc_shost_from_vport(vports[i]); | 2243 | shost = lpfc_shost_from_vport(vports[i]); |
2051 | spin_lock_irq(shost->host_lock); | 2244 | spin_lock_irq(shost->host_lock); |
2052 | vports[i]->work_port_events = 0; | 2245 | vports[i]->work_port_events = 0; |
@@ -2139,6 +2332,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | |||
2139 | shost->max_lun = vport->cfg_max_luns; | 2332 | shost->max_lun = vport->cfg_max_luns; |
2140 | shost->this_id = -1; | 2333 | shost->this_id = -1; |
2141 | shost->max_cmd_len = 16; | 2334 | shost->max_cmd_len = 16; |
2335 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
2336 | shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE; | ||
2337 | shost->sg_tablesize = phba->cfg_sg_seg_cnt; | ||
2338 | } | ||
2142 | 2339 | ||
2143 | /* | 2340 | /* |
2144 | * Set initial can_queue value since 0 is no longer supported and | 2341 | * Set initial can_queue value since 0 is no longer supported and |
@@ -2156,6 +2353,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) | |||
2156 | 2353 | ||
2157 | /* Initialize all internally managed lists. */ | 2354 | /* Initialize all internally managed lists. */ |
2158 | INIT_LIST_HEAD(&vport->fc_nodes); | 2355 | INIT_LIST_HEAD(&vport->fc_nodes); |
2356 | INIT_LIST_HEAD(&vport->rcv_buffer_list); | ||
2159 | spin_lock_init(&vport->work_port_lock); | 2357 | spin_lock_init(&vport->work_port_lock); |
2160 | 2358 | ||
2161 | init_timer(&vport->fc_disctmo); | 2359 | init_timer(&vport->fc_disctmo); |
@@ -2347,192 +2545,501 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) | |||
2347 | } | 2545 | } |
2348 | 2546 | ||
2349 | /** | 2547 | /** |
2350 | * lpfc_enable_msix - Enable MSI-X interrupt mode | 2548 | * lpfc_stop_port_s3 - Stop SLI3 device port |
2351 | * @phba: pointer to lpfc hba data structure. | 2549 | * @phba: pointer to lpfc hba data structure. |
2352 | * | 2550 | * |
2353 | * This routine is invoked to enable the MSI-X interrupt vectors. The kernel | 2551 | * This routine is invoked to stop an SLI3 device port, it stops the device |
2354 | * function pci_enable_msix() is called to enable the MSI-X vectors. Note that | 2552 | * from generating interrupts and stops the device driver's timers for the |
2355 | * pci_enable_msix(), once invoked, enables either all or nothing, depending | 2553 | * device. |
2356 | * on the current availability of PCI vector resources. The device driver is | 2554 | **/ |
2357 | * responsible for calling the individual request_irq() to register each MSI-X | 2555 | static void |
2358 | * vector with a interrupt handler, which is done in this function. Note that | 2556 | lpfc_stop_port_s3(struct lpfc_hba *phba) |
2359 | * later when device is unloading, the driver should always call free_irq() | 2557 | { |
2360 | * on all MSI-X vectors it has done request_irq() on before calling | 2558 | /* Clear all interrupt enable conditions */ |
2361 | * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device | 2559 | writel(0, phba->HCregaddr); |
2362 | * will be left with MSI-X enabled and leaks its vectors. | 2560 | readl(phba->HCregaddr); /* flush */ |
2561 | /* Clear all pending interrupts */ | ||
2562 | writel(0xffffffff, phba->HAregaddr); | ||
2563 | readl(phba->HAregaddr); /* flush */ | ||
2564 | |||
2565 | /* Reset some HBA SLI setup states */ | ||
2566 | lpfc_stop_hba_timers(phba); | ||
2567 | phba->pport->work_port_events = 0; | ||
2568 | } | ||
2569 | |||
2570 | /** | ||
2571 | * lpfc_stop_port_s4 - Stop SLI4 device port | ||
2572 | * @phba: pointer to lpfc hba data structure. | ||
2363 | * | 2573 | * |
2364 | * Return codes | 2574 | * This routine is invoked to stop an SLI4 device port, it stops the device |
2365 | * 0 - sucessful | 2575 | * from generating interrupts and stops the device driver's timers for the |
2366 | * other values - error | 2576 | * device. |
2367 | **/ | 2577 | **/ |
2368 | static int | 2578 | static void |
2369 | lpfc_enable_msix(struct lpfc_hba *phba) | 2579 | lpfc_stop_port_s4(struct lpfc_hba *phba) |
2370 | { | 2580 | { |
2371 | int rc, i; | 2581 | /* Reset some HBA SLI4 setup states */ |
2372 | LPFC_MBOXQ_t *pmb; | 2582 | lpfc_stop_hba_timers(phba); |
2583 | phba->pport->work_port_events = 0; | ||
2584 | phba->sli4_hba.intr_enable = 0; | ||
2585 | /* Hard clear it for now, shall have more graceful way to wait later */ | ||
2586 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
2587 | } | ||
2373 | 2588 | ||
2374 | /* Set up MSI-X multi-message vectors */ | 2589 | /** |
2375 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | 2590 | * lpfc_stop_port - Wrapper function for stopping hba port |
2376 | phba->msix_entries[i].entry = i; | 2591 | * @phba: Pointer to HBA context object. |
2592 | * | ||
2593 | * This routine wraps the actual SLI3 or SLI4 hba stop port routine from | ||
2594 | * the API jump table function pointer from the lpfc_hba struct. | ||
2595 | **/ | ||
2596 | void | ||
2597 | lpfc_stop_port(struct lpfc_hba *phba) | ||
2598 | { | ||
2599 | phba->lpfc_stop_port(phba); | ||
2600 | } | ||
2377 | 2601 | ||
2378 | /* Configure MSI-X capability structure */ | 2602 | /** |
2379 | rc = pci_enable_msix(phba->pcidev, phba->msix_entries, | 2603 | * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port. |
2380 | ARRAY_SIZE(phba->msix_entries)); | 2604 | * @phba: pointer to lpfc hba data structure. |
2381 | if (rc) { | 2605 | * |
2382 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2606 | * This routine is invoked to remove the driver default fcf record from |
2383 | "0420 PCI enable MSI-X failed (%d)\n", rc); | 2607 | * the port. This routine currently acts on FCF Index 0. |
2384 | goto msi_fail_out; | 2608 | * |
2385 | } else | 2609 | **/ |
2386 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | 2610 | void |
2387 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2611 | lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) |
2388 | "0477 MSI-X entry[%d]: vector=x%x " | 2612 | { |
2389 | "message=%d\n", i, | 2613 | int rc = 0; |
2390 | phba->msix_entries[i].vector, | 2614 | LPFC_MBOXQ_t *mboxq; |
2391 | phba->msix_entries[i].entry); | 2615 | struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; |
2616 | uint32_t mbox_tmo, req_len; | ||
2617 | uint32_t shdr_status, shdr_add_status; | ||
2618 | |||
2619 | mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
2620 | if (!mboxq) { | ||
2621 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2622 | "2020 Failed to allocate mbox for ADD_FCF cmd\n"); | ||
2623 | return; | ||
2624 | } | ||
2625 | |||
2626 | req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - | ||
2627 | sizeof(struct lpfc_sli4_cfg_mhdr); | ||
2628 | rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, | ||
2629 | LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, | ||
2630 | req_len, LPFC_SLI4_MBX_EMBED); | ||
2392 | /* | 2631 | /* |
2393 | * Assign MSI-X vectors to interrupt handlers | 2632 | * In phase 1, there is a single FCF index, 0. In phase2, the driver |
2633 | * supports multiple FCF indices. | ||
2394 | */ | 2634 | */ |
2635 | del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; | ||
2636 | bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); | ||
2637 | bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, | ||
2638 | phba->fcf.fcf_indx); | ||
2395 | 2639 | ||
2396 | /* vector-0 is associated to slow-path handler */ | 2640 | if (!phba->sli4_hba.intr_enable) |
2397 | rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, | 2641 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); |
2398 | IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); | 2642 | else { |
2399 | if (rc) { | 2643 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); |
2400 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 2644 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); |
2401 | "0421 MSI-X slow-path request_irq failed " | 2645 | } |
2402 | "(%d)\n", rc); | 2646 | /* The IOCTL status is embedded in the mailbox subheader. */ |
2403 | goto msi_fail_out; | 2647 | shdr_status = bf_get(lpfc_mbox_hdr_status, |
2648 | &del_fcf_record->header.cfg_shdr.response); | ||
2649 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, | ||
2650 | &del_fcf_record->header.cfg_shdr.response); | ||
2651 | if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { | ||
2652 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2653 | "2516 DEL FCF of default FCF Index failed " | ||
2654 | "mbx status x%x, status x%x add_status x%x\n", | ||
2655 | rc, shdr_status, shdr_add_status); | ||
2404 | } | 2656 | } |
2657 | if (rc != MBX_TIMEOUT) | ||
2658 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
2659 | } | ||
2405 | 2660 | ||
2406 | /* vector-1 is associated to fast-path handler */ | 2661 | /** |
2407 | rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, | 2662 | * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code |
2408 | IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); | 2663 | * @phba: pointer to lpfc hba data structure. |
2664 | * @acqe_link: pointer to the async link completion queue entry. | ||
2665 | * | ||
2666 | * This routine is to parse the SLI4 link-attention link fault code and | ||
2667 | * translate it into the base driver's read link attention mailbox command | ||
2668 | * status. | ||
2669 | * | ||
2670 | * Return: Link-attention status in terms of base driver's coding. | ||
2671 | **/ | ||
2672 | static uint16_t | ||
2673 | lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, | ||
2674 | struct lpfc_acqe_link *acqe_link) | ||
2675 | { | ||
2676 | uint16_t latt_fault; | ||
2409 | 2677 | ||
2410 | if (rc) { | 2678 | switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { |
2411 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 2679 | case LPFC_ASYNC_LINK_FAULT_NONE: |
2412 | "0429 MSI-X fast-path request_irq failed " | 2680 | case LPFC_ASYNC_LINK_FAULT_LOCAL: |
2413 | "(%d)\n", rc); | 2681 | case LPFC_ASYNC_LINK_FAULT_REMOTE: |
2414 | goto irq_fail_out; | 2682 | latt_fault = 0; |
2683 | break; | ||
2684 | default: | ||
2685 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2686 | "0398 Invalid link fault code: x%x\n", | ||
2687 | bf_get(lpfc_acqe_link_fault, acqe_link)); | ||
2688 | latt_fault = MBXERR_ERROR; | ||
2689 | break; | ||
2415 | } | 2690 | } |
2691 | return latt_fault; | ||
2692 | } | ||
2416 | 2693 | ||
2417 | /* | 2694 | /** |
2418 | * Configure HBA MSI-X attention conditions to messages | 2695 | * lpfc_sli4_parse_latt_type - Parse sli4 link attention type |
2419 | */ | 2696 | * @phba: pointer to lpfc hba data structure. |
2420 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 2697 | * @acqe_link: pointer to the async link completion queue entry. |
2698 | * | ||
2699 | * This routine is to parse the SLI4 link attention type and translate it | ||
2700 | * into the base driver's link attention type coding. | ||
2701 | * | ||
2702 | * Return: Link attention type in terms of base driver's coding. | ||
2703 | **/ | ||
2704 | static uint8_t | ||
2705 | lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, | ||
2706 | struct lpfc_acqe_link *acqe_link) | ||
2707 | { | ||
2708 | uint8_t att_type; | ||
2421 | 2709 | ||
2422 | if (!pmb) { | 2710 | switch (bf_get(lpfc_acqe_link_status, acqe_link)) { |
2423 | rc = -ENOMEM; | 2711 | case LPFC_ASYNC_LINK_STATUS_DOWN: |
2712 | case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: | ||
2713 | att_type = AT_LINK_DOWN; | ||
2714 | break; | ||
2715 | case LPFC_ASYNC_LINK_STATUS_UP: | ||
2716 | /* Ignore physical link up events - wait for logical link up */ | ||
2717 | att_type = AT_RESERVED; | ||
2718 | break; | ||
2719 | case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: | ||
2720 | att_type = AT_LINK_UP; | ||
2721 | break; | ||
2722 | default: | ||
2424 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2723 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2425 | "0474 Unable to allocate memory for issuing " | 2724 | "0399 Invalid link attention type: x%x\n", |
2426 | "MBOX_CONFIG_MSI command\n"); | 2725 | bf_get(lpfc_acqe_link_status, acqe_link)); |
2427 | goto mem_fail_out; | 2726 | att_type = AT_RESERVED; |
2727 | break; | ||
2428 | } | 2728 | } |
2429 | rc = lpfc_config_msi(phba, pmb); | 2729 | return att_type; |
2430 | if (rc) | 2730 | } |
2431 | goto mbx_fail_out; | 2731 | |
2432 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | 2732 | /** |
2433 | if (rc != MBX_SUCCESS) { | 2733 | * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed |
2434 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, | 2734 | * @phba: pointer to lpfc hba data structure. |
2435 | "0351 Config MSI mailbox command failed, " | 2735 | * @acqe_link: pointer to the async link completion queue entry. |
2436 | "mbxCmd x%x, mbxStatus x%x\n", | 2736 | * |
2437 | pmb->mb.mbxCommand, pmb->mb.mbxStatus); | 2737 | * This routine is to parse the SLI4 link-attention link speed and translate |
2438 | goto mbx_fail_out; | 2738 | * it into the base driver's link-attention link speed coding. |
2739 | * | ||
2740 | * Return: Link-attention link speed in terms of base driver's coding. | ||
2741 | **/ | ||
2742 | static uint8_t | ||
2743 | lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, | ||
2744 | struct lpfc_acqe_link *acqe_link) | ||
2745 | { | ||
2746 | uint8_t link_speed; | ||
2747 | |||
2748 | switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { | ||
2749 | case LPFC_ASYNC_LINK_SPEED_ZERO: | ||
2750 | link_speed = LA_UNKNW_LINK; | ||
2751 | break; | ||
2752 | case LPFC_ASYNC_LINK_SPEED_10MBPS: | ||
2753 | link_speed = LA_UNKNW_LINK; | ||
2754 | break; | ||
2755 | case LPFC_ASYNC_LINK_SPEED_100MBPS: | ||
2756 | link_speed = LA_UNKNW_LINK; | ||
2757 | break; | ||
2758 | case LPFC_ASYNC_LINK_SPEED_1GBPS: | ||
2759 | link_speed = LA_1GHZ_LINK; | ||
2760 | break; | ||
2761 | case LPFC_ASYNC_LINK_SPEED_10GBPS: | ||
2762 | link_speed = LA_10GHZ_LINK; | ||
2763 | break; | ||
2764 | default: | ||
2765 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
2766 | "0483 Invalid link-attention link speed: x%x\n", | ||
2767 | bf_get(lpfc_acqe_link_speed, acqe_link)); | ||
2768 | link_speed = LA_UNKNW_LINK; | ||
2769 | break; | ||
2439 | } | 2770 | } |
2771 | return link_speed; | ||
2772 | } | ||
2440 | 2773 | ||
2441 | /* Free memory allocated for mailbox command */ | 2774 | /** |
2442 | mempool_free(pmb, phba->mbox_mem_pool); | 2775 | * lpfc_sli4_async_link_evt - Process the asynchronous link event |
2443 | return rc; | 2776 | * @phba: pointer to lpfc hba data structure. |
2777 | * @acqe_link: pointer to the async link completion queue entry. | ||
2778 | * | ||
2779 | * This routine is to handle the SLI4 asynchronous link event. | ||
2780 | **/ | ||
2781 | static void | ||
2782 | lpfc_sli4_async_link_evt(struct lpfc_hba *phba, | ||
2783 | struct lpfc_acqe_link *acqe_link) | ||
2784 | { | ||
2785 | struct lpfc_dmabuf *mp; | ||
2786 | LPFC_MBOXQ_t *pmb; | ||
2787 | MAILBOX_t *mb; | ||
2788 | READ_LA_VAR *la; | ||
2789 | uint8_t att_type; | ||
2444 | 2790 | ||
2445 | mbx_fail_out: | 2791 | att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); |
2446 | /* Free memory allocated for mailbox command */ | 2792 | if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) |
2447 | mempool_free(pmb, phba->mbox_mem_pool); | 2793 | return; |
2794 | pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
2795 | if (!pmb) { | ||
2796 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2797 | "0395 The mboxq allocation failed\n"); | ||
2798 | return; | ||
2799 | } | ||
2800 | mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
2801 | if (!mp) { | ||
2802 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2803 | "0396 The lpfc_dmabuf allocation failed\n"); | ||
2804 | goto out_free_pmb; | ||
2805 | } | ||
2806 | mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); | ||
2807 | if (!mp->virt) { | ||
2808 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2809 | "0397 The mbuf allocation failed\n"); | ||
2810 | goto out_free_dmabuf; | ||
2811 | } | ||
2448 | 2812 | ||
2449 | mem_fail_out: | 2813 | /* Cleanup any outstanding ELS commands */ |
2450 | /* free the irq already requested */ | 2814 | lpfc_els_flush_all_cmd(phba); |
2451 | free_irq(phba->msix_entries[1].vector, phba); | ||
2452 | 2815 | ||
2453 | irq_fail_out: | 2816 | /* Block ELS IOCBs until we have done process link event */ |
2454 | /* free the irq already requested */ | 2817 | phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; |
2455 | free_irq(phba->msix_entries[0].vector, phba); | ||
2456 | 2818 | ||
2457 | msi_fail_out: | 2819 | /* Update link event statistics */ |
2458 | /* Unconfigure MSI-X capability structure */ | 2820 | phba->sli.slistat.link_event++; |
2459 | pci_disable_msix(phba->pcidev); | 2821 | |
2460 | return rc; | 2822 | /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ |
2823 | lpfc_read_la(phba, pmb, mp); | ||
2824 | pmb->vport = phba->pport; | ||
2825 | |||
2826 | /* Parse and translate status field */ | ||
2827 | mb = &pmb->u.mb; | ||
2828 | mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); | ||
2829 | |||
2830 | /* Parse and translate link attention fields */ | ||
2831 | la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; | ||
2832 | la->eventTag = acqe_link->event_tag; | ||
2833 | la->attType = att_type; | ||
2834 | la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); | ||
2835 | |||
2836 | /* Fake the following irrelevant fields */ | ||
2837 | la->topology = TOPOLOGY_PT_PT; | ||
2838 | la->granted_AL_PA = 0; | ||
2839 | la->il = 0; | ||
2840 | la->pb = 0; | ||
2841 | la->fa = 0; | ||
2842 | la->mm = 0; | ||
2843 | |||
2844 | /* Keep the link status for extra SLI4 state machine reference */ | ||
2845 | phba->sli4_hba.link_state.speed = | ||
2846 | bf_get(lpfc_acqe_link_speed, acqe_link); | ||
2847 | phba->sli4_hba.link_state.duplex = | ||
2848 | bf_get(lpfc_acqe_link_duplex, acqe_link); | ||
2849 | phba->sli4_hba.link_state.status = | ||
2850 | bf_get(lpfc_acqe_link_status, acqe_link); | ||
2851 | phba->sli4_hba.link_state.physical = | ||
2852 | bf_get(lpfc_acqe_link_physical, acqe_link); | ||
2853 | phba->sli4_hba.link_state.fault = | ||
2854 | bf_get(lpfc_acqe_link_fault, acqe_link); | ||
2855 | |||
2856 | /* Invoke the lpfc_handle_latt mailbox command callback function */ | ||
2857 | lpfc_mbx_cmpl_read_la(phba, pmb); | ||
2858 | |||
2859 | return; | ||
2860 | |||
2861 | out_free_dmabuf: | ||
2862 | kfree(mp); | ||
2863 | out_free_pmb: | ||
2864 | mempool_free(pmb, phba->mbox_mem_pool); | ||
2461 | } | 2865 | } |
2462 | 2866 | ||
2463 | /** | 2867 | /** |
2464 | * lpfc_disable_msix - Disable MSI-X interrupt mode | 2868 | * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event |
2465 | * @phba: pointer to lpfc hba data structure. | 2869 | * @phba: pointer to lpfc hba data structure. |
2870 | * @acqe_link: pointer to the async fcoe completion queue entry. | ||
2466 | * | 2871 | * |
2467 | * This routine is invoked to release the MSI-X vectors and then disable the | 2872 | * This routine is to handle the SLI4 asynchronous fcoe event. |
2468 | * MSI-X interrupt mode. | ||
2469 | **/ | 2873 | **/ |
2470 | static void | 2874 | static void |
2471 | lpfc_disable_msix(struct lpfc_hba *phba) | 2875 | lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, |
2876 | struct lpfc_acqe_fcoe *acqe_fcoe) | ||
2472 | { | 2877 | { |
2473 | int i; | 2878 | uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); |
2879 | int rc; | ||
2474 | 2880 | ||
2475 | /* Free up MSI-X multi-message vectors */ | 2881 | switch (event_type) { |
2476 | for (i = 0; i < LPFC_MSIX_VECTORS; i++) | 2882 | case LPFC_FCOE_EVENT_TYPE_NEW_FCF: |
2477 | free_irq(phba->msix_entries[i].vector, phba); | 2883 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, |
2478 | /* Disable MSI-X */ | 2884 | "2546 New FCF found index 0x%x tag 0x%x \n", |
2479 | pci_disable_msix(phba->pcidev); | 2885 | acqe_fcoe->fcf_index, |
2886 | acqe_fcoe->event_tag); | ||
2887 | /* | ||
2888 | * If the current FCF is in discovered state, | ||
2889 | * do nothing. | ||
2890 | */ | ||
2891 | spin_lock_irq(&phba->hbalock); | ||
2892 | if (phba->fcf.fcf_flag & FCF_DISCOVERED) { | ||
2893 | spin_unlock_irq(&phba->hbalock); | ||
2894 | break; | ||
2895 | } | ||
2896 | spin_unlock_irq(&phba->hbalock); | ||
2897 | |||
2898 | /* Read the FCF table and re-discover SAN. */ | ||
2899 | rc = lpfc_sli4_read_fcf_record(phba, | ||
2900 | LPFC_FCOE_FCF_GET_FIRST); | ||
2901 | if (rc) | ||
2902 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | ||
2903 | "2547 Read FCF record failed 0x%x\n", | ||
2904 | rc); | ||
2905 | break; | ||
2906 | |||
2907 | case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: | ||
2908 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2909 | "2548 FCF Table full count 0x%x tag 0x%x \n", | ||
2910 | bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), | ||
2911 | acqe_fcoe->event_tag); | ||
2912 | break; | ||
2913 | |||
2914 | case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: | ||
2915 | lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, | ||
2916 | "2549 FCF disconnected fron network index 0x%x" | ||
2917 | " tag 0x%x \n", acqe_fcoe->fcf_index, | ||
2918 | acqe_fcoe->event_tag); | ||
2919 | /* If the event is not for currently used fcf do nothing */ | ||
2920 | if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) | ||
2921 | break; | ||
2922 | /* | ||
2923 | * Currently, driver support only one FCF - so treat this as | ||
2924 | * a link down. | ||
2925 | */ | ||
2926 | lpfc_linkdown(phba); | ||
2927 | /* Unregister FCF if no devices connected to it */ | ||
2928 | lpfc_unregister_unused_fcf(phba); | ||
2929 | break; | ||
2930 | |||
2931 | default: | ||
2932 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2933 | "0288 Unknown FCoE event type 0x%x event tag " | ||
2934 | "0x%x\n", event_type, acqe_fcoe->event_tag); | ||
2935 | break; | ||
2936 | } | ||
2480 | } | 2937 | } |
2481 | 2938 | ||
2482 | /** | 2939 | /** |
2483 | * lpfc_enable_msi - Enable MSI interrupt mode | 2940 | * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event |
2484 | * @phba: pointer to lpfc hba data structure. | 2941 | * @phba: pointer to lpfc hba data structure. |
2942 | * @acqe_link: pointer to the async dcbx completion queue entry. | ||
2485 | * | 2943 | * |
2486 | * This routine is invoked to enable the MSI interrupt mode. The kernel | 2944 | * This routine is to handle the SLI4 asynchronous dcbx event. |
2487 | * function pci_enable_msi() is called to enable the MSI vector. The | 2945 | **/ |
2488 | * device driver is responsible for calling the request_irq() to register | 2946 | static void |
2489 | * MSI vector with a interrupt the handler, which is done in this function. | 2947 | lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, |
2490 | * | 2948 | struct lpfc_acqe_dcbx *acqe_dcbx) |
2491 | * Return codes | ||
2492 | * 0 - sucessful | ||
2493 | * other values - error | ||
2494 | */ | ||
2495 | static int | ||
2496 | lpfc_enable_msi(struct lpfc_hba *phba) | ||
2497 | { | 2949 | { |
2498 | int rc; | 2950 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, |
2951 | "0290 The SLI4 DCBX asynchronous event is not " | ||
2952 | "handled yet\n"); | ||
2953 | } | ||
2499 | 2954 | ||
2500 | rc = pci_enable_msi(phba->pcidev); | 2955 | /** |
2501 | if (!rc) | 2956 | * lpfc_sli4_async_event_proc - Process all the pending asynchronous event |
2502 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2957 | * @phba: pointer to lpfc hba data structure. |
2503 | "0462 PCI enable MSI mode success.\n"); | 2958 | * |
2504 | else { | 2959 | * This routine is invoked by the worker thread to process all the pending |
2505 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | 2960 | * SLI4 asynchronous events. |
2506 | "0471 PCI enable MSI mode failed (%d)\n", rc); | 2961 | **/ |
2507 | return rc; | 2962 | void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) |
2508 | } | 2963 | { |
2964 | struct lpfc_cq_event *cq_event; | ||
2509 | 2965 | ||
2510 | rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, | 2966 | /* First, declare the async event has been handled */ |
2511 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | 2967 | spin_lock_irq(&phba->hbalock); |
2512 | if (rc) { | 2968 | phba->hba_flag &= ~ASYNC_EVENT; |
2513 | pci_disable_msi(phba->pcidev); | 2969 | spin_unlock_irq(&phba->hbalock); |
2514 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | 2970 | /* Now, handle all the async events */ |
2515 | "0478 MSI request_irq failed (%d)\n", rc); | 2971 | while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { |
2972 | /* Get the first event from the head of the event queue */ | ||
2973 | spin_lock_irq(&phba->hbalock); | ||
2974 | list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, | ||
2975 | cq_event, struct lpfc_cq_event, list); | ||
2976 | spin_unlock_irq(&phba->hbalock); | ||
2977 | /* Process the asynchronous event */ | ||
2978 | switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { | ||
2979 | case LPFC_TRAILER_CODE_LINK: | ||
2980 | lpfc_sli4_async_link_evt(phba, | ||
2981 | &cq_event->cqe.acqe_link); | ||
2982 | break; | ||
2983 | case LPFC_TRAILER_CODE_FCOE: | ||
2984 | lpfc_sli4_async_fcoe_evt(phba, | ||
2985 | &cq_event->cqe.acqe_fcoe); | ||
2986 | break; | ||
2987 | case LPFC_TRAILER_CODE_DCBX: | ||
2988 | lpfc_sli4_async_dcbx_evt(phba, | ||
2989 | &cq_event->cqe.acqe_dcbx); | ||
2990 | break; | ||
2991 | default: | ||
2992 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
2993 | "1804 Invalid asynchrous event code: " | ||
2994 | "x%x\n", bf_get(lpfc_trailer_code, | ||
2995 | &cq_event->cqe.mcqe_cmpl)); | ||
2996 | break; | ||
2997 | } | ||
2998 | /* Free the completion event processed to the free pool */ | ||
2999 | lpfc_sli4_cq_event_release(phba, cq_event); | ||
2516 | } | 3000 | } |
2517 | return rc; | ||
2518 | } | 3001 | } |
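lpfc_sli4_async_event_proc() drains a locked list of queued CQ events and dispatches each one on its trailer code. A minimal user-space analogue of that consume-under-lock-then-dispatch loop, using a plain singly linked list and a pthread mutex in place of hbalock and list_remove_head() (the struct and event types here are invented for illustration):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

enum ex_evt_type { EX_EVT_LINK, EX_EVT_FCOE, EX_EVT_DCBX };

struct ex_event {
        enum ex_evt_type type;
        struct ex_event *next;
};

static struct ex_event *ex_queue;                   /* head of pending events */
static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;

static void ex_event_proc(void)
{
        struct ex_event *evt;

        for (;;) {
                /* Pop one event while holding the lock, mirroring list_remove_head() */
                pthread_mutex_lock(&ex_lock);
                evt = ex_queue;
                if (evt)
                        ex_queue = evt->next;
                pthread_mutex_unlock(&ex_lock);
                if (!evt)
                        break;

                switch (evt->type) {                /* dispatch outside the lock */
                case EX_EVT_LINK: printf("link event\n"); break;
                case EX_EVT_FCOE: printf("fcoe event\n"); break;
                default:          printf("unhandled event\n"); break;
                }
                free(evt);                          /* return the event to the allocator */
        }
}

int main(void)
{
        struct ex_event *e = malloc(sizeof(*e));

        e->type = EX_EVT_LINK;
        e->next = NULL;
        ex_queue = e;
        ex_event_proc();
        return 0;
}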
2519 | 3002 | ||
2520 | /** | 3003 | /** |
2521 | * lpfc_disable_msi - Disable MSI interrupt mode | 3004 | * lpfc_api_table_setup - Set up per hba pci-device group func api jump table |
2522 | * @phba: pointer to lpfc hba data structure. | 3005 | * @phba: pointer to lpfc hba data structure. |
3006 | * @dev_grp: The HBA PCI-Device group number. | ||
2523 | * | 3007 | * |
2524 | * This routine is invoked to disable the MSI interrupt mode. The driver | 3008 | * This routine is invoked to set up the per HBA PCI-Device group function |
2525 | * calls free_irq() on MSI vector it has done request_irq() on before | 3009 | * API jump table entries. |
2526 | * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and | 3010 | * |
2527 | * a device will be left with MSI enabled and leaks its vector. | 3011 | * Return: 0 if success, otherwise -ENODEV |
2528 | */ | 3012 | **/ |
2529 | 3013 | int | |
2530 | static void | 3014 | lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) |
2531 | lpfc_disable_msi(struct lpfc_hba *phba) | ||
2532 | { | 3015 | { |
2533 | free_irq(phba->pcidev->irq, phba); | 3016 | int rc; |
2534 | pci_disable_msi(phba->pcidev); | 3017 | |
2535 | return; | 3018 | /* Set up lpfc PCI-device group */ |
3019 | phba->pci_dev_grp = dev_grp; | ||
3020 | |||
3021 | /* The LPFC_PCI_DEV_OC uses SLI4 */ | ||
3022 | if (dev_grp == LPFC_PCI_DEV_OC) | ||
3023 | phba->sli_rev = LPFC_SLI_REV4; | ||
3024 | |||
3025 | /* Set up device INIT API function jump table */ | ||
3026 | rc = lpfc_init_api_table_setup(phba, dev_grp); | ||
3027 | if (rc) | ||
3028 | return -ENODEV; | ||
3029 | /* Set up SCSI API function jump table */ | ||
3030 | rc = lpfc_scsi_api_table_setup(phba, dev_grp); | ||
3031 | if (rc) | ||
3032 | return -ENODEV; | ||
3033 | /* Set up SLI API function jump table */ | ||
3034 | rc = lpfc_sli_api_table_setup(phba, dev_grp); | ||
3035 | if (rc) | ||
3036 | return -ENODEV; | ||
3037 | /* Set up MBOX API function jump table */ | ||
3038 | rc = lpfc_mbox_api_table_setup(phba, dev_grp); | ||
3039 | if (rc) | ||
3040 | return -ENODEV; | ||
3041 | |||
3042 | return 0; | ||
2536 | } | 3043 | } |
2537 | 3044 | ||
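lpfc_api_table_setup() selects per-device-group behaviour once, by filling function pointers on the hba structure, so later code never has to branch on the SLI revision at each call site. A hedged, self-contained sketch of that jump-table idea (the ops members and handler names below are illustrative, not the driver's actual lpfc_hba fields):

#include <stdio.h>

enum ex_dev_grp { EX_DEV_LP, EX_DEV_OC };           /* illustrative device groups */

struct ex_hba {
        void (*stop_port)(struct ex_hba *);         /* filled per device group */
};

static void ex_stop_port_s3(struct ex_hba *hba) { (void)hba; printf("SLI-3 stop\n"); }
static void ex_stop_port_s4(struct ex_hba *hba) { (void)hba; printf("SLI-4 stop\n"); }

static int ex_api_table_setup(struct ex_hba *hba, enum ex_dev_grp grp)
{
        switch (grp) {
        case EX_DEV_LP: hba->stop_port = ex_stop_port_s3; break;
        case EX_DEV_OC: hba->stop_port = ex_stop_port_s4; break;
        default:        return -1;                  /* unknown PCI device group */
        }
        return 0;
}

int main(void)
{
        struct ex_hba hba;

        if (ex_api_table_setup(&hba, EX_DEV_OC) == 0)
                hba.stop_port(&hba);                /* callers never test the group again */
        return 0;
}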
2538 | /** | 3045 | /** |
@@ -2764,6 +3271,292 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) | |||
2764 | } | 3271 | } |
2765 | 3272 | ||
2766 | /** | 3273 | /** |
3274 | * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev | ||
3275 | * @phba: pointer to lpfc hba data structure. | ||
3276 | * | ||
3277 | * This routine is invoked to set up the driver internal resources specific to | ||
3278 | * support the SLI-4 HBA device it attached to. | ||
3279 | * | ||
3280 | * Return codes | ||
3281 | * 0 - successful | ||
3282 | * other values - error | ||
3283 | **/ | ||
3284 | static int | ||
3285 | lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | ||
3286 | { | ||
3287 | struct lpfc_sli *psli; | ||
3288 | int rc; | ||
3289 | int i, hbq_count; | ||
3290 | |||
3291 | /* Before proceeding, wait for POST done and device ready */ | ||
3292 | rc = lpfc_sli4_post_status_check(phba); | ||
3293 | if (rc) | ||
3294 | return -ENODEV; | ||
3295 | |||
3296 | /* | ||
3297 | * Initialize timers used by driver | ||
3298 | */ | ||
3299 | |||
3300 | /* Heartbeat timer */ | ||
3301 | init_timer(&phba->hb_tmofunc); | ||
3302 | phba->hb_tmofunc.function = lpfc_hb_timeout; | ||
3303 | phba->hb_tmofunc.data = (unsigned long)phba; | ||
3304 | |||
3305 | psli = &phba->sli; | ||
3306 | /* MBOX heartbeat timer */ | ||
3307 | init_timer(&psli->mbox_tmo); | ||
3308 | psli->mbox_tmo.function = lpfc_mbox_timeout; | ||
3309 | psli->mbox_tmo.data = (unsigned long) phba; | ||
3310 | /* Fabric block timer */ | ||
3311 | init_timer(&phba->fabric_block_timer); | ||
3312 | phba->fabric_block_timer.function = lpfc_fabric_block_timeout; | ||
3313 | phba->fabric_block_timer.data = (unsigned long) phba; | ||
3314 | /* EA polling mode timer */ | ||
3315 | init_timer(&phba->eratt_poll); | ||
3316 | phba->eratt_poll.function = lpfc_poll_eratt; | ||
3317 | phba->eratt_poll.data = (unsigned long) phba; | ||
3318 | /* | ||
3319 | * We need to do a READ_CONFIG mailbox command here before | ||
3320 | * calling lpfc_get_cfgparam. For VFs this will report the | ||
3321 | * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. | ||
3322 | * All of the resources allocated | ||
3323 | * for this Port are tied to these values. | ||
3324 | */ | ||
3325 | /* Get all the module params for configuring this host */ | ||
3326 | lpfc_get_cfgparam(phba); | ||
3327 | phba->max_vpi = LPFC_MAX_VPI; | ||
3328 | /* This will be set to correct value after the read_config mbox */ | ||
3329 | phba->max_vports = 0; | ||
3330 | |||
3331 | /* Program the default value of vlan_id and fc_map */ | ||
3332 | phba->valid_vlan = 0; | ||
3333 | phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; | ||
3334 | phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; | ||
3335 | phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; | ||
3336 | |||
3337 | /* | ||
3338 | * Since the sg_tablesize is a module parameter, the sg_dma_buf_size | ||
3339 | * used to create the sg_dma_buf_pool must be dynamically calculated. | ||
3340 | * 2 segments are added since the IOCB needs a command and response bde. | ||
3341 | * To ensure that the scsi sgl does not cross a 4k page boundary, only | ||
3342 | * sgl sizes of 1k, 2k, 4k, and 8k are supported. | ||
3343 | * Table of sgl sizes and seg_cnt: | ||
3344 | * sgl size, sg_seg_cnt total seg | ||
3345 | * 1k 50 52 | ||
3346 | * 2k 114 116 | ||
3347 | * 4k 242 244 | ||
3348 | * 8k 498 500 | ||
3349 | * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024 | ||
3350 | * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048 | ||
3351 | * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096 | ||
3352 | * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192 | ||
3353 | */ | ||
3354 | if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) | ||
3355 | phba->cfg_sg_seg_cnt = 50; | ||
3356 | else if (phba->cfg_sg_seg_cnt <= 114) | ||
3357 | phba->cfg_sg_seg_cnt = 114; | ||
3358 | else if (phba->cfg_sg_seg_cnt <= 242) | ||
3359 | phba->cfg_sg_seg_cnt = 242; | ||
3360 | else | ||
3361 | phba->cfg_sg_seg_cnt = 498; | ||
3362 | |||
3363 | phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) | ||
3364 | + sizeof(struct fcp_rsp); | ||
3365 | phba->cfg_sg_dma_buf_size += | ||
3366 | ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); | ||
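The table in the comment above can be checked with a few lines of arithmetic. The sketch below assumes, as the table implies, that an SLI4 SGE is 16 bytes and the FCP command and response structures are 32 and 160 bytes; it is only a sanity check on the sizing formula, not driver code:

#include <stdio.h>

/* Sizes implied by the table above; treated as assumptions for illustration */
#define EX_FCP_CMND_SZ  32
#define EX_FCP_RSP_SZ   160
#define EX_SGE_SZ       16

int main(void)
{
        int seg_cnts[] = { 50, 114, 242, 498 };     /* rounded-up sg_seg_cnt values */
        int i;

        for (i = 0; i < 4; i++) {
                /* command + response + (seg_cnt + 2 BDEs) * SGE size */
                int buf_sz = EX_FCP_CMND_SZ + EX_FCP_RSP_SZ +
                             (seg_cnts[i] + 2) * EX_SGE_SZ;
                printf("sg_seg_cnt %3d -> sg_dma_buf_size %d\n", seg_cnts[i], buf_sz);
        }
        return 0;
}

Running it reproduces the 1k/2k/4k/8k totals listed in the comment.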
3367 | |||
3368 | /* Initialize buffer queue management fields */ | ||
3369 | hbq_count = lpfc_sli_hbq_count(); | ||
3370 | for (i = 0; i < hbq_count; ++i) | ||
3371 | INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); | ||
3372 | INIT_LIST_HEAD(&phba->rb_pend_list); | ||
3373 | phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; | ||
3374 | phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; | ||
3375 | |||
3376 | /* | ||
3377 | * Initialize the SLI Layer to run with lpfc SLI4 HBAs. | ||
3378 | */ | ||
3379 | /* Initialize the Abort scsi buffer list used by driver */ | ||
3380 | spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); | ||
3381 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); | ||
3382 | /* This abort list used by worker thread */ | ||
3383 | spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); | ||
3384 | |||
3385 | /* | ||
3386 | * Initialize driver internal slow-path work queues | ||
3387 | */ | ||
3388 | |||
3389 | /* Driver internal slow-path CQ Event pool */ | ||
3390 | INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); | ||
3391 | /* Response IOCB work queue list */ | ||
3392 | INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); | ||
3393 | /* Asynchronous event CQ Event work queue list */ | ||
3394 | INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); | ||
3395 | /* Fast-path XRI aborted CQ Event work queue list */ | ||
3396 | INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); | ||
3397 | /* Slow-path XRI aborted CQ Event work queue list */ | ||
3398 | INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); | ||
3399 | /* Receive queue CQ Event work queue list */ | ||
3400 | INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); | ||
3401 | |||
3402 | /* Initialize the driver internal SLI layer lists. */ | ||
3403 | lpfc_sli_setup(phba); | ||
3404 | lpfc_sli_queue_setup(phba); | ||
3405 | |||
3406 | /* Allocate device driver memory */ | ||
3407 | rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); | ||
3408 | if (rc) | ||
3409 | return -ENOMEM; | ||
3410 | |||
3411 | /* Create the bootstrap mailbox command */ | ||
3412 | rc = lpfc_create_bootstrap_mbox(phba); | ||
3413 | if (unlikely(rc)) | ||
3414 | goto out_free_mem; | ||
3415 | |||
3416 | /* Set up the host's endian order with the device. */ | ||
3417 | rc = lpfc_setup_endian_order(phba); | ||
3418 | if (unlikely(rc)) | ||
3419 | goto out_free_bsmbx; | ||
3420 | |||
3421 | /* Set up the hba's configuration parameters. */ | ||
3422 | rc = lpfc_sli4_read_config(phba); | ||
3423 | if (unlikely(rc)) | ||
3424 | goto out_free_bsmbx; | ||
3425 | |||
3426 | /* Perform a function reset */ | ||
3427 | rc = lpfc_pci_function_reset(phba); | ||
3428 | if (unlikely(rc)) | ||
3429 | goto out_free_bsmbx; | ||
3430 | |||
3431 | /* Create all the SLI4 queues */ | ||
3432 | rc = lpfc_sli4_queue_create(phba); | ||
3433 | if (rc) | ||
3434 | goto out_free_bsmbx; | ||
3435 | |||
3436 | /* Create driver internal CQE event pool */ | ||
3437 | rc = lpfc_sli4_cq_event_pool_create(phba); | ||
3438 | if (rc) | ||
3439 | goto out_destroy_queue; | ||
3440 | |||
3441 | /* Initialize and populate the iocb list per host */ | ||
3442 | rc = lpfc_init_sgl_list(phba); | ||
3443 | if (rc) { | ||
3444 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3445 | "1400 Failed to initialize sgl list.\n"); | ||
3446 | goto out_destroy_cq_event_pool; | ||
3447 | } | ||
3448 | rc = lpfc_init_active_sgl_array(phba); | ||
3449 | if (rc) { | ||
3450 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3451 | "1430 Failed to initialize sgl list.\n"); | ||
3452 | goto out_free_sgl_list; | ||
3453 | } | ||
3454 | |||
3455 | rc = lpfc_sli4_init_rpi_hdrs(phba); | ||
3456 | if (rc) { | ||
3457 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3458 | "1432 Failed to initialize rpi headers.\n"); | ||
3459 | goto out_free_active_sgl; | ||
3460 | } | ||
3461 | |||
3462 | phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * | ||
3463 | phba->cfg_fcp_eq_count), GFP_KERNEL); | ||
3464 | if (!phba->sli4_hba.fcp_eq_hdl) { | ||
3465 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3466 | "2572 Failed allocate memory for fast-path " | ||
3467 | "per-EQ handle array\n"); | ||
3468 | goto out_remove_rpi_hdrs; | ||
3469 | } | ||
3470 | |||
3471 | phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * | ||
3472 | phba->sli4_hba.cfg_eqn), GFP_KERNEL); | ||
3473 | if (!phba->sli4_hba.msix_entries) { | ||
3474 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
3475 | "2573 Failed allocate memory for msi-x " | ||
3476 | "interrupt vector entries\n"); | ||
3477 | goto out_free_fcp_eq_hdl; | ||
3478 | } | ||
3479 | |||
3480 | return rc; | ||
3481 | |||
3482 | out_free_fcp_eq_hdl: | ||
3483 | kfree(phba->sli4_hba.fcp_eq_hdl); | ||
3484 | out_remove_rpi_hdrs: | ||
3485 | lpfc_sli4_remove_rpi_hdrs(phba); | ||
3486 | out_free_active_sgl: | ||
3487 | lpfc_free_active_sgl(phba); | ||
3488 | out_free_sgl_list: | ||
3489 | lpfc_free_sgl_list(phba); | ||
3490 | out_destroy_cq_event_pool: | ||
3491 | lpfc_sli4_cq_event_pool_destroy(phba); | ||
3492 | out_destroy_queue: | ||
3493 | lpfc_sli4_queue_destroy(phba); | ||
3494 | out_free_bsmbx: | ||
3495 | lpfc_destroy_bootstrap_mbox(phba); | ||
3496 | out_free_mem: | ||
3497 | lpfc_mem_free(phba); | ||
3498 | return rc; | ||
3499 | } | ||
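The setup routine above releases partially acquired resources through a chain of goto labels that unwind in reverse order of acquisition, so each failure path only frees what has already succeeded. A compact, hedged sketch of that kernel-style unwind pattern, with placeholder resources standing in for the bootstrap mailbox, queues, and pools:

#include <stdio.h>
#include <stdlib.h>

/* Placeholder acquire/release pairs for the resources created during setup */
static void *ex_acquire(const char *name) { printf("acquire %s\n", name); return malloc(1); }
static void ex_release(const char *name, void *p) { printf("release %s\n", name); free(p); }

static int ex_resource_setup(void)
{
        void *a, *b, *c;

        a = ex_acquire("bootstrap mbox");
        if (!a)
                return -1;
        b = ex_acquire("queues");
        if (!b)
                goto out_free_a;
        c = ex_acquire("cq event pool");
        if (!c)
                goto out_free_b;
        return 0;                       /* success: the caller now owns a, b and c */

out_free_b:
        ex_release("queues", b);
out_free_a:
        ex_release("bootstrap mbox", a);
        return -1;                      /* unwound in reverse acquisition order */
}

int main(void)
{
        return ex_resource_setup() ? 1 : 0;
}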
3500 | |||
3501 | /** | ||
3502 | * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev | ||
3503 | * @phba: pointer to lpfc hba data structure. | ||
3504 | * | ||
3505 | * This routine is invoked to unset the driver internal resources set up | ||
3506 | * specific for supporting the SLI-4 HBA device it attached to. | ||
3507 | **/ | ||
3508 | static void | ||
3509 | lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) | ||
3510 | { | ||
3511 | struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; | ||
3512 | |||
3513 | /* unregister default FCFI from the HBA */ | ||
3514 | lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); | ||
3515 | |||
3516 | /* Free the default FCR table */ | ||
3517 | lpfc_sli_remove_dflt_fcf(phba); | ||
3518 | |||
3519 | /* Free memory allocated for msi-x interrupt vector entries */ | ||
3520 | kfree(phba->sli4_hba.msix_entries); | ||
3521 | |||
3522 | /* Free memory allocated for fast-path work queue handles */ | ||
3523 | kfree(phba->sli4_hba.fcp_eq_hdl); | ||
3524 | |||
3525 | /* Free the allocated rpi headers. */ | ||
3526 | lpfc_sli4_remove_rpi_hdrs(phba); | ||
3527 | |||
3528 | /* Free the ELS sgl list */ | ||
3529 | lpfc_free_active_sgl(phba); | ||
3530 | lpfc_free_sgl_list(phba); | ||
3531 | |||
3532 | /* Free the SCSI sgl management array */ | ||
3533 | kfree(phba->sli4_hba.lpfc_scsi_psb_array); | ||
3534 | |||
3535 | /* Free the SLI4 queues */ | ||
3536 | lpfc_sli4_queue_destroy(phba); | ||
3537 | |||
3538 | /* Free the completion queue EQ event pool */ | ||
3539 | lpfc_sli4_cq_event_release_all(phba); | ||
3540 | lpfc_sli4_cq_event_pool_destroy(phba); | ||
3541 | |||
3542 | /* Reset SLI4 HBA FCoE function */ | ||
3543 | lpfc_pci_function_reset(phba); | ||
3544 | |||
3545 | /* Free the bsmbx region. */ | ||
3546 | lpfc_destroy_bootstrap_mbox(phba); | ||
3547 | |||
3548 | /* Free the SLI Layer memory with SLI4 HBAs */ | ||
3549 | lpfc_mem_free_all(phba); | ||
3550 | |||
3551 | /* Free the current connect table */ | ||
3552 | list_for_each_entry_safe(conn_entry, next_conn_entry, | ||
3553 | &phba->fcf_conn_rec_list, list) | ||
3554 | kfree(conn_entry); | ||
3555 | |||
3556 | return; | ||
3557 | } | ||
3558 | |||
3559 | /** | ||
2767 | * lpfc_init_api_table_setup - Set up init api function jump table | 3560 | * lpfc_init_api_table_setup - Set up init api function jump table |
2768 | * @phba: The hba struct for which this call is being executed. | 3561 | * @phba: The hba struct for which this call is being executed. |
2769 | * @dev_grp: The HBA PCI-Device group number. | 3562 | * @dev_grp: The HBA PCI-Device group number. |
@@ -2782,6 +3575,11 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
2782 | phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; | 3575 | phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; |
2783 | phba->lpfc_stop_port = lpfc_stop_port_s3; | 3576 | phba->lpfc_stop_port = lpfc_stop_port_s3; |
2784 | break; | 3577 | break; |
3578 | case LPFC_PCI_DEV_OC: | ||
3579 | phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; | ||
3580 | phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; | ||
3581 | phba->lpfc_stop_port = lpfc_stop_port_s4; | ||
3582 | break; | ||
2785 | default: | 3583 | default: |
2786 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 3584 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2787 | "1431 Invalid HBA PCI-device group: 0x%x\n", | 3585 | "1431 Invalid HBA PCI-device group: 0x%x\n", |
@@ -2956,6 +3754,346 @@ out_free_iocbq: | |||
2956 | } | 3754 | } |
2957 | 3755 | ||
2958 | /** | 3756 | /** |
3757 | * lpfc_free_sgl_list - Free sgl list. | ||
3758 | * @phba: pointer to lpfc hba data structure. | ||
3759 | * | ||
3760 | * This routine is invoked to free the driver's sgl list and memory. | ||
3761 | **/ | ||
3762 | static void | ||
3763 | lpfc_free_sgl_list(struct lpfc_hba *phba) | ||
3764 | { | ||
3765 | struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; | ||
3766 | LIST_HEAD(sglq_list); | ||
3767 | int rc = 0; | ||
3768 | |||
3769 | spin_lock_irq(&phba->hbalock); | ||
3770 | list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); | ||
3771 | spin_unlock_irq(&phba->hbalock); | ||
3772 | |||
3773 | list_for_each_entry_safe(sglq_entry, sglq_next, | ||
3774 | &sglq_list, list) { | ||
3775 | list_del(&sglq_entry->list); | ||
3776 | lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); | ||
3777 | kfree(sglq_entry); | ||
3778 | phba->sli4_hba.total_sglq_bufs--; | ||
3779 | } | ||
3780 | rc = lpfc_sli4_remove_all_sgl_pages(phba); | ||
3781 | if (rc) { | ||
3782 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
3783 | "2005 Unable to deregister pages from HBA: %x", rc); | ||
3784 | } | ||
3785 | kfree(phba->sli4_hba.lpfc_els_sgl_array); | ||
3786 | } | ||
3787 | |||
3788 | /** | ||
3789 | * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. | ||
3790 | * @phba: pointer to lpfc hba data structure. | ||
3791 | * | ||
3792 | * This routine is invoked to allocate the driver's active sgl memory. | ||
3793 | * This array will hold the sglq_entry's for active IOs. | ||
3794 | **/ | ||
3795 | static int | ||
3796 | lpfc_init_active_sgl_array(struct lpfc_hba *phba) | ||
3797 | { | ||
3798 | int size; | ||
3799 | size = sizeof(struct lpfc_sglq *); | ||
3800 | size *= phba->sli4_hba.max_cfg_param.max_xri; | ||
3801 | |||
3802 | phba->sli4_hba.lpfc_sglq_active_list = | ||
3803 | kzalloc(size, GFP_KERNEL); | ||
3804 | if (!phba->sli4_hba.lpfc_sglq_active_list) | ||
3805 | return -ENOMEM; | ||
3806 | return 0; | ||
3807 | } | ||
3808 | |||
3809 | /** | ||
3810 | * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. | ||
3811 | * @phba: pointer to lpfc hba data structure. | ||
3812 | * | ||
3813 | * This routine is invoked to walk through the array of active sglq entries | ||
3814 | * and free all of the resources. | ||
3815 | * This is just a placeholder for now. | ||
3816 | **/ | ||
3817 | static void | ||
3818 | lpfc_free_active_sgl(struct lpfc_hba *phba) | ||
3819 | { | ||
3820 | kfree(phba->sli4_hba.lpfc_sglq_active_list); | ||
3821 | } | ||
3822 | |||
3823 | /** | ||
3824 | * lpfc_init_sgl_list - Allocate and initialize sgl list. | ||
3825 | * @phba: pointer to lpfc hba data structure. | ||
3826 | * | ||
3827 | * This routine is invoked to allocate and initialize the driver's sgl | ||
3828 | * list and set up the sgl xritag tag array accordingly. | ||
3829 | * | ||
3830 | * Return codes | ||
3831 | * 0 - successful | ||
3832 | * other values - error | ||
3833 | **/ | ||
3834 | static int | ||
3835 | lpfc_init_sgl_list(struct lpfc_hba *phba) | ||
3836 | { | ||
3837 | struct lpfc_sglq *sglq_entry = NULL; | ||
3838 | int i; | ||
3839 | int els_xri_cnt; | ||
3840 | |||
3841 | els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); | ||
3842 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
3843 | "2400 lpfc_init_sgl_list els %d.\n", | ||
3844 | els_xri_cnt); | ||
3845 | /* Initialize and populate the sglq list per host/VF. */ | ||
3846 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); | ||
3847 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); | ||
3848 | |||
3849 | /* Sanity check on XRI management */ | ||
3850 | if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { | ||
3851 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
3852 | "2562 No room left for SCSI XRI allocation: " | ||
3853 | "max_xri=%d, els_xri=%d\n", | ||
3854 | phba->sli4_hba.max_cfg_param.max_xri, | ||
3855 | els_xri_cnt); | ||
3856 | return -ENOMEM; | ||
3857 | } | ||
3858 | |||
3859 | /* Allocate memory for the ELS XRI management array */ | ||
3860 | phba->sli4_hba.lpfc_els_sgl_array = | ||
3861 | kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), | ||
3862 | GFP_KERNEL); | ||
3863 | |||
3864 | if (!phba->sli4_hba.lpfc_els_sgl_array) { | ||
3865 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
3866 | "2401 Failed to allocate memory for ELS " | ||
3867 | "XRI management array of size %d.\n", | ||
3868 | els_xri_cnt); | ||
3869 | return -ENOMEM; | ||
3870 | } | ||
3871 | |||
3872 | /* Keep the SCSI XRI into the XRI management array */ | ||
3873 | phba->sli4_hba.scsi_xri_max = | ||
3874 | phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; | ||
3875 | phba->sli4_hba.scsi_xri_cnt = 0; | ||
3876 | |||
3877 | phba->sli4_hba.lpfc_scsi_psb_array = | ||
3878 | kzalloc((sizeof(struct lpfc_scsi_buf *) * | ||
3879 | phba->sli4_hba.scsi_xri_max), GFP_KERNEL); | ||
3880 | |||
3881 | if (!phba->sli4_hba.lpfc_scsi_psb_array) { | ||
3882 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
3883 | "2563 Failed to allocate memory for SCSI " | ||
3884 | "XRI management array of size %d.\n", | ||
3885 | phba->sli4_hba.scsi_xri_max); | ||
3886 | kfree(phba->sli4_hba.lpfc_els_sgl_array); | ||
3887 | return -ENOMEM; | ||
3888 | } | ||
3889 | |||
3890 | for (i = 0; i < els_xri_cnt; i++) { | ||
3891 | sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); | ||
3892 | if (sglq_entry == NULL) { | ||
3893 | printk(KERN_ERR "%s: only allocated %d sgls of " | ||
3894 | "expected %d count. Unloading driver.\n", | ||
3895 | __func__, i, els_xri_cnt); | ||
3896 | goto out_free_mem; | ||
3897 | } | ||
3898 | |||
3899 | sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); | ||
3900 | if (sglq_entry->sli4_xritag == NO_XRI) { | ||
3901 | kfree(sglq_entry); | ||
3902 | printk(KERN_ERR "%s: failed to allocate XRI.\n" | ||
3903 | "Unloading driver.\n", __func__); | ||
3904 | goto out_free_mem; | ||
3905 | } | ||
3906 | sglq_entry->buff_type = GEN_BUFF_TYPE; | ||
3907 | sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); | ||
3908 | if (sglq_entry->virt == NULL) { | ||
3909 | kfree(sglq_entry); | ||
3910 | printk(KERN_ERR "%s: failed to allocate mbuf.\n" | ||
3911 | "Unloading driver.\n", __func__); | ||
3912 | goto out_free_mem; | ||
3913 | } | ||
3914 | sglq_entry->sgl = sglq_entry->virt; | ||
3915 | memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); | ||
3916 | |||
3917 | /* The list order is used by later block SGL registration */ | ||
3918 | spin_lock_irq(&phba->hbalock); | ||
3919 | list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); | ||
3920 | phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; | ||
3921 | phba->sli4_hba.total_sglq_bufs++; | ||
3922 | spin_unlock_irq(&phba->hbalock); | ||
3923 | } | ||
3924 | return 0; | ||
3925 | |||
3926 | out_free_mem: | ||
3927 | kfree(phba->sli4_hba.lpfc_scsi_psb_array); | ||
3928 | lpfc_free_sgl_list(phba); | ||
3929 | return -ENOMEM; | ||
3930 | } | ||
3931 | |||
3932 | /** | ||
3933 | * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port | ||
3934 | * @phba: pointer to lpfc hba data structure. | ||
3935 | * | ||
3936 | * This routine is invoked to post rpi header templates to the | ||
3937 | * HBA consistent with the SLI-4 interface spec. This routine | ||
3938 | * posts a PAGE_SIZE memory region to the port to hold up to | ||
3939 | * PAGE_SIZE modulo 64 rpi context headers. | ||
3940 | * No locks are held here because this is an initialization routine | ||
3941 | * called only from probe or lpfc_online when interrupts are not | ||
3942 | * enabled and the driver is reinitializing the device. | ||
3943 | * | ||
3944 | * Return codes | ||
3945 | * 0 - successful | ||
3946 | * ENOMEM - No available memory | ||
3947 | * EIO - The mailbox failed to complete successfully. | ||
3948 | **/ | ||
3949 | int | ||
3950 | lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) | ||
3951 | { | ||
3952 | int rc = 0; | ||
3953 | int longs; | ||
3954 | uint16_t rpi_count; | ||
3955 | struct lpfc_rpi_hdr *rpi_hdr; | ||
3956 | |||
3957 | INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); | ||
3958 | |||
3959 | /* | ||
3960 | * Provision an rpi bitmask range for discovery. The total count | ||
3961 | * is the difference between max and base + 1. | ||
3962 | */ | ||
3963 | rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + | ||
3964 | phba->sli4_hba.max_cfg_param.max_rpi - 1; | ||
3965 | |||
3966 | longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; | ||
3967 | phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), | ||
3968 | GFP_KERNEL); | ||
3969 | if (!phba->sli4_hba.rpi_bmask) | ||
3970 | return -ENOMEM; | ||
3971 | |||
3972 | rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); | ||
3973 | if (!rpi_hdr) { | ||
3974 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
3975 | "0391 Error during rpi post operation\n"); | ||
3976 | lpfc_sli4_remove_rpis(phba); | ||
3977 | rc = -ENODEV; | ||
3978 | } | ||
3979 | |||
3980 | return rc; | ||
3981 | } | ||
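The rpi bitmask above is sized with the usual round-up-to-BITS_PER_LONG calculation before it is zero-allocated. A small standalone check of that arithmetic (BITS_PER_LONG is derived here from the host long size; the rpi counts are made-up examples, not values read from any port):

#include <limits.h>
#include <stdio.h>

#define EX_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
        unsigned int counts[] = { 1, 64, 65, 832 }; /* example rpi counts */
        unsigned int i;

        for (i = 0; i < 4; i++) {
                /* Same rounding as the driver: ceil(count / BITS_PER_LONG) longs */
                unsigned int longs = (counts[i] + EX_BITS_PER_LONG - 1) / EX_BITS_PER_LONG;
                printf("%4u rpis -> %u unsigned long(s) of bitmap\n", counts[i], longs);
        }
        return 0;
}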
3982 | |||
3983 | /** | ||
3984 | * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region | ||
3985 | * @phba: pointer to lpfc hba data structure. | ||
3986 | * | ||
3987 | * This routine is invoked to allocate a single 4KB memory region to | ||
3988 | * support rpis and stores them in the phba. This single region | ||
3989 | * provides support for up to 64 rpis. The region is used globally | ||
3990 | * by the device. | ||
3991 | * | ||
3992 | * Returns: | ||
3993 | * A valid rpi hdr on success. | ||
3994 | * A NULL pointer on any failure. | ||
3995 | **/ | ||
3996 | struct lpfc_rpi_hdr * | ||
3997 | lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) | ||
3998 | { | ||
3999 | uint16_t rpi_limit, curr_rpi_range; | ||
4000 | struct lpfc_dmabuf *dmabuf; | ||
4001 | struct lpfc_rpi_hdr *rpi_hdr; | ||
4002 | |||
4003 | rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + | ||
4004 | phba->sli4_hba.max_cfg_param.max_rpi - 1; | ||
4005 | |||
4006 | spin_lock_irq(&phba->hbalock); | ||
4007 | curr_rpi_range = phba->sli4_hba.next_rpi; | ||
4008 | spin_unlock_irq(&phba->hbalock); | ||
4009 | |||
4010 | /* | ||
4011 | * The port has a limited number of rpis. The increment here | ||
4012 | * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value | ||
4013 | * and to allow the full max_rpi range per port. | ||
4014 | */ | ||
4015 | if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) | ||
4016 | return NULL; | ||
4017 | |||
4018 | /* | ||
4019 | * First allocate the protocol header region for the port. The | ||
4020 | * port expects a 4KB DMA-mapped memory region that is 4K aligned. | ||
4021 | */ | ||
4022 | dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
4023 | if (!dmabuf) | ||
4024 | return NULL; | ||
4025 | |||
4026 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, | ||
4027 | LPFC_HDR_TEMPLATE_SIZE, | ||
4028 | &dmabuf->phys, | ||
4029 | GFP_KERNEL); | ||
4030 | if (!dmabuf->virt) { | ||
4031 | rpi_hdr = NULL; | ||
4032 | goto err_free_dmabuf; | ||
4033 | } | ||
4034 | |||
4035 | memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); | ||
4036 | if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { | ||
4037 | rpi_hdr = NULL; | ||
4038 | goto err_free_coherent; | ||
4039 | } | ||
4040 | |||
4041 | /* Save the rpi header data for cleanup later. */ | ||
4042 | rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); | ||
4043 | if (!rpi_hdr) | ||
4044 | goto err_free_coherent; | ||
4045 | |||
4046 | rpi_hdr->dmabuf = dmabuf; | ||
4047 | rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; | ||
4048 | rpi_hdr->page_count = 1; | ||
4049 | spin_lock_irq(&phba->hbalock); | ||
4050 | rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; | ||
4051 | list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); | ||
4052 | |||
4053 | /* | ||
4054 | * The next_rpi stores the next modulo-64 rpi value to post | ||
4055 | * in any subsequent rpi memory region postings. | ||
4056 | */ | ||
4057 | phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; | ||
4058 | spin_unlock_irq(&phba->hbalock); | ||
4059 | return rpi_hdr; | ||
4060 | |||
4061 | err_free_coherent: | ||
4062 | dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, | ||
4063 | dmabuf->virt, dmabuf->phys); | ||
4064 | err_free_dmabuf: | ||
4065 | kfree(dmabuf); | ||
4066 | return NULL; | ||
4067 | } | ||
4068 | |||
4069 | /** | ||
4070 | * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions | ||
4071 | * @phba: pointer to lpfc hba data structure. | ||
4072 | * | ||
4073 | * This routine is invoked to remove all memory resources allocated | ||
4074 | * to support rpis. This routine presumes the caller has released all | ||
4075 | * rpis consumed by fabric or port logins and is prepared to have | ||
4076 | * the header pages removed. | ||
4077 | **/ | ||
4078 | void | ||
4079 | lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) | ||
4080 | { | ||
4081 | struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; | ||
4082 | |||
4083 | list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, | ||
4084 | &phba->sli4_hba.lpfc_rpi_hdr_list, list) { | ||
4085 | list_del(&rpi_hdr->list); | ||
4086 | dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, | ||
4087 | rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); | ||
4088 | kfree(rpi_hdr->dmabuf); | ||
4089 | kfree(rpi_hdr); | ||
4090 | } | ||
4091 | |||
4092 | phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; | ||
4093 | memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); | ||
4094 | } | ||
4095 | |||
4096 | /** | ||
2959 | * lpfc_hba_alloc - Allocate driver hba data structure for a device. | 4097 | * lpfc_hba_alloc - Allocate driver hba data structure for a device. |
2960 | * @pdev: pointer to pci device data structure. | 4098 | * @pdev: pointer to pci device data structure. |
2961 | * | 4099 | * |
@@ -3316,6 +4454,1542 @@ lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) | |||
3316 | } | 4454 | } |
3317 | 4455 | ||
3318 | /** | 4456 | /** |
4457 | * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status | ||
4458 | * @phba: pointer to lpfc hba data structure. | ||
4459 | * | ||
4460 | * This routine is invoked to wait for SLI4 device Power On Self Test (POST) | ||
4461 | * done and check status. | ||
4462 | * | ||
4463 | * Return 0 if successful, otherwise -ENODEV. | ||
4464 | **/ | ||
4465 | int | ||
4466 | lpfc_sli4_post_status_check(struct lpfc_hba *phba) | ||
4467 | { | ||
4468 | struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; | ||
4469 | uint32_t onlnreg0, onlnreg1; | ||
4470 | int i, port_error = -ENODEV; | ||
4471 | |||
4472 | if (!phba->sli4_hba.STAregaddr) | ||
4473 | return -ENODEV; | ||
4474 | |||
4475 | /* With an unrecoverable error, log the error message and return error */ | ||
4476 | onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); | ||
4477 | onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); | ||
4478 | if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { | ||
4479 | uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); | ||
4480 | uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); | ||
4481 | if (uerrlo_reg.word0 || uerrhi_reg.word0) { | ||
4482 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4483 | "1422 HBA Unrecoverable error: " | ||
4484 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " | ||
4485 | "online0_reg=0x%x, online1_reg=0x%x\n", | ||
4486 | uerrlo_reg.word0, uerrhi_reg.word0, | ||
4487 | onlnreg0, onlnreg1); | ||
4488 | } | ||
4489 | return -ENODEV; | ||
4490 | } | ||
4491 | |||
4492 | /* Wait up to 30 seconds for the SLI Port POST done and ready */ | ||
4493 | for (i = 0; i < 3000; i++) { | ||
4494 | sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); | ||
4495 | /* Encounter fatal POST error, break out */ | ||
4496 | if (bf_get(lpfc_hst_state_perr, &sta_reg)) { | ||
4497 | port_error = -ENODEV; | ||
4498 | break; | ||
4499 | } | ||
4500 | if (LPFC_POST_STAGE_ARMFW_READY == | ||
4501 | bf_get(lpfc_hst_state_port_status, &sta_reg)) { | ||
4502 | port_error = 0; | ||
4503 | break; | ||
4504 | } | ||
4505 | msleep(10); | ||
4506 | } | ||
4507 | |||
4508 | if (port_error) | ||
4509 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4510 | "1408 Failure HBA POST Status: sta_reg=0x%x, " | ||
4511 | "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " | ||
4512 | "dl=x%x, pstatus=x%x\n", sta_reg.word0, | ||
4513 | bf_get(lpfc_hst_state_perr, &sta_reg), | ||
4514 | bf_get(lpfc_hst_state_sfi, &sta_reg), | ||
4515 | bf_get(lpfc_hst_state_nip, &sta_reg), | ||
4516 | bf_get(lpfc_hst_state_ipc, &sta_reg), | ||
4517 | bf_get(lpfc_hst_state_xrom, &sta_reg), | ||
4518 | bf_get(lpfc_hst_state_dl, &sta_reg), | ||
4519 | bf_get(lpfc_hst_state_port_status, &sta_reg)); | ||
4520 | |||
4521 | /* Log device information */ | ||
4522 | scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); | ||
4523 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
4524 | "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " | ||
4525 | "FeatureL1=0x%x, FeatureL2=0x%x\n", | ||
4526 | bf_get(lpfc_scratchpad_chiptype, &scratchpad), | ||
4527 | bf_get(lpfc_scratchpad_slirev, &scratchpad), | ||
4528 | bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), | ||
4529 | bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); | ||
4530 | |||
4531 | return port_error; | ||
4532 | } | ||
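The POST check above polls the status register every 10 ms for up to 3000 iterations, roughly a 30 second budget. A user-space sketch of the same bounded-poll idiom, with a stubbed register read standing in for readl() (the "ready" value and the stub behaviour are invented):

#include <stdio.h>
#include <unistd.h>

#define EX_STATE_READY 0x3                          /* invented "firmware ready" value */

/* Stub for readl(): pretend the port becomes ready on the fifth read */
static unsigned int ex_read_status(void)
{
        static int calls;
        return (++calls >= 5) ? EX_STATE_READY : 0;
}

int main(void)
{
        int i, ready = 0;

        for (i = 0; i < 3000; i++) {                /* 3000 * 10ms = ~30s budget */
                if (ex_read_status() == EX_STATE_READY) {
                        ready = 1;
                        break;
                }
                usleep(10000);                      /* 10 ms, like msleep(10) */
        }
        if (ready)
                printf("POST done after %d polls\n", i + 1);
        else
                printf("POST timed out after %d polls\n", i);
        return ready ? 0 : 1;
}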
4533 | |||
4534 | /** | ||
4535 | * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. | ||
4536 | * @phba: pointer to lpfc hba data structure. | ||
4537 | * | ||
4538 | * This routine is invoked to set up SLI4 BAR0 PCI config space register | ||
4539 | * memory map. | ||
4540 | **/ | ||
4541 | static void | ||
4542 | lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) | ||
4543 | { | ||
4544 | phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + | ||
4545 | LPFC_UERR_STATUS_LO; | ||
4546 | phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + | ||
4547 | LPFC_UERR_STATUS_HI; | ||
4548 | phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + | ||
4549 | LPFC_ONLINE0; | ||
4550 | phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + | ||
4551 | LPFC_ONLINE1; | ||
4552 | phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + | ||
4553 | LPFC_SCRATCHPAD; | ||
4554 | } | ||
4555 | |||
4556 | /** | ||
4557 | * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. | ||
4558 | * @phba: pointer to lpfc hba data structure. | ||
4559 | * | ||
4560 | * This routine is invoked to set up SLI4 BAR1 control status register (CSR) | ||
4561 | * memory map. | ||
4562 | **/ | ||
4563 | static void | ||
4564 | lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) | ||
4565 | { | ||
4566 | |||
4567 | phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | ||
4568 | LPFC_HST_STATE; | ||
4569 | phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | ||
4570 | LPFC_HST_ISR0; | ||
4571 | phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | ||
4572 | LPFC_HST_IMR0; | ||
4573 | phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + | ||
4574 | LPFC_HST_ISCR0; | ||
4575 | return; | ||
4576 | } | ||
4577 | |||
4578 | /** | ||
4579 | * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. | ||
4580 | * @phba: pointer to lpfc hba data structure. | ||
4581 | * @vf: virtual function number | ||
4582 | * | ||
4583 | * This routine is invoked to set up SLI4 BAR2 doorbell register memory map | ||
4584 | * based on the given viftual function number, @vf. | ||
4585 | * | ||
4586 | * Return 0 if successful, otherwise -ENODEV. | ||
4587 | **/ | ||
4588 | static int | ||
4589 | lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) | ||
4590 | { | ||
4591 | if (vf > LPFC_VIR_FUNC_MAX) | ||
4592 | return -ENODEV; | ||
4593 | |||
4594 | phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + | ||
4595 | vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); | ||
4596 | phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + | ||
4597 | vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); | ||
4598 | phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + | ||
4599 | vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); | ||
4600 | phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + | ||
4601 | vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); | ||
4602 | phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + | ||
4603 | vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); | ||
4604 | return 0; | ||
4605 | } | ||
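The BAR2 map above places each virtual function's doorbells in its own page-sized window, so a register address is simply base + vf * page_size + register_offset. A hedged sketch of that arithmetic with made-up offsets (the real LPFC_* doorbell offsets and window size come from lpfc_hw4.h and are not reproduced here):

#include <stdint.h>
#include <stdio.h>

#define EX_VF_PAGE_SIZE   4096u                     /* assumed per-VF doorbell window size */
#define EX_RQ_DOORBELL    0x00A0u                   /* invented register offset in the window */

int main(void)
{
        uintptr_t bar2_base = 0xF0000000u;          /* pretend mapped BAR2 base address */
        unsigned int vf;

        for (vf = 0; vf < 3; vf++) {
                /* base + vf * page + register offset, the same shape as the memmap code */
                uintptr_t rqdb = bar2_base + vf * EX_VF_PAGE_SIZE + EX_RQ_DOORBELL;
                printf("vf %u RQ doorbell at 0x%lx\n", vf, (unsigned long)rqdb);
        }
        return 0;
}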
4606 | |||
4607 | /** | ||
4608 | * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox | ||
4609 | * @phba: pointer to lpfc hba data structure. | ||
4610 | * | ||
4611 | * This routine is invoked to create the bootstrap mailbox | ||
4612 | * region consistent with the SLI-4 interface spec. This | ||
4613 | * routine allocates all memory necessary to communicate | ||
4614 | * mailbox commands to the port and sets up all alignment | ||
4615 | * needs. No locks are expected to be held when calling | ||
4616 | * this routine. | ||
4617 | * | ||
4618 | * Return codes | ||
4619 | * 0 - successful | ||
4620 | * ENOMEM - could not allocate memory. | ||
4621 | **/ | ||
4622 | static int | ||
4623 | lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) | ||
4624 | { | ||
4625 | uint32_t bmbx_size; | ||
4626 | struct lpfc_dmabuf *dmabuf; | ||
4627 | struct dma_address *dma_address; | ||
4628 | uint32_t pa_addr; | ||
4629 | uint64_t phys_addr; | ||
4630 | |||
4631 | dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
4632 | if (!dmabuf) | ||
4633 | return -ENOMEM; | ||
4634 | |||
4635 | /* | ||
4636 | * The bootstrap mailbox region is comprised of 2 parts | ||
4637 | * plus an alignment restriction of 16 bytes. | ||
4638 | */ | ||
4639 | bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); | ||
4640 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, | ||
4641 | bmbx_size, | ||
4642 | &dmabuf->phys, | ||
4643 | GFP_KERNEL); | ||
4644 | if (!dmabuf->virt) { | ||
4645 | kfree(dmabuf); | ||
4646 | return -ENOMEM; | ||
4647 | } | ||
4648 | memset(dmabuf->virt, 0, bmbx_size); | ||
4649 | |||
4650 | /* | ||
4651 | * Initialize the bootstrap mailbox pointers now so that the register | ||
4652 | * operations are simple later. The mailbox dma address is required | ||
4653 | * to be 16-byte aligned. Also align the virtual memory as each | ||
4654 | * mailbox is copied into the bmbx mailbox region before issuing the | ||
4655 | * command to the port. | ||
4656 | */ | ||
4657 | phba->sli4_hba.bmbx.dmabuf = dmabuf; | ||
4658 | phba->sli4_hba.bmbx.bmbx_size = bmbx_size; | ||
4659 | |||
4660 | phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, | ||
4661 | LPFC_ALIGN_16_BYTE); | ||
4662 | phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, | ||
4663 | LPFC_ALIGN_16_BYTE); | ||
4664 | |||
4665 | /* | ||
4666 | * Set the high and low physical addresses now. The SLI4 alignment | ||
4667 | * requirement is 16 bytes and the mailbox is posted to the port | ||
4668 | * as two 30-bit addresses. The other data is a bit marking whether | ||
4669 | * the 30-bit address is the high or low address. | ||
4670 | * Upcast bmbx aphys to 64bits so shift instruction compiles | ||
4671 | * clean on 32 bit machines. | ||
4672 | */ | ||
4673 | dma_address = &phba->sli4_hba.bmbx.dma_address; | ||
4674 | phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys; | ||
4675 | pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff); | ||
4676 | dma_address->addr_hi = (uint32_t) ((pa_addr << 2) | | ||
4677 | LPFC_BMBX_BIT1_ADDR_HI); | ||
4678 | |||
4679 | pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff); | ||
4680 | dma_address->addr_lo = (uint32_t) ((pa_addr << 2) | | ||
4681 | LPFC_BMBX_BIT1_ADDR_LO); | ||
4682 | return 0; | ||
4683 | } | ||
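The bootstrap mailbox code above splits the 16-byte-aligned DMA address into two 30-bit halves: bits 33:4 go into the low dword and bits 63:34 into the high dword, each shifted left by two with a flag bit or'ed in. The standalone round trip below only mirrors that bit slicing so it can be sanity-checked; the flag values and the actual register layout belong to the port and are not reproduced:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t aphys = 0x00000012345678F0ULL;     /* example 16-byte aligned DMA address */

        /* Same slicing as the code above: 30 bits from bit 34, 30 bits from bit 4 */
        uint32_t hi30 = (uint32_t)((aphys >> 34) & 0x3fffffff);
        uint32_t lo30 = (uint32_t)((aphys >> 4) & 0x3fffffff);

        /* Reassemble to confirm no address bits were lost by the split */
        uint64_t rebuilt = ((uint64_t)hi30 << 34) | ((uint64_t)lo30 << 4);

        printf("aphys   0x%016llx\n", (unsigned long long)aphys);
        printf("hi30    0x%08x  lo30 0x%08x\n", hi30, lo30);
        printf("rebuilt 0x%016llx (%s)\n", (unsigned long long)rebuilt,
               rebuilt == aphys ? "match" : "mismatch");
        return 0;
}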
4684 | |||
4685 | /** | ||
4686 | * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources | ||
4687 | * @phba: pointer to lpfc hba data structure. | ||
4688 | * | ||
4689 | * This routine is invoked to teardown the bootstrap mailbox | ||
4690 | * region and release all host resources. This routine requires | ||
4691 | * the caller to ensure all mailbox commands have been recovered, that no | ||
4692 | * additional mailbox commands are sent, and that interrupts are disabled | ||
4693 | * before calling this routine. | ||
4694 | * | ||
4695 | **/ | ||
4696 | static void | ||
4697 | lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) | ||
4698 | { | ||
4699 | dma_free_coherent(&phba->pcidev->dev, | ||
4700 | phba->sli4_hba.bmbx.bmbx_size, | ||
4701 | phba->sli4_hba.bmbx.dmabuf->virt, | ||
4702 | phba->sli4_hba.bmbx.dmabuf->phys); | ||
4703 | |||
4704 | kfree(phba->sli4_hba.bmbx.dmabuf); | ||
4705 | memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); | ||
4706 | } | ||
4707 | |||
4708 | /** | ||
4709 | * lpfc_sli4_read_config - Get the config parameters. | ||
4710 | * @phba: pointer to lpfc hba data structure. | ||
4711 | * | ||
4712 | * This routine is invoked to read the configuration parameters from the HBA. | ||
4713 | * The configuration parameters are used to set the base and maximum values | ||
4714 | * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource | ||
4715 | * allocation for the port. | ||
4716 | * | ||
4717 | * Return codes | ||
4718 | * 0 - successful | ||
4719 | * ENOMEM - No available memory | ||
4720 | * EIO - The mailbox failed to complete successfully. | ||
4721 | **/ | ||
4722 | static int | ||
4723 | lpfc_sli4_read_config(struct lpfc_hba *phba) | ||
4724 | { | ||
4725 | LPFC_MBOXQ_t *pmb; | ||
4726 | struct lpfc_mbx_read_config *rd_config; | ||
4727 | uint32_t rc = 0; | ||
4728 | |||
4729 | pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
4730 | if (!pmb) { | ||
4731 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
4732 | "2011 Unable to allocate memory for issuing " | ||
4733 | "SLI_CONFIG_SPECIAL mailbox command\n"); | ||
4734 | return -ENOMEM; | ||
4735 | } | ||
4736 | |||
4737 | lpfc_read_config(phba, pmb); | ||
4738 | |||
4739 | rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); | ||
4740 | if (rc != MBX_SUCCESS) { | ||
4741 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
4742 | "2012 Mailbox failed , mbxCmd x%x " | ||
4743 | "READ_CONFIG, mbxStatus x%x\n", | ||
4744 | bf_get(lpfc_mqe_command, &pmb->u.mqe), | ||
4745 | bf_get(lpfc_mqe_status, &pmb->u.mqe)); | ||
4746 | rc = -EIO; | ||
4747 | } else { | ||
4748 | rd_config = &pmb->u.mqe.un.rd_config; | ||
4749 | phba->sli4_hba.max_cfg_param.max_xri = | ||
4750 | bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); | ||
4751 | phba->sli4_hba.max_cfg_param.xri_base = | ||
4752 | bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); | ||
4753 | phba->sli4_hba.max_cfg_param.max_vpi = | ||
4754 | bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); | ||
4755 | phba->sli4_hba.max_cfg_param.vpi_base = | ||
4756 | bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); | ||
4757 | phba->sli4_hba.max_cfg_param.max_rpi = | ||
4758 | bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); | ||
4759 | phba->sli4_hba.max_cfg_param.rpi_base = | ||
4760 | bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); | ||
4761 | phba->sli4_hba.max_cfg_param.max_vfi = | ||
4762 | bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); | ||
4763 | phba->sli4_hba.max_cfg_param.vfi_base = | ||
4764 | bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); | ||
4765 | phba->sli4_hba.max_cfg_param.max_fcfi = | ||
4766 | bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); | ||
4767 | phba->sli4_hba.max_cfg_param.fcfi_base = | ||
4768 | bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); | ||
4769 | phba->sli4_hba.max_cfg_param.max_eq = | ||
4770 | bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); | ||
4771 | phba->sli4_hba.max_cfg_param.max_rq = | ||
4772 | bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); | ||
4773 | phba->sli4_hba.max_cfg_param.max_wq = | ||
4774 | bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); | ||
4775 | phba->sli4_hba.max_cfg_param.max_cq = | ||
4776 | bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); | ||
4777 | phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); | ||
4778 | phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; | ||
4779 | phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; | ||
4780 | phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; | ||
4781 | phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; | ||
4782 | phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; | ||
4783 | phba->max_vports = phba->max_vpi; | ||
4784 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
4785 | "2003 cfg params XRI(B:%d M:%d), " | ||
4786 | "VPI(B:%d M:%d) " | ||
4787 | "VFI(B:%d M:%d) " | ||
4788 | "RPI(B:%d M:%d) " | ||
4789 | "FCFI(B:%d M:%d)\n", | ||
4790 | phba->sli4_hba.max_cfg_param.xri_base, | ||
4791 | phba->sli4_hba.max_cfg_param.max_xri, | ||
4792 | phba->sli4_hba.max_cfg_param.vpi_base, | ||
4793 | phba->sli4_hba.max_cfg_param.max_vpi, | ||
4794 | phba->sli4_hba.max_cfg_param.vfi_base, | ||
4795 | phba->sli4_hba.max_cfg_param.max_vfi, | ||
4796 | phba->sli4_hba.max_cfg_param.rpi_base, | ||
4797 | phba->sli4_hba.max_cfg_param.max_rpi, | ||
4798 | phba->sli4_hba.max_cfg_param.fcfi_base, | ||
4799 | phba->sli4_hba.max_cfg_param.max_fcfi); | ||
4800 | } | ||
4801 | mempool_free(pmb, phba->mbox_mem_pool); | ||
4802 | |||
4803 | /* Reset the DFT_HBA_Q_DEPTH to the max xri */ | ||
4804 | if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) | ||
4805 | phba->cfg_hba_queue_depth = | ||
4806 | phba->sli4_hba.max_cfg_param.max_xri; | ||
4807 | return rc; | ||
4808 | } | ||
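As an aside on the clamp at the end of lpfc_sli4_read_config(): capping a user-configured value to a limit reported by the hardware is a recurring pattern in this patch. A minimal user-space sketch of that idea follows, using hypothetical names (resource_limits, clamp_queue_depth) that are not part of the driver.

#include <stdio.h>

/* Hypothetical mirror of one base/max pair returned by READ_CONFIG. */
struct resource_limits {
	unsigned int xri_base;
	unsigned int max_xri;
};

/* Clamp a requested queue depth to the number of XRIs the port exposes,
 * as done for cfg_hba_queue_depth above. */
static unsigned int clamp_queue_depth(unsigned int requested,
				      const struct resource_limits *lim)
{
	return (requested > lim->max_xri) ? lim->max_xri : requested;
}

int main(void)
{
	struct resource_limits lim = { .xri_base = 0, .max_xri = 1024 };

	printf("depth=%u\n", clamp_queue_depth(4096, &lim));	/* -> 1024 */
	printf("depth=%u\n", clamp_queue_depth(512, &lim));	/* -> 512  */
	return 0;
}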
4809 | |||
4810 | /** | ||
4811 | * lpfc_setup_endian_order - Notify the port of the host's endian order. | ||
4812 | * @phba: pointer to lpfc hba data structure. | ||
4813 | * | ||
4814 | * This routine is invoked to setup the host-side endian order to the | ||
4815 | * HBA consistent with the SLI-4 interface spec. | ||
4816 | * | ||
4817 | * Return codes | ||
4818 | * 0 - successful | ||
4819 | * ENOMEM - No available memory | ||
4820 | * EIO - The mailbox failed to complete successfully. | ||
4821 | **/ | ||
4822 | static int | ||
4823 | lpfc_setup_endian_order(struct lpfc_hba *phba) | ||
4824 | { | ||
4825 | LPFC_MBOXQ_t *mboxq; | ||
4826 | uint32_t rc = 0; | ||
4827 | uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0, | ||
4828 | HOST_ENDIAN_HIGH_WORD1}; | ||
4829 | |||
4830 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
4831 | if (!mboxq) { | ||
4832 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4833 | "0492 Unable to allocate memory for issuing " | ||
4834 | "SLI_CONFIG_SPECIAL mailbox command\n"); | ||
4835 | return -ENOMEM; | ||
4836 | } | ||
4837 | |||
4838 | /* | ||
4839 | * The SLI4_CONFIG_SPECIAL mailbox command requires the first two | ||
4840 | * words to contain special data values and no other data. | ||
4841 | */ | ||
4842 | memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); | ||
4843 | memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data)); | ||
4844 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
4845 | if (rc != MBX_SUCCESS) { | ||
4846 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4847 | "0493 SLI_CONFIG_SPECIAL mailbox failed with " | ||
4848 | "status x%x\n", | ||
4849 | rc); | ||
4850 | rc = -EIO; | ||
4851 | } | ||
4852 | |||
4853 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
4854 | return rc; | ||
4855 | } | ||
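The routine above relies on the port recognizing a known bit pattern in the first two mailbox words to deduce the host's byte order. A rough user-space sketch of that idea follows; the DEMO_ENDIAN_WORD* values are illustrative placeholders, not the driver's HOST_ENDIAN_* constants.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Illustrative byte patterns only; the real HOST_ENDIAN_*_WORD values live
 * in the driver headers and are not reproduced here. */
#define DEMO_ENDIAN_WORD0 0x11223344u
#define DEMO_ENDIAN_WORD1 0x55667788u

int main(void)
{
	uint32_t mqe[8];                       /* stand-in for the mailbox entry   */
	uint32_t words[2] = { DEMO_ENDIAN_WORD0, DEMO_ENDIAN_WORD1 };
	unsigned char *p;

	memset(mqe, 0, sizeof(mqe));           /* command must carry no other data */
	memcpy(mqe, words, sizeof(words));     /* first two words carry the pattern */

	/* The port reads these bytes back and infers the host's byte order from
	 * the order in which the known pattern arrives on the wire. */
	p = (unsigned char *)mqe;
	printf("first word on the wire: %02x %02x %02x %02x\n",
	       p[0], p[1], p[2], p[3]);
	return 0;
}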
4856 | |||
4857 | /** | ||
4858 | * lpfc_sli4_queue_create - Create all the SLI4 queues | ||
4859 | * @phba: pointer to lpfc hba data structure. | ||
4860 | * | ||
4861 | * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA | ||
4862 | * operation. For each SLI4 queue type, parameters such as the queue entry | ||
4863 | * count (queue depth) shall be taken from the module parameter. For now, | ||
4864 | * we just use a constant number as a placeholder. | ||
4865 | * | ||
4866 | * Return codes | ||
4867 | * 0 - successful | ||
4868 | * ENOMEM - No available memory | ||
4869 | * EIO - The mailbox failed to complete successfully. | ||
4870 | **/ | ||
4871 | static int | ||
4872 | lpfc_sli4_queue_create(struct lpfc_hba *phba) | ||
4873 | { | ||
4874 | struct lpfc_queue *qdesc; | ||
4875 | int fcp_eqidx, fcp_cqidx, fcp_wqidx; | ||
4876 | int cfg_fcp_wq_count; | ||
4877 | int cfg_fcp_eq_count; | ||
4878 | |||
4879 | /* | ||
4880 | * Sanity check for configured queue parameters against the run-time | ||
4881 | * device parameters | ||
4882 | */ | ||
4883 | |||
4884 | /* Sanity check on FCP fast-path WQ parameters */ | ||
4885 | cfg_fcp_wq_count = phba->cfg_fcp_wq_count; | ||
4886 | if (cfg_fcp_wq_count > | ||
4887 | (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) { | ||
4888 | cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq - | ||
4889 | LPFC_SP_WQN_DEF; | ||
4890 | if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) { | ||
4891 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4892 | "2581 Not enough WQs (%d) from " | ||
4893 | "the pci function for supporting " | ||
4894 | "FCP WQs (%d)\n", | ||
4895 | phba->sli4_hba.max_cfg_param.max_wq, | ||
4896 | phba->cfg_fcp_wq_count); | ||
4897 | goto out_error; | ||
4898 | } | ||
4899 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
4900 | "2582 Not enough WQs (%d) from the pci " | ||
4901 | "function for supporting the requested " | ||
4902 | "FCP WQs (%d), the actual FCP WQs can " | ||
4903 | "be supported: %d\n", | ||
4904 | phba->sli4_hba.max_cfg_param.max_wq, | ||
4905 | phba->cfg_fcp_wq_count, cfg_fcp_wq_count); | ||
4906 | } | ||
4907 | /* The actual number of FCP work queues adopted */ | ||
4908 | phba->cfg_fcp_wq_count = cfg_fcp_wq_count; | ||
4909 | |||
4910 | /* Sanity check on FCP fast-path EQ parameters */ | ||
4911 | cfg_fcp_eq_count = phba->cfg_fcp_eq_count; | ||
4912 | if (cfg_fcp_eq_count > | ||
4913 | (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) { | ||
4914 | cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq - | ||
4915 | LPFC_SP_EQN_DEF; | ||
4916 | if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) { | ||
4917 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4918 | "2574 Not enough EQs (%d) from the " | ||
4919 | "pci function for supporting FCP " | ||
4920 | "EQs (%d)\n", | ||
4921 | phba->sli4_hba.max_cfg_param.max_eq, | ||
4922 | phba->cfg_fcp_eq_count); | ||
4923 | goto out_error; | ||
4924 | } | ||
4925 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
4926 | "2575 Not enough EQs (%d) from the pci " | ||
4927 | "function for supporting the requested " | ||
4928 | "FCP EQs (%d), the actual FCP EQs can " | ||
4929 | "be supported: %d\n", | ||
4930 | phba->sli4_hba.max_cfg_param.max_eq, | ||
4931 | phba->cfg_fcp_eq_count, cfg_fcp_eq_count); | ||
4932 | } | ||
4933 | /* It does not make sense to have more EQs than WQs */ | ||
4934 | if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { | ||
4935 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
4936 | "2593 The number of FCP EQs (%d) is more " | ||
4937 | "than the number of FCP WQs (%d), take " | ||
4938 | "the number of FCP EQs same as than of " | ||
4939 | "WQs (%d)\n", cfg_fcp_eq_count, | ||
4940 | phba->cfg_fcp_wq_count, | ||
4941 | phba->cfg_fcp_wq_count); | ||
4942 | cfg_fcp_eq_count = phba->cfg_fcp_wq_count; | ||
4943 | } | ||
4944 | /* The actual number of FCP event queues adopted */ | ||
4945 | phba->cfg_fcp_eq_count = cfg_fcp_eq_count; | ||
4946 | /* The overall number of event queues used */ | ||
4947 | phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; | ||
4948 | |||
4949 | /* | ||
4950 | * Create Event Queues (EQs) | ||
4951 | */ | ||
4952 | |||
4953 | /* Get EQ depth from module parameter, fake the default for now */ | ||
4954 | phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; | ||
4955 | phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; | ||
4956 | |||
4957 | /* Create slow path event queue */ | ||
4958 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, | ||
4959 | phba->sli4_hba.eq_ecount); | ||
4960 | if (!qdesc) { | ||
4961 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4962 | "0496 Failed allocate slow-path EQ\n"); | ||
4963 | goto out_error; | ||
4964 | } | ||
4965 | phba->sli4_hba.sp_eq = qdesc; | ||
4966 | |||
4967 | /* Create fast-path FCP Event Queue(s) */ | ||
4968 | phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * | ||
4969 | phba->cfg_fcp_eq_count), GFP_KERNEL); | ||
4970 | if (!phba->sli4_hba.fp_eq) { | ||
4971 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4972 | "2576 Failed allocate memory for fast-path " | ||
4973 | "EQ record array\n"); | ||
4974 | goto out_free_sp_eq; | ||
4975 | } | ||
4976 | for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { | ||
4977 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, | ||
4978 | phba->sli4_hba.eq_ecount); | ||
4979 | if (!qdesc) { | ||
4980 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4981 | "0497 Failed allocate fast-path EQ\n"); | ||
4982 | goto out_free_fp_eq; | ||
4983 | } | ||
4984 | phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; | ||
4985 | } | ||
4986 | |||
4987 | /* | ||
4988 | * Create Complete Queues (CQs) | ||
4989 | */ | ||
4990 | |||
4991 | /* Get CQ depth from module parameter, fake the default for now */ | ||
4992 | phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; | ||
4993 | phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; | ||
4994 | |||
4995 | /* Create slow-path Mailbox Command Complete Queue */ | ||
4996 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, | ||
4997 | phba->sli4_hba.cq_ecount); | ||
4998 | if (!qdesc) { | ||
4999 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5000 | "0500 Failed allocate slow-path mailbox CQ\n"); | ||
5001 | goto out_free_fp_eq; | ||
5002 | } | ||
5003 | phba->sli4_hba.mbx_cq = qdesc; | ||
5004 | |||
5005 | /* Create slow-path ELS Complete Queue */ | ||
5006 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, | ||
5007 | phba->sli4_hba.cq_ecount); | ||
5008 | if (!qdesc) { | ||
5009 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5010 | "0501 Failed allocate slow-path ELS CQ\n"); | ||
5011 | goto out_free_mbx_cq; | ||
5012 | } | ||
5013 | phba->sli4_hba.els_cq = qdesc; | ||
5014 | |||
5015 | /* Create slow-path Unsolicited Receive Complete Queue */ | ||
5016 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, | ||
5017 | phba->sli4_hba.cq_ecount); | ||
5018 | if (!qdesc) { | ||
5019 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5020 | "0502 Failed allocate slow-path USOL RX CQ\n"); | ||
5021 | goto out_free_els_cq; | ||
5022 | } | ||
5023 | phba->sli4_hba.rxq_cq = qdesc; | ||
5024 | |||
5025 | /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ | ||
5026 | phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * | ||
5027 | phba->cfg_fcp_eq_count), GFP_KERNEL); | ||
5028 | if (!phba->sli4_hba.fcp_cq) { | ||
5029 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5030 | "2577 Failed allocate memory for fast-path " | ||
5031 | "CQ record array\n"); | ||
5032 | goto out_free_rxq_cq; | ||
5033 | } | ||
5034 | for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { | ||
5035 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, | ||
5036 | phba->sli4_hba.cq_ecount); | ||
5037 | if (!qdesc) { | ||
5038 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5039 | "0499 Failed allocate fast-path FCP " | ||
5040 | "CQ (%d)\n", fcp_cqidx); | ||
5041 | goto out_free_fcp_cq; | ||
5042 | } | ||
5043 | phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; | ||
5044 | } | ||
5045 | |||
5046 | /* Create Mailbox Command Queue */ | ||
5047 | phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; | ||
5048 | phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; | ||
5049 | |||
5050 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, | ||
5051 | phba->sli4_hba.mq_ecount); | ||
5052 | if (!qdesc) { | ||
5053 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5054 | "0505 Failed allocate slow-path MQ\n"); | ||
5055 | goto out_free_fcp_cq; | ||
5056 | } | ||
5057 | phba->sli4_hba.mbx_wq = qdesc; | ||
5058 | |||
5059 | /* | ||
5060 | * Create all the Work Queues (WQs) | ||
5061 | */ | ||
5062 | phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; | ||
5063 | phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; | ||
5064 | |||
5065 | /* Create slow-path ELS Work Queue */ | ||
5066 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, | ||
5067 | phba->sli4_hba.wq_ecount); | ||
5068 | if (!qdesc) { | ||
5069 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5070 | "0504 Failed allocate slow-path ELS WQ\n"); | ||
5071 | goto out_free_mbx_wq; | ||
5072 | } | ||
5073 | phba->sli4_hba.els_wq = qdesc; | ||
5074 | |||
5075 | /* Create fast-path FCP Work Queue(s) */ | ||
5076 | phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * | ||
5077 | phba->cfg_fcp_wq_count), GFP_KERNEL); | ||
5078 | if (!phba->sli4_hba.fcp_wq) { | ||
5079 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5080 | "2578 Failed allocate memory for fast-path " | ||
5081 | "WQ record array\n"); | ||
5082 | goto out_free_els_wq; | ||
5083 | } | ||
5084 | for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { | ||
5085 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, | ||
5086 | phba->sli4_hba.wq_ecount); | ||
5087 | if (!qdesc) { | ||
5088 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5089 | "0503 Failed allocate fast-path FCP " | ||
5090 | "WQ (%d)\n", fcp_wqidx); | ||
5091 | goto out_free_fcp_wq; | ||
5092 | } | ||
5093 | phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; | ||
5094 | } | ||
5095 | |||
5096 | /* | ||
5097 | * Create Receive Queue (RQ) | ||
5098 | */ | ||
5099 | phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; | ||
5100 | phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; | ||
5101 | |||
5102 | /* Create Receive Queue for header */ | ||
5103 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, | ||
5104 | phba->sli4_hba.rq_ecount); | ||
5105 | if (!qdesc) { | ||
5106 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5107 | "0506 Failed allocate receive HRQ\n"); | ||
5108 | goto out_free_fcp_wq; | ||
5109 | } | ||
5110 | phba->sli4_hba.hdr_rq = qdesc; | ||
5111 | |||
5112 | /* Create Receive Queue for data */ | ||
5113 | qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, | ||
5114 | phba->sli4_hba.rq_ecount); | ||
5115 | if (!qdesc) { | ||
5116 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5117 | "0507 Failed allocate receive DRQ\n"); | ||
5118 | goto out_free_hdr_rq; | ||
5119 | } | ||
5120 | phba->sli4_hba.dat_rq = qdesc; | ||
5121 | |||
5122 | return 0; | ||
5123 | |||
5124 | out_free_hdr_rq: | ||
5125 | lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); | ||
5126 | phba->sli4_hba.hdr_rq = NULL; | ||
5127 | out_free_fcp_wq: | ||
5128 | for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { | ||
5129 | lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); | ||
5130 | phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; | ||
5131 | } | ||
5132 | kfree(phba->sli4_hba.fcp_wq); | ||
5133 | out_free_els_wq: | ||
5134 | lpfc_sli4_queue_free(phba->sli4_hba.els_wq); | ||
5135 | phba->sli4_hba.els_wq = NULL; | ||
5136 | out_free_mbx_wq: | ||
5137 | lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); | ||
5138 | phba->sli4_hba.mbx_wq = NULL; | ||
5139 | out_free_fcp_cq: | ||
5140 | for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { | ||
5141 | lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); | ||
5142 | phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; | ||
5143 | } | ||
5144 | kfree(phba->sli4_hba.fcp_cq); | ||
5145 | out_free_rxq_cq: | ||
5146 | lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); | ||
5147 | phba->sli4_hba.rxq_cq = NULL; | ||
5148 | out_free_els_cq: | ||
5149 | lpfc_sli4_queue_free(phba->sli4_hba.els_cq); | ||
5150 | phba->sli4_hba.els_cq = NULL; | ||
5151 | out_free_mbx_cq: | ||
5152 | lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); | ||
5153 | phba->sli4_hba.mbx_cq = NULL; | ||
5154 | out_free_fp_eq: | ||
5155 | for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { | ||
5156 | lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]); | ||
5157 | phba->sli4_hba.fp_eq[fcp_eqidx] = NULL; | ||
5158 | } | ||
5159 | kfree(phba->sli4_hba.fp_eq); | ||
5160 | out_free_sp_eq: | ||
5161 | lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); | ||
5162 | phba->sli4_hba.sp_eq = NULL; | ||
5163 | out_error: | ||
5164 | return -ENOMEM; | ||
5165 | } | ||
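The allocation sequence above leans on the classic goto-unwind idiom: each failure label releases only what the earlier stages acquired, in reverse order. A compact, self-contained sketch of the same shape follows, with plain malloc/free stand-ins rather than the driver's queue allocators.

#include <stdio.h>
#include <stdlib.h>

/* Each unwind label frees only what earlier stages allocated, so a failure
 * at any point cleans up exactly once with no double frees. */
static int create_queues_demo(int nqueues)
{
	int i;
	void *sp_eq = NULL, **fp_eq = NULL;

	sp_eq = malloc(64);			/* stage 1: slow-path queue  */
	if (!sp_eq)
		goto out_error;

	fp_eq = calloc(nqueues, sizeof(*fp_eq));/* stage 2: record array     */
	if (!fp_eq)
		goto out_free_sp_eq;

	for (i = 0; i < nqueues; i++) {		/* stage 3: fast-path queues */
		fp_eq[i] = malloc(64);
		if (!fp_eq[i])
			goto out_free_fp_eq;
	}
	printf("created 1 slow-path and %d fast-path queues\n", nqueues);

	/* Demo only: tear everything down again before returning success. */
	for (i = 0; i < nqueues; i++)
		free(fp_eq[i]);
	free(fp_eq);
	free(sp_eq);
	return 0;

out_free_fp_eq:
	for (--i; i >= 0; i--)
		free(fp_eq[i]);
	free(fp_eq);
out_free_sp_eq:
	free(sp_eq);
out_error:
	return -1;
}

int main(void)
{
	return create_queues_demo(4) ? 1 : 0;
}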
5166 | |||
5167 | /** | ||
5168 | * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues | ||
5169 | * @phba: pointer to lpfc hba data structure. | ||
5170 | * | ||
5171 | * This routine is invoked to release all the SLI4 queues with the FCoE HBA | ||
5172 | * operation. | ||
5173 | * | ||
5174 | * Return codes | ||
5175 | * 0 - successful | ||
5176 | * ENOMEM - No available memory | ||
5177 | * EIO - The mailbox failed to complete successfully. | ||
5178 | **/ | ||
5179 | static void | ||
5180 | lpfc_sli4_queue_destroy(struct lpfc_hba *phba) | ||
5181 | { | ||
5182 | int fcp_qidx; | ||
5183 | |||
5184 | /* Release mailbox command work queue */ | ||
5185 | lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); | ||
5186 | phba->sli4_hba.mbx_wq = NULL; | ||
5187 | |||
5188 | /* Release ELS work queue */ | ||
5189 | lpfc_sli4_queue_free(phba->sli4_hba.els_wq); | ||
5190 | phba->sli4_hba.els_wq = NULL; | ||
5191 | |||
5192 | /* Release FCP work queue */ | ||
5193 | for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) | ||
5194 | lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]); | ||
5195 | kfree(phba->sli4_hba.fcp_wq); | ||
5196 | phba->sli4_hba.fcp_wq = NULL; | ||
5197 | |||
5198 | /* Release unsolicited receive queue */ | ||
5199 | lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); | ||
5200 | phba->sli4_hba.hdr_rq = NULL; | ||
5201 | lpfc_sli4_queue_free(phba->sli4_hba.dat_rq); | ||
5202 | phba->sli4_hba.dat_rq = NULL; | ||
5203 | |||
5204 | /* Release unsolicited receive complete queue */ | ||
5205 | lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); | ||
5206 | phba->sli4_hba.rxq_cq = NULL; | ||
5207 | |||
5208 | /* Release ELS complete queue */ | ||
5209 | lpfc_sli4_queue_free(phba->sli4_hba.els_cq); | ||
5210 | phba->sli4_hba.els_cq = NULL; | ||
5211 | |||
5212 | /* Release mailbox command complete queue */ | ||
5213 | lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); | ||
5214 | phba->sli4_hba.mbx_cq = NULL; | ||
5215 | |||
5216 | /* Release FCP response complete queue */ | ||
5217 | for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) | ||
5218 | lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]); | ||
5219 | kfree(phba->sli4_hba.fcp_cq); | ||
5220 | phba->sli4_hba.fcp_cq = NULL; | ||
5221 | |||
5222 | /* Release fast-path event queue */ | ||
5223 | for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) | ||
5224 | lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); | ||
5225 | kfree(phba->sli4_hba.fp_eq); | ||
5226 | phba->sli4_hba.fp_eq = NULL; | ||
5227 | |||
5228 | /* Release slow-path event queue */ | ||
5229 | lpfc_sli4_queue_free(phba->sli4_hba.sp_eq); | ||
5230 | phba->sli4_hba.sp_eq = NULL; | ||
5231 | |||
5232 | return; | ||
5233 | } | ||
5234 | |||
5235 | /** | ||
5236 | * lpfc_sli4_queue_setup - Set up all the SLI4 queues | ||
5237 | * @phba: pointer to lpfc hba data structure. | ||
5238 | * | ||
5239 | * This routine is invoked to set up all the SLI4 queues for the FCoE HBA | ||
5240 | * operation. | ||
5241 | * | ||
5242 | * Return codes | ||
5243 | * 0 - successful | ||
5244 | * ENOMEM - No available memory | ||
5245 | * EIO - The mailbox failed to complete successfully. | ||
5246 | **/ | ||
5247 | int | ||
5248 | lpfc_sli4_queue_setup(struct lpfc_hba *phba) | ||
5249 | { | ||
5250 | int rc = -ENOMEM; | ||
5251 | int fcp_eqidx, fcp_cqidx, fcp_wqidx; | ||
5252 | int fcp_cq_index = 0; | ||
5253 | |||
5254 | /* | ||
5255 | * Set up Event Queues (EQs) | ||
5256 | */ | ||
5257 | |||
5258 | /* Set up slow-path event queue */ | ||
5259 | if (!phba->sli4_hba.sp_eq) { | ||
5260 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5261 | "0520 Slow-path EQ not allocated\n"); | ||
5262 | goto out_error; | ||
5263 | } | ||
5264 | rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, | ||
5265 | LPFC_SP_DEF_IMAX); | ||
5266 | if (rc) { | ||
5267 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5268 | "0521 Failed setup of slow-path EQ: " | ||
5269 | "rc = 0x%x\n", rc); | ||
5270 | goto out_error; | ||
5271 | } | ||
5272 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5273 | "2583 Slow-path EQ setup: queue-id=%d\n", | ||
5274 | phba->sli4_hba.sp_eq->queue_id); | ||
5275 | |||
5276 | /* Set up fast-path event queue */ | ||
5277 | for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { | ||
5278 | if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { | ||
5279 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5280 | "0522 Fast-path EQ (%d) not " | ||
5281 | "allocated\n", fcp_eqidx); | ||
5282 | goto out_destroy_fp_eq; | ||
5283 | } | ||
5284 | rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], | ||
5285 | phba->cfg_fcp_imax); | ||
5286 | if (rc) { | ||
5287 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5288 | "0523 Failed setup of fast-path EQ " | ||
5289 | "(%d), rc = 0x%x\n", fcp_eqidx, rc); | ||
5290 | goto out_destroy_fp_eq; | ||
5291 | } | ||
5292 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5293 | "2584 Fast-path EQ setup: " | ||
5294 | "queue[%d]-id=%d\n", fcp_eqidx, | ||
5295 | phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); | ||
5296 | } | ||
5297 | |||
5298 | /* | ||
5299 | * Set up Complete Queues (CQs) | ||
5300 | */ | ||
5301 | |||
5302 | /* Set up slow-path MBOX Complete Queue as the first CQ */ | ||
5303 | if (!phba->sli4_hba.mbx_cq) { | ||
5304 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5305 | "0528 Mailbox CQ not allocated\n"); | ||
5306 | goto out_destroy_fp_eq; | ||
5307 | } | ||
5308 | rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, | ||
5309 | LPFC_MCQ, LPFC_MBOX); | ||
5310 | if (rc) { | ||
5311 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5312 | "0529 Failed setup of slow-path mailbox CQ: " | ||
5313 | "rc = 0x%x\n", rc); | ||
5314 | goto out_destroy_fp_eq; | ||
5315 | } | ||
5316 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5317 | "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", | ||
5318 | phba->sli4_hba.mbx_cq->queue_id, | ||
5319 | phba->sli4_hba.sp_eq->queue_id); | ||
5320 | |||
5321 | /* Set up slow-path ELS Complete Queue */ | ||
5322 | if (!phba->sli4_hba.els_cq) { | ||
5323 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5324 | "0530 ELS CQ not allocated\n"); | ||
5325 | goto out_destroy_mbx_cq; | ||
5326 | } | ||
5327 | rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, | ||
5328 | LPFC_WCQ, LPFC_ELS); | ||
5329 | if (rc) { | ||
5330 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5331 | "0531 Failed setup of slow-path ELS CQ: " | ||
5332 | "rc = 0x%x\n", rc); | ||
5333 | goto out_destroy_mbx_cq; | ||
5334 | } | ||
5335 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5336 | "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", | ||
5337 | phba->sli4_hba.els_cq->queue_id, | ||
5338 | phba->sli4_hba.sp_eq->queue_id); | ||
5339 | |||
5340 | /* Set up slow-path Unsolicited Receive Complete Queue */ | ||
5341 | if (!phba->sli4_hba.rxq_cq) { | ||
5342 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5343 | "0532 USOL RX CQ not allocated\n"); | ||
5344 | goto out_destroy_els_cq; | ||
5345 | } | ||
5346 | rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq, | ||
5347 | LPFC_RCQ, LPFC_USOL); | ||
5348 | if (rc) { | ||
5349 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5350 | "0533 Failed setup of slow-path USOL RX CQ: " | ||
5351 | "rc = 0x%x\n", rc); | ||
5352 | goto out_destroy_els_cq; | ||
5353 | } | ||
5354 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5355 | "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n", | ||
5356 | phba->sli4_hba.rxq_cq->queue_id, | ||
5357 | phba->sli4_hba.sp_eq->queue_id); | ||
5358 | |||
5359 | /* Set up fast-path FCP Response Complete Queue */ | ||
5360 | for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { | ||
5361 | if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { | ||
5362 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5363 | "0526 Fast-path FCP CQ (%d) not " | ||
5364 | "allocated\n", fcp_cqidx); | ||
5365 | goto out_destroy_fcp_cq; | ||
5366 | } | ||
5367 | rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], | ||
5368 | phba->sli4_hba.fp_eq[fcp_cqidx], | ||
5369 | LPFC_WCQ, LPFC_FCP); | ||
5370 | if (rc) { | ||
5371 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5372 | "0527 Failed setup of fast-path FCP " | ||
5373 | "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); | ||
5374 | goto out_destroy_fcp_cq; | ||
5375 | } | ||
5376 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5377 | "2588 FCP CQ setup: cq[%d]-id=%d, " | ||
5378 | "parent eq[%d]-id=%d\n", | ||
5379 | fcp_cqidx, | ||
5380 | phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, | ||
5381 | fcp_cqidx, | ||
5382 | phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); | ||
5383 | } | ||
5384 | |||
5385 | /* | ||
5386 | * Set up all the Work Queues (WQs) | ||
5387 | */ | ||
5388 | |||
5389 | /* Set up Mailbox Command Queue */ | ||
5390 | if (!phba->sli4_hba.mbx_wq) { | ||
5391 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5392 | "0538 Slow-path MQ not allocated\n"); | ||
5393 | goto out_destroy_fcp_cq; | ||
5394 | } | ||
5395 | rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, | ||
5396 | phba->sli4_hba.mbx_cq, LPFC_MBOX); | ||
5397 | if (rc) { | ||
5398 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5399 | "0539 Failed setup of slow-path MQ: " | ||
5400 | "rc = 0x%x\n", rc); | ||
5401 | goto out_destroy_fcp_cq; | ||
5402 | } | ||
5403 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5404 | "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", | ||
5405 | phba->sli4_hba.mbx_wq->queue_id, | ||
5406 | phba->sli4_hba.mbx_cq->queue_id); | ||
5407 | |||
5408 | /* Set up slow-path ELS Work Queue */ | ||
5409 | if (!phba->sli4_hba.els_wq) { | ||
5410 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5411 | "0536 Slow-path ELS WQ not allocated\n"); | ||
5412 | goto out_destroy_mbx_wq; | ||
5413 | } | ||
5414 | rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, | ||
5415 | phba->sli4_hba.els_cq, LPFC_ELS); | ||
5416 | if (rc) { | ||
5417 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5418 | "0537 Failed setup of slow-path ELS WQ: " | ||
5419 | "rc = 0x%x\n", rc); | ||
5420 | goto out_destroy_mbx_wq; | ||
5421 | } | ||
5422 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5423 | "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", | ||
5424 | phba->sli4_hba.els_wq->queue_id, | ||
5425 | phba->sli4_hba.els_cq->queue_id); | ||
5426 | |||
5427 | /* Set up fast-path FCP Work Queue */ | ||
5428 | for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { | ||
5429 | if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { | ||
5430 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5431 | "0534 Fast-path FCP WQ (%d) not " | ||
5432 | "allocated\n", fcp_wqidx); | ||
5433 | goto out_destroy_fcp_wq; | ||
5434 | } | ||
5435 | rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], | ||
5436 | phba->sli4_hba.fcp_cq[fcp_cq_index], | ||
5437 | LPFC_FCP); | ||
5438 | if (rc) { | ||
5439 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5440 | "0535 Failed setup of fast-path FCP " | ||
5441 | "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); | ||
5442 | goto out_destroy_fcp_wq; | ||
5443 | } | ||
5444 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5445 | "2591 FCP WQ setup: wq[%d]-id=%d, " | ||
5446 | "parent cq[%d]-id=%d\n", | ||
5447 | fcp_wqidx, | ||
5448 | phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, | ||
5449 | fcp_cq_index, | ||
5450 | phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); | ||
5451 | /* Round robin FCP Work Queue's Completion Queue assignment */ | ||
5452 | fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); | ||
5453 | } | ||
5454 | |||
5455 | /* | ||
5456 | * Create Receive Queue (RQ) | ||
5457 | */ | ||
5458 | if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { | ||
5459 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5460 | "0540 Receive Queue not allocated\n"); | ||
5461 | goto out_destroy_fcp_wq; | ||
5462 | } | ||
5463 | rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, | ||
5464 | phba->sli4_hba.rxq_cq, LPFC_USOL); | ||
5465 | if (rc) { | ||
5466 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5467 | "0541 Failed setup of Receive Queue: " | ||
5468 | "rc = 0x%x\n", rc); | ||
5469 | goto out_destroy_fcp_wq; | ||
5470 | } | ||
5471 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
5472 | "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " | ||
5473 | "parent cq-id=%d\n", | ||
5474 | phba->sli4_hba.hdr_rq->queue_id, | ||
5475 | phba->sli4_hba.dat_rq->queue_id, | ||
5476 | phba->sli4_hba.rxq_cq->queue_id); | ||
5477 | return 0; | ||
5478 | |||
5479 | out_destroy_fcp_wq: | ||
5480 | for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) | ||
5481 | lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]); | ||
5482 | lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); | ||
5483 | out_destroy_mbx_wq: | ||
5484 | lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); | ||
5485 | out_destroy_fcp_cq: | ||
5486 | for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) | ||
5487 | lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]); | ||
5488 | lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); | ||
5489 | out_destroy_els_cq: | ||
5490 | lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); | ||
5491 | out_destroy_mbx_cq: | ||
5492 | lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); | ||
5493 | out_destroy_fp_eq: | ||
5494 | for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) | ||
5495 | lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]); | ||
5496 | lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); | ||
5497 | out_error: | ||
5498 | return rc; | ||
5499 | } | ||
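The fcp_cq_index arithmetic above distributes work queues across a possibly smaller set of completion queues. A tiny sketch of that round-robin mapping, with illustrative counts only:

#include <stdio.h>

/* Spread work queues across a smaller set of completion queues in
 * round-robin fashion, mirroring the fcp_cq_index update above. */
int main(void)
{
	const int num_wq = 8, num_cq = 3;	/* illustrative counts only */
	int wq, cq = 0;

	for (wq = 0; wq < num_wq; wq++) {
		printf("WQ[%d] -> CQ[%d]\n", wq, cq);
		cq = (cq + 1) % num_cq;
	}
	return 0;
}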
5500 | |||
5501 | /** | ||
5502 | * lpfc_sli4_queue_unset - Unset all the SLI4 queues | ||
5503 | * @phba: pointer to lpfc hba data structure. | ||
5504 | * | ||
5505 | * This routine is invoked to unset all the SLI4 queues with the FCoE HBA | ||
5506 | * operation. | ||
5507 | * | ||
5508 | * Return codes | ||
5509 | * 0 - successful | ||
5510 | * ENOMEM - No available memory | ||
5511 | * EIO - The mailbox failed to complete successfully. | ||
5512 | **/ | ||
5513 | void | ||
5514 | lpfc_sli4_queue_unset(struct lpfc_hba *phba) | ||
5515 | { | ||
5516 | int fcp_qidx; | ||
5517 | |||
5518 | /* Unset mailbox command work queue */ | ||
5519 | lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq); | ||
5520 | /* Unset ELS work queue */ | ||
5521 | lpfc_wq_destroy(phba, phba->sli4_hba.els_wq); | ||
5522 | /* Unset unsolicited receive queue */ | ||
5523 | lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq); | ||
5524 | /* Unset FCP work queue */ | ||
5525 | for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) | ||
5526 | lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]); | ||
5527 | /* Unset mailbox command complete queue */ | ||
5528 | lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq); | ||
5529 | /* Unset ELS complete queue */ | ||
5530 | lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); | ||
5531 | /* Unset unsolicited receive complete queue */ | ||
5532 | lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq); | ||
5533 | /* Unset FCP response complete queue */ | ||
5534 | for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) | ||
5535 | lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); | ||
5536 | /* Unset fast-path event queue */ | ||
5537 | for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) | ||
5538 | lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); | ||
5539 | /* Unset slow-path event queue */ | ||
5540 | lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq); | ||
5541 | } | ||
5542 | |||
5543 | /** | ||
5544 | * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool | ||
5545 | * @phba: pointer to lpfc hba data structure. | ||
5546 | * | ||
5547 | * This routine is invoked to allocate and set up a pool of completion queue | ||
5548 | * events. The body of the completion queue event is a completion queue entry | ||
5549 | * CQE. For now, this pool is used for the interrupt service routine to queue | ||
5550 | * the following HBA completion queue events for the worker thread to process: | ||
5551 | * - Mailbox asynchronous events | ||
5552 | * - Receive queue completion unsolicited events | ||
5553 | * Later, this can be used for all the slow-path events. | ||
5554 | * | ||
5555 | * Return codes | ||
5556 | * 0 - successful | ||
5557 | * -ENOMEM - No available memory | ||
5558 | **/ | ||
5559 | static int | ||
5560 | lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba) | ||
5561 | { | ||
5562 | struct lpfc_cq_event *cq_event; | ||
5563 | int i; | ||
5564 | |||
5565 | for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) { | ||
5566 | cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL); | ||
5567 | if (!cq_event) | ||
5568 | goto out_pool_create_fail; | ||
5569 | list_add_tail(&cq_event->list, | ||
5570 | &phba->sli4_hba.sp_cqe_event_pool); | ||
5571 | } | ||
5572 | return 0; | ||
5573 | |||
5574 | out_pool_create_fail: | ||
5575 | lpfc_sli4_cq_event_pool_destroy(phba); | ||
5576 | return -ENOMEM; | ||
5577 | } | ||
5578 | |||
5579 | /** | ||
5580 | * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool | ||
5581 | * @phba: pointer to lpfc hba data structure. | ||
5582 | * | ||
5583 | * This routine is invoked to free the pool of completion queue events at | ||
5584 | * driver unload time. Note that it is the responsibility of the driver | ||
5585 | * cleanup routine to free all the outstanding completion-queue events | ||
5586 | * allocated from this pool back into the pool before invoking this routine | ||
5587 | * to destroy the pool. | ||
5588 | **/ | ||
5589 | static void | ||
5590 | lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba) | ||
5591 | { | ||
5592 | struct lpfc_cq_event *cq_event, *next_cq_event; | ||
5593 | |||
5594 | list_for_each_entry_safe(cq_event, next_cq_event, | ||
5595 | &phba->sli4_hba.sp_cqe_event_pool, list) { | ||
5596 | list_del(&cq_event->list); | ||
5597 | kfree(cq_event); | ||
5598 | } | ||
5599 | } | ||
5600 | |||
5601 | /** | ||
5602 | * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool | ||
5603 | * @phba: pointer to lpfc hba data structure. | ||
5604 | * | ||
5605 | * This routine is the lock free version of the API invoked to allocate a | ||
5606 | * completion-queue event from the free pool. | ||
5607 | * | ||
5608 | * Return: Pointer to the newly allocated completion-queue event if successful | ||
5609 | * NULL otherwise. | ||
5610 | **/ | ||
5611 | struct lpfc_cq_event * | ||
5612 | __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) | ||
5613 | { | ||
5614 | struct lpfc_cq_event *cq_event = NULL; | ||
5615 | |||
5616 | list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event, | ||
5617 | struct lpfc_cq_event, list); | ||
5618 | return cq_event; | ||
5619 | } | ||
5620 | |||
5621 | /** | ||
5622 | * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool | ||
5623 | * @phba: pointer to lpfc hba data structure. | ||
5624 | * | ||
5625 | * This routine is the lock version of the API invoked to allocate a | ||
5626 | * completion-queue event from the free pool. | ||
5627 | * | ||
5628 | * Return: Pointer to the newly allocated completion-queue event if successful | ||
5629 | * NULL otherwise. | ||
5630 | **/ | ||
5631 | struct lpfc_cq_event * | ||
5632 | lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba) | ||
5633 | { | ||
5634 | struct lpfc_cq_event *cq_event; | ||
5635 | unsigned long iflags; | ||
5636 | |||
5637 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
5638 | cq_event = __lpfc_sli4_cq_event_alloc(phba); | ||
5639 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
5640 | return cq_event; | ||
5641 | } | ||
5642 | |||
5643 | /** | ||
5644 | * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool | ||
5645 | * @phba: pointer to lpfc hba data structure. | ||
5646 | * @cq_event: pointer to the completion queue event to be freed. | ||
5647 | * | ||
5648 | * This routine is the lock free version of the API invoked to release a | ||
5649 | * completion-queue event back into the free pool. | ||
5650 | **/ | ||
5651 | void | ||
5652 | __lpfc_sli4_cq_event_release(struct lpfc_hba *phba, | ||
5653 | struct lpfc_cq_event *cq_event) | ||
5654 | { | ||
5655 | list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool); | ||
5656 | } | ||
5657 | |||
5658 | /** | ||
5659 | * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool | ||
5660 | * @phba: pointer to lpfc hba data structure. | ||
5661 | * @cq_event: pointer to the completion queue event to be freed. | ||
5662 | * | ||
5663 | * This routine is the lock version of the API invoked to release a | ||
5664 | * completion-queue event back into the free pool. | ||
5665 | **/ | ||
5666 | void | ||
5667 | lpfc_sli4_cq_event_release(struct lpfc_hba *phba, | ||
5668 | struct lpfc_cq_event *cq_event) | ||
5669 | { | ||
5670 | unsigned long iflags; | ||
5671 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
5672 | __lpfc_sli4_cq_event_release(phba, cq_event); | ||
5673 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
5674 | } | ||
5675 | |||
5676 | /** | ||
5677 | * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool | ||
5678 | * @phba: pointer to lpfc hba data structure. | ||
5679 | * | ||
5680 | * This routine is to free all the pending completion-queue events back | ||
5681 | * into the free pool for device reset. | ||
5682 | **/ | ||
5683 | static void | ||
5684 | lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba) | ||
5685 | { | ||
5686 | LIST_HEAD(cqelist); | ||
5687 | struct lpfc_cq_event *cqe; | ||
5688 | unsigned long iflags; | ||
5689 | |||
5690 | /* Retrieve all the pending WCQEs from pending WCQE lists */ | ||
5691 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
5692 | /* Pending FCP XRI abort events */ | ||
5693 | list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, | ||
5694 | &cqelist); | ||
5695 | /* Pending ELS XRI abort events */ | ||
5696 | list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue, | ||
5697 | &cqelist); | ||
5698 | /* Pending async events */ | ||
5699 | list_splice_init(&phba->sli4_hba.sp_asynce_work_queue, | ||
5700 | &cqelist); | ||
5701 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
5702 | |||
5703 | while (!list_empty(&cqelist)) { | ||
5704 | list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list); | ||
5705 | lpfc_sli4_cq_event_release(phba, cqe); | ||
5706 | } | ||
5707 | } | ||
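The routines above implement a pre-allocated free pool with a lock-free inner helper and a locking wrapper. A simplified user-space sketch of the same structure follows, using a pthread mutex and a singly linked list in place of hbalock and the kernel list API (demo names only; build with -lpthread).

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal free pool: events are pre-allocated and recycled through a list
 * so the hot path never has to call the allocator. */
struct demo_event {
	struct demo_event *next;
};

static struct demo_event *pool_head;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller-holds-lock helpers, mirroring the __-prefixed pair above. */
static struct demo_event *__event_alloc(void)
{
	struct demo_event *ev = pool_head;

	if (ev)
		pool_head = ev->next;
	return ev;
}

static void __event_release(struct demo_event *ev)
{
	ev->next = pool_head;
	pool_head = ev;
}

/* Locking wrappers used by ordinary contexts. */
static struct demo_event *event_alloc(void)
{
	struct demo_event *ev;

	pthread_mutex_lock(&pool_lock);
	ev = __event_alloc();
	pthread_mutex_unlock(&pool_lock);
	return ev;
}

static void event_release(struct demo_event *ev)
{
	pthread_mutex_lock(&pool_lock);
	__event_release(ev);
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	struct demo_event *ev;
	int i;

	for (i = 0; i < 4; i++) {		/* pre-populate the pool */
		ev = malloc(sizeof(*ev));
		if (!ev)
			return 1;
		__event_release(ev);
	}
	ev = event_alloc();			/* take one, then give it back */
	printf("got event %p\n", (void *)ev);
	event_release(ev);
	while ((ev = event_alloc()) != NULL)	/* drain the pool on exit */
		free(ev);
	return 0;
}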
5708 | |||
5709 | /** | ||
5710 | * lpfc_pci_function_reset - Reset pci function. | ||
5711 | * @phba: pointer to lpfc hba data structure. | ||
5712 | * | ||
5713 | * This routine is invoked to request a PCI function reset. It will destroy | ||
5714 | * all resources assigned to the PCI function that originates this request. | ||
5715 | * | ||
5716 | * Return codes | ||
5717 | * 0 - successful | ||
5718 | * ENOMEM - No available memory | ||
5719 | * EIO - The mailbox failed to complete successfully. | ||
5720 | **/ | ||
5721 | int | ||
5722 | lpfc_pci_function_reset(struct lpfc_hba *phba) | ||
5723 | { | ||
5724 | LPFC_MBOXQ_t *mboxq; | ||
5725 | uint32_t rc = 0; | ||
5726 | uint32_t shdr_status, shdr_add_status; | ||
5727 | union lpfc_sli4_cfg_shdr *shdr; | ||
5728 | |||
5729 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
5730 | if (!mboxq) { | ||
5731 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5732 | "0494 Unable to allocate memory for issuing " | ||
5733 | "SLI_FUNCTION_RESET mailbox command\n"); | ||
5734 | return -ENOMEM; | ||
5735 | } | ||
5736 | |||
5737 | /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */ | ||
5738 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, | ||
5739 | LPFC_MBOX_OPCODE_FUNCTION_RESET, 0, | ||
5740 | LPFC_SLI4_MBX_EMBED); | ||
5741 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
5742 | shdr = (union lpfc_sli4_cfg_shdr *) | ||
5743 | &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; | ||
5744 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
5745 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); | ||
5746 | if (rc != MBX_TIMEOUT) | ||
5747 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
5748 | if (shdr_status || shdr_add_status || rc) { | ||
5749 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5750 | "0495 SLI_FUNCTION_RESET mailbox failed with " | ||
5751 | "status x%x add_status x%x, mbx status x%x\n", | ||
5752 | shdr_status, shdr_add_status, rc); | ||
5753 | rc = -ENXIO; | ||
5754 | } | ||
5755 | return rc; | ||
5756 | } | ||
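Note how success requires both the mailbox return code and the embedded SLI4 config header status words to be clean. A small sketch of that combined check, with hypothetical demo types standing in for the real cfg_shdr layout:

#include <stdio.h>

/* A command is only considered successful when both the transport-level
 * return and the embedded header status words are clean, mirroring the
 * shdr_status/shdr_add_status checks above. */
struct demo_cfg_shdr {
	unsigned int status;
	unsigned int add_status;
};

static int check_mbox_result(int mbx_rc, const struct demo_cfg_shdr *shdr)
{
	if (mbx_rc != 0 || shdr->status != 0 || shdr->add_status != 0) {
		fprintf(stderr,
			"mailbox failed: rc=%d status=0x%x add_status=0x%x\n",
			mbx_rc, shdr->status, shdr->add_status);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_cfg_shdr ok = { 0, 0 }, bad = { 0x2, 0x11 };

	printf("good: %d\n", check_mbox_result(0, &ok));
	printf("bad:  %d\n", check_mbox_result(0, &bad));
	return 0;
}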
5757 | |||
5758 | /** | ||
5759 | * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands | ||
5760 | * @phba: pointer to lpfc hba data structure. | ||
5761 | * @cnt: number of nop mailbox commands to send. | ||
5762 | * | ||
5763 | * This routine is invoked to send @cnt NOP mailbox commands and | ||
5764 | * wait for each command to complete. | ||
5765 | * | ||
5766 | * Return: the number of NOP mailbox commands completed. | ||
5767 | **/ | ||
5768 | static int | ||
5769 | lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) | ||
5770 | { | ||
5771 | LPFC_MBOXQ_t *mboxq; | ||
5772 | int length, cmdsent; | ||
5773 | uint32_t mbox_tmo; | ||
5774 | uint32_t rc = 0; | ||
5775 | uint32_t shdr_status, shdr_add_status; | ||
5776 | union lpfc_sli4_cfg_shdr *shdr; | ||
5777 | |||
5778 | if (cnt == 0) { | ||
5779 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
5780 | "2518 Requested to send 0 NOP mailbox cmd\n"); | ||
5781 | return cnt; | ||
5782 | } | ||
5783 | |||
5784 | mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
5785 | if (!mboxq) { | ||
5786 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
5787 | "2519 Unable to allocate memory for issuing " | ||
5788 | "NOP mailbox command\n"); | ||
5789 | return 0; | ||
5790 | } | ||
5791 | |||
5792 | /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ | ||
5793 | length = (sizeof(struct lpfc_mbx_nop) - | ||
5794 | sizeof(struct lpfc_sli4_cfg_mhdr)); | ||
5795 | lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, | ||
5796 | LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); | ||
5797 | |||
5798 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); | ||
5799 | for (cmdsent = 0; cmdsent < cnt; cmdsent++) { | ||
5800 | if (!phba->sli4_hba.intr_enable) | ||
5801 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
5802 | else | ||
5803 | rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); | ||
5804 | if (rc == MBX_TIMEOUT) | ||
5805 | break; | ||
5806 | /* Check return status */ | ||
5807 | shdr = (union lpfc_sli4_cfg_shdr *) | ||
5808 | &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; | ||
5809 | shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); | ||
5810 | shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, | ||
5811 | &shdr->response); | ||
5812 | if (shdr_status || shdr_add_status || rc) { | ||
5813 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
5814 | "2520 NOP mailbox command failed " | ||
5815 | "status x%x add_status x%x mbx " | ||
5816 | "status x%x\n", shdr_status, | ||
5817 | shdr_add_status, rc); | ||
5818 | break; | ||
5819 | } | ||
5820 | } | ||
5821 | |||
5822 | if (rc != MBX_TIMEOUT) | ||
5823 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
5824 | |||
5825 | return cmdsent; | ||
5826 | } | ||
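The NOP sender above reports how many commands actually completed rather than a simple pass/fail code. A minimal sketch of that contract, with a stubbed command issuer standing in for the mailbox path:

#include <stdio.h>

/* Stand-in command issuer: succeeds until a hypothetical failure point. */
static int issue_cmd_demo(int n)
{
	return (n < 3) ? 0 : -1;	/* pretend the 4th command fails */
}

/* Send up to cnt commands, stop at the first failure, and report how many
 * actually completed -- the same contract as the NOP sender above. */
static int send_nops_demo(int cnt)
{
	int sent;

	for (sent = 0; sent < cnt; sent++) {
		if (issue_cmd_demo(sent) != 0)
			break;
	}
	return sent;
}

int main(void)
{
	printf("completed %d of 5 commands\n", send_nops_demo(5));
	return 0;
}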
5827 | |||
5828 | /** | ||
5829 | * lpfc_sli4_fcfi_unreg - Unregister fcfi to device | ||
5830 | * @phba: pointer to lpfc hba data structure. | ||
5831 | * @fcfi: fcf index. | ||
5832 | * | ||
5833 | * This routine is invoked to unregister a FCFI from device. | ||
5834 | **/ | ||
5835 | void | ||
5836 | lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) | ||
5837 | { | ||
5838 | LPFC_MBOXQ_t *mbox; | ||
5839 | uint32_t mbox_tmo; | ||
5840 | int rc; | ||
5841 | unsigned long flags; | ||
5842 | |||
5843 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
5844 | |||
5845 | if (!mbox) | ||
5846 | return; | ||
5847 | |||
5848 | lpfc_unreg_fcfi(mbox, fcfi); | ||
5849 | |||
5850 | if (!phba->sli4_hba.intr_enable) | ||
5851 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); | ||
5852 | else { | ||
5853 | mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); | ||
5854 | rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); | ||
5855 | } | ||
5856 | if (rc != MBX_TIMEOUT) | ||
5857 | mempool_free(mbox, phba->mbox_mem_pool); | ||
5858 | if (rc != MBX_SUCCESS) | ||
5859 | lpfc_printf_log(phba, KERN_ERR, LOG_SLI, | ||
5860 | "2517 Unregister FCFI command failed " | ||
5861 | "status %d, mbxStatus x%x\n", rc, | ||
5862 | bf_get(lpfc_mqe_status, &mbox->u.mqe)); | ||
5863 | else { | ||
5864 | spin_lock_irqsave(&phba->hbalock, flags); | ||
5865 | /* Mark the FCFI is no longer registered */ | ||
5866 | phba->fcf.fcf_flag &= | ||
5867 | ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); | ||
5868 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
5869 | } | ||
5870 | } | ||
5871 | |||
5872 | /** | ||
5873 | * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. | ||
5874 | * @phba: pointer to lpfc hba data structure. | ||
5875 | * | ||
5876 | * This routine is invoked to set up the PCI device memory space for device | ||
5877 | * with SLI-4 interface spec. | ||
5878 | * | ||
5879 | * Return codes | ||
5880 | * 0 - successful | ||
5881 | * other values - error | ||
5882 | **/ | ||
5883 | static int | ||
5884 | lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | ||
5885 | { | ||
5886 | struct pci_dev *pdev; | ||
5887 | unsigned long bar0map_len, bar1map_len, bar2map_len; | ||
5888 | int error = -ENODEV; | ||
5889 | |||
5890 | /* Obtain PCI device reference */ | ||
5891 | if (!phba->pcidev) | ||
5892 | return error; | ||
5893 | else | ||
5894 | pdev = phba->pcidev; | ||
5895 | |||
5896 | /* Set the device DMA mask size */ | ||
5897 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) | ||
5898 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) | ||
5899 | return error; | ||
5900 | |||
5901 | /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the | ||
5902 | * number of bytes required by each mapping. These correspond to | ||
5903 | * PCI BAR regions 1, 2, and 4 on the SLI4 device. | ||
5904 | */ | ||
5905 | phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); | ||
5906 | bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); | ||
5907 | |||
5908 | phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); | ||
5909 | bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); | ||
5910 | |||
5911 | phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); | ||
5912 | bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); | ||
5913 | |||
5914 | /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ | ||
5915 | phba->sli4_hba.conf_regs_memmap_p = | ||
5916 | ioremap(phba->pci_bar0_map, bar0map_len); | ||
5917 | if (!phba->sli4_hba.conf_regs_memmap_p) { | ||
5918 | dev_printk(KERN_ERR, &pdev->dev, | ||
5919 | "ioremap failed for SLI4 PCI config registers.\n"); | ||
5920 | goto out; | ||
5921 | } | ||
5922 | |||
5923 | /* Map SLI4 HBA Control Register base to a kernel virtual address. */ | ||
5924 | phba->sli4_hba.ctrl_regs_memmap_p = | ||
5925 | ioremap(phba->pci_bar1_map, bar1map_len); | ||
5926 | if (!phba->sli4_hba.ctrl_regs_memmap_p) { | ||
5927 | dev_printk(KERN_ERR, &pdev->dev, | ||
5928 | "ioremap failed for SLI4 HBA control registers.\n"); | ||
5929 | goto out_iounmap_conf; | ||
5930 | } | ||
5931 | |||
5932 | /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ | ||
5933 | phba->sli4_hba.drbl_regs_memmap_p = | ||
5934 | ioremap(phba->pci_bar2_map, bar2map_len); | ||
5935 | if (!phba->sli4_hba.drbl_regs_memmap_p) { | ||
5936 | dev_printk(KERN_ERR, &pdev->dev, | ||
5937 | "ioremap failed for SLI4 HBA doorbell registers.\n"); | ||
5938 | goto out_iounmap_ctrl; | ||
5939 | } | ||
5940 | |||
5941 | /* Set up BAR0 PCI config space register memory map */ | ||
5942 | lpfc_sli4_bar0_register_memmap(phba); | ||
5943 | |||
5944 | /* Set up BAR1 register memory map */ | ||
5945 | lpfc_sli4_bar1_register_memmap(phba); | ||
5946 | |||
5947 | /* Set up BAR2 register memory map */ | ||
5948 | error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); | ||
5949 | if (error) | ||
5950 | goto out_iounmap_all; | ||
5951 | |||
5952 | return 0; | ||
5953 | |||
5954 | out_iounmap_all: | ||
5955 | iounmap(phba->sli4_hba.drbl_regs_memmap_p); | ||
5956 | out_iounmap_ctrl: | ||
5957 | iounmap(phba->sli4_hba.ctrl_regs_memmap_p); | ||
5958 | out_iounmap_conf: | ||
5959 | iounmap(phba->sli4_hba.conf_regs_memmap_p); | ||
5960 | out: | ||
5961 | return error; | ||
5962 | } | ||
5963 | |||
5964 | /** | ||
5965 | * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. | ||
5966 | * @phba: pointer to lpfc hba data structure. | ||
5967 | * | ||
5968 | * This routine is invoked to unset the PCI device memory space for device | ||
5969 | * with SLI-4 interface spec. | ||
5970 | **/ | ||
5971 | static void | ||
5972 | lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) | ||
5973 | { | ||
5974 | struct pci_dev *pdev; | ||
5975 | |||
5976 | /* Obtain PCI device reference */ | ||
5977 | if (!phba->pcidev) | ||
5978 | return; | ||
5979 | else | ||
5980 | pdev = phba->pcidev; | ||
5981 | |||
5982 | /* Free coherent DMA memory allocated */ | ||
5983 | |||
5984 | /* Unmap I/O memory space */ | ||
5985 | iounmap(phba->sli4_hba.drbl_regs_memmap_p); | ||
5986 | iounmap(phba->sli4_hba.ctrl_regs_memmap_p); | ||
5987 | iounmap(phba->sli4_hba.conf_regs_memmap_p); | ||
5988 | |||
5989 | return; | ||
5990 | } | ||
5991 | |||
5992 | /** | ||
3319 | * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device | 5993 | * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device |
3320 | * @phba: pointer to lpfc hba data structure. | 5994 | * @phba: pointer to lpfc hba data structure. |
3321 | * | 5995 | * |
@@ -3597,6 +6271,276 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) | |||
3597 | } | 6271 | } |
3598 | 6272 | ||
3599 | /** | 6273 | /** |
6274 | * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device | ||
6275 | * @phba: pointer to lpfc hba data structure. | ||
6276 | * | ||
6277 | * This routine is invoked to enable the MSI-X interrupt vectors to device | ||
6278 | * with SLI-4 interface spec. The kernel function pci_enable_msix() is called | ||
6279 | * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked, | ||
6280 | * enables either all or nothing, depending on the current availability of | ||
6281 | * PCI vector resources. The device driver is responsible for calling the | ||
6282 | * individual request_irq() to register each MSI-X vector with an interrupt | ||
6283 | * handler, which is done in this function. Note that later, when the device | ||
6284 | * is unloading, the driver should always call free_irq() on all MSI-X vectors | ||
6285 | * it has done request_irq() on before calling pci_disable_msix(). Failure | ||
6286 | * to do so results in a BUG_ON() and the device will be left with MSI-X | ||
6287 | * enabled and will leak its vectors. | ||
6288 | * | ||
6289 | * Return codes | ||
6290 | * 0 - successful | ||
6291 | * other values - error | ||
6292 | **/ | ||
6293 | static int | ||
6294 | lpfc_sli4_enable_msix(struct lpfc_hba *phba) | ||
6295 | { | ||
6296 | int rc, index; | ||
6297 | |||
6298 | /* Set up MSI-X multi-message vectors */ | ||
6299 | for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) | ||
6300 | phba->sli4_hba.msix_entries[index].entry = index; | ||
6301 | |||
6302 | /* Configure MSI-X capability structure */ | ||
6303 | rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries, | ||
6304 | phba->sli4_hba.cfg_eqn); | ||
6305 | if (rc) { | ||
6306 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
6307 | "0484 PCI enable MSI-X failed (%d)\n", rc); | ||
6308 | goto msi_fail_out; | ||
6309 | } | ||
6310 | /* Log MSI-X vector assignment */ | ||
6311 | for (index = 0; index < phba->sli4_hba.cfg_eqn; index++) | ||
6312 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
6313 | "0489 MSI-X entry[%d]: vector=x%x " | ||
6314 | "message=%d\n", index, | ||
6315 | phba->sli4_hba.msix_entries[index].vector, | ||
6316 | phba->sli4_hba.msix_entries[index].entry); | ||
6317 | /* | ||
6318 | * Assign MSI-X vectors to interrupt handlers | ||
6319 | */ | ||
6320 | |||
6321 | /* The first vector must be associated with the slow-path handler for MQ */ | ||
6322 | rc = request_irq(phba->sli4_hba.msix_entries[0].vector, | ||
6323 | &lpfc_sli4_sp_intr_handler, IRQF_SHARED, | ||
6324 | LPFC_SP_DRIVER_HANDLER_NAME, phba); | ||
6325 | if (rc) { | ||
6326 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
6327 | "0485 MSI-X slow-path request_irq failed " | ||
6328 | "(%d)\n", rc); | ||
6329 | goto msi_fail_out; | ||
6330 | } | ||
6331 | |||
6332 | /* The rest of the vector(s) are associated to fast-path handler(s) */ | ||
6333 | for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) { | ||
6334 | phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1; | ||
6335 | phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba; | ||
6336 | rc = request_irq(phba->sli4_hba.msix_entries[index].vector, | ||
6337 | &lpfc_sli4_fp_intr_handler, IRQF_SHARED, | ||
6338 | LPFC_FP_DRIVER_HANDLER_NAME, | ||
6339 | &phba->sli4_hba.fcp_eq_hdl[index - 1]); | ||
6340 | if (rc) { | ||
6341 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
6342 | "0486 MSI-X fast-path (%d) " | ||
6343 | "request_irq failed (%d)\n", index, rc); | ||
6344 | goto cfg_fail_out; | ||
6345 | } | ||
6346 | } | ||
6347 | |||
6348 | return rc; | ||
6349 | |||
6350 | cfg_fail_out: | ||
6351 | /* free the irq already requested */ | ||
6352 | for (--index; index >= 1; index--) | ||
6353 | free_irq(phba->sli4_hba.msix_entries[index - 1].vector, | ||
6354 | &phba->sli4_hba.fcp_eq_hdl[index - 1]); | ||
6355 | |||
6356 | /* free the irq already requested */ | ||
6357 | free_irq(phba->sli4_hba.msix_entries[0].vector, phba); | ||
6358 | |||
6359 | msi_fail_out: | ||
6360 | /* Unconfigure MSI-X capability structure */ | ||
6361 | pci_disable_msix(phba->pcidev); | ||
6362 | return rc; | ||
6363 | } | ||
6364 | |||
6365 | /** | ||
6366 | * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device | ||
6367 | * @phba: pointer to lpfc hba data structure. | ||
6368 | * | ||
6369 | * This routine is invoked to release the MSI-X vectors and then disable the | ||
6370 | * MSI-X interrupt mode to device with SLI-4 interface spec. | ||
6371 | **/ | ||
6372 | static void | ||
6373 | lpfc_sli4_disable_msix(struct lpfc_hba *phba) | ||
6374 | { | ||
6375 | int index; | ||
6376 | |||
6377 | /* Free up MSI-X multi-message vectors */ | ||
6378 | free_irq(phba->sli4_hba.msix_entries[0].vector, phba); | ||
6379 | |||
6380 | for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) | ||
6381 | free_irq(phba->sli4_hba.msix_entries[index].vector, | ||
6382 | &phba->sli4_hba.fcp_eq_hdl[index - 1]); | ||
6383 | /* Disable MSI-X */ | ||
6384 | pci_disable_msix(phba->pcidev); | ||
6385 | |||
6386 | return; | ||
6387 | } | ||
6388 | |||
6389 | /** | ||
6390 | * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device | ||
6391 | * @phba: pointer to lpfc hba data structure. | ||
6392 | * | ||
6393 | * This routine is invoked to enable the MSI interrupt mode to device with | ||
6394 | * SLI-4 interface spec. The kernel function pci_enable_msi() is called | ||
6395 | * to enable the MSI vector. The device driver is responsible for calling | ||
6396 | * the request_irq() to register MSI vector with a interrupt the handler, | ||
6397 | * which is done in this function. | ||
6398 | * | ||
6399 | * Return codes | ||
6400 | * 0 - successful | ||
6401 | * other values - error | ||
6402 | **/ | ||
6403 | static int | ||
6404 | lpfc_sli4_enable_msi(struct lpfc_hba *phba) | ||
6405 | { | ||
6406 | int rc, index; | ||
6407 | |||
6408 | rc = pci_enable_msi(phba->pcidev); | ||
6409 | if (!rc) | ||
6410 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
6411 | "0487 PCI enable MSI mode success.\n"); | ||
6412 | else { | ||
6413 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
6414 | "0488 PCI enable MSI mode failed (%d)\n", rc); | ||
6415 | return rc; | ||
6416 | } | ||
6417 | |||
6418 | rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, | ||
6419 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | ||
6420 | if (rc) { | ||
6421 | pci_disable_msi(phba->pcidev); | ||
6422 | lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, | ||
6423 | "0490 MSI request_irq failed (%d)\n", rc); | ||
6424 | } | ||
6425 | |||
6426 | for (index = 0; index < phba->cfg_fcp_eq_count; index++) { | ||
6427 | phba->sli4_hba.fcp_eq_hdl[index].idx = index; | ||
6428 | phba->sli4_hba.fcp_eq_hdl[index].phba = phba; | ||
6429 | } | ||
6430 | |||
6431 | return rc; | ||
6432 | } | ||
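
The comment above spells out the MSI contract: the driver first enables MSI on
the PCI function and is then itself responsible for request_irq() on the single
vector, undoing pci_enable_msi() if registration fails. A minimal, generic
sketch of that pattern -- my_intr_handler(), my_enable_msi() and the "my_drv"
name are illustrative, not lpfc symbols:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t my_intr_handler(int irq, void *dev_id)
{
	/* read and acknowledge the device's interrupt status here */
	return IRQ_HANDLED;
}

static int my_enable_msi(struct pci_dev *pdev, void *dev_id)
{
	int rc;

	rc = pci_enable_msi(pdev);		/* switch the function to MSI */
	if (rc)
		return rc;
	rc = request_irq(pdev->irq, my_intr_handler, IRQF_SHARED,
			 "my_drv", dev_id);	/* hook the single vector */
	if (rc)
		pci_disable_msi(pdev);		/* unwind if registration fails */
	return rc;
}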
6433 | |||
6434 | /** | ||
6435 | * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device | ||
6436 | * @phba: pointer to lpfc hba data structure. | ||
6437 | * | ||
6438 | * This routine is invoked to disable the MSI interrupt mode to device with | ||
6439 | * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has | ||
6440 | * done request_irq() on before calling pci_disable_msi(). Failure to do so | ||
6441 | * results in a BUG_ON() and a device will be left with MSI enabled and leaks | ||
6442 | * its vector. | ||
6443 | **/ | ||
6444 | static void | ||
6445 | lpfc_sli4_disable_msi(struct lpfc_hba *phba) | ||
6446 | { | ||
6447 | free_irq(phba->pcidev->irq, phba); | ||
6448 | pci_disable_msi(phba->pcidev); | ||
6449 | return; | ||
6450 | } | ||
6451 | |||
6452 | /** | ||
6453 | * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device | ||
6454 | * @phba: pointer to lpfc hba data structure. | ||
6455 | * | ||
6456 | * This routine is invoked to enable device interrupt and associate driver's | ||
6457 | * interrupt handler(s) to interrupt vector(s) to device with SLI-4 | ||
6458 | * interface spec. Depending on the interrupt mode configured for the driver, | ||
6459 | * the driver will try to fall back from the configured interrupt mode to an | ||
6460 | * interrupt mode which is supported by the platform, kernel, and device in | ||
6461 | * the order of: | ||
6462 | * MSI-X -> MSI -> IRQ. | ||
6463 | * | ||
6464 | * Return codes | ||
6465 | * 0 - successful | ||
6466 | * other values - error | ||
6467 | **/ | ||
6468 | static uint32_t | ||
6469 | lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) | ||
6470 | { | ||
6471 | uint32_t intr_mode = LPFC_INTR_ERROR; | ||
6472 | int retval, index; | ||
6473 | |||
6474 | if (cfg_mode == 2) { | ||
6475 | /* Preparation before conf_msi mbox cmd */ | ||
6476 | retval = 0; | ||
6477 | if (!retval) { | ||
6478 | /* Now, try to enable MSI-X interrupt mode */ | ||
6479 | retval = lpfc_sli4_enable_msix(phba); | ||
6480 | if (!retval) { | ||
6481 | /* Indicate initialization to MSI-X mode */ | ||
6482 | phba->intr_type = MSIX; | ||
6483 | intr_mode = 2; | ||
6484 | } | ||
6485 | } | ||
6486 | } | ||
6487 | |||
6488 | /* Fallback to MSI if MSI-X initialization failed */ | ||
6489 | if (cfg_mode >= 1 && phba->intr_type == NONE) { | ||
6490 | retval = lpfc_sli4_enable_msi(phba); | ||
6491 | if (!retval) { | ||
6492 | /* Indicate initialization to MSI mode */ | ||
6493 | phba->intr_type = MSI; | ||
6494 | intr_mode = 1; | ||
6495 | } | ||
6496 | } | ||
6497 | |||
6498 | /* Fallback to INTx if both MSI-X/MSI initialization failed */ | ||
6499 | if (phba->intr_type == NONE) { | ||
6500 | retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, | ||
6501 | IRQF_SHARED, LPFC_DRIVER_NAME, phba); | ||
6502 | if (!retval) { | ||
6503 | /* Indicate initialization to INTx mode */ | ||
6504 | phba->intr_type = INTx; | ||
6505 | intr_mode = 0; | ||
6506 | for (index = 0; index < phba->cfg_fcp_eq_count; | ||
6507 | index++) { | ||
6508 | phba->sli4_hba.fcp_eq_hdl[index].idx = index; | ||
6509 | phba->sli4_hba.fcp_eq_hdl[index].phba = phba; | ||
6510 | } | ||
6511 | } | ||
6512 | } | ||
6513 | return intr_mode; | ||
6514 | } | ||
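
The routine above is the fallback ladder named in its comment: each lower mode
is attempted only while intr_type is still NONE, and the value returned doubles
as the mode actually achieved (2, 1, 0, or an error marker). A condensed sketch
of the same control flow; struct my_hba, try_enable_msix() and try_enable_msi()
are stand-ins for the lpfc structures and for the lpfc_sli4_enable_msix()/
lpfc_sli4_enable_msi() routines added by this patch, and are assumed to return
0 on success:

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/types.h>

enum my_intr_type { MY_NONE, MY_INTX, MY_MSI, MY_MSIX };

struct my_hba {
	struct pci_dev *pcidev;
	enum my_intr_type intr_type;	/* assumed to start out as MY_NONE */
};

/* Assumed helpers, provided elsewhere; each registers its own handlers. */
int try_enable_msix(struct my_hba *hba);
int try_enable_msi(struct my_hba *hba);
irqreturn_t my_intr_handler(int irq, void *dev_id);

static uint32_t my_enable_intr(struct my_hba *hba, uint32_t cfg_mode)
{
	uint32_t intr_mode = (uint32_t)-1;	/* error until a mode sticks */

	if (cfg_mode == 2 && !try_enable_msix(hba)) {
		hba->intr_type = MY_MSIX;
		intr_mode = 2;
	}
	if (cfg_mode >= 1 && hba->intr_type == MY_NONE &&
	    !try_enable_msi(hba)) {
		hba->intr_type = MY_MSI;
		intr_mode = 1;
	}
	if (hba->intr_type == MY_NONE &&
	    !request_irq(hba->pcidev->irq, my_intr_handler, IRQF_SHARED,
			 "my_drv", hba)) {
		hba->intr_type = MY_INTX;
		intr_mode = 0;
	}
	return intr_mode;
}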
6515 | |||
6516 | /** | ||
6517 | * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device | ||
6518 | * @phba: pointer to lpfc hba data structure. | ||
6519 | * | ||
6520 | * This routine is invoked to disable device interrupt and disassociate | ||
6521 | * the driver's interrupt handler(s) from interrupt vector(s) to device | ||
6522 | * with SLI-4 interface spec. Depending on the interrupt mode, the driver | ||
6523 | * will release the interrupt vector(s) for the message signaled interrupt. | ||
6524 | **/ | ||
6525 | static void | ||
6526 | lpfc_sli4_disable_intr(struct lpfc_hba *phba) | ||
6527 | { | ||
6528 | /* Disable the currently initialized interrupt mode */ | ||
6529 | if (phba->intr_type == MSIX) | ||
6530 | lpfc_sli4_disable_msix(phba); | ||
6531 | else if (phba->intr_type == MSI) | ||
6532 | lpfc_sli4_disable_msi(phba); | ||
6533 | else if (phba->intr_type == INTx) | ||
6534 | free_irq(phba->pcidev->irq, phba); | ||
6535 | |||
6536 | /* Reset interrupt management states */ | ||
6537 | phba->intr_type = NONE; | ||
6538 | phba->sli.slistat.sli_intr = 0; | ||
6539 | |||
6540 | return; | ||
6541 | } | ||
6542 | |||
6543 | /** | ||
3600 | * lpfc_unset_hba - Unset SLI3 hba device initialization | 6544 | * lpfc_unset_hba - Unset SLI3 hba device initialization |
3601 | * @phba: pointer to lpfc hba data structure. | 6545 | * @phba: pointer to lpfc hba data structure. |
3602 | * | 6546 | * |
@@ -3627,6 +6571,90 @@ lpfc_unset_hba(struct lpfc_hba *phba) | |||
3627 | } | 6571 | } |
3628 | 6572 | ||
3629 | /** | 6573 | /** |
6574 | * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. | ||
6575 | * @phba: pointer to lpfc hba data structure. | ||
6576 | * | ||
6577 | * This routine is invoked to unset the HBA device initialization steps to | ||
6578 | * a device with SLI-4 interface spec. | ||
6579 | **/ | ||
6580 | static void | ||
6581 | lpfc_sli4_unset_hba(struct lpfc_hba *phba) | ||
6582 | { | ||
6583 | struct lpfc_vport *vport = phba->pport; | ||
6584 | struct Scsi_Host *shost = lpfc_shost_from_vport(vport); | ||
6585 | |||
6586 | spin_lock_irq(shost->host_lock); | ||
6587 | vport->load_flag |= FC_UNLOADING; | ||
6588 | spin_unlock_irq(shost->host_lock); | ||
6589 | |||
6590 | phba->pport->work_port_events = 0; | ||
6591 | |||
6592 | lpfc_sli4_hba_down(phba); | ||
6593 | |||
6594 | lpfc_sli4_disable_intr(phba); | ||
6595 | |||
6596 | return; | ||
6597 | } | ||
6598 | |||
6599 | /** | ||
6600 | * lpfc_sli4_hba_unset - Unset the fcoe hba | ||
6601 | * @phba: Pointer to HBA context object. | ||
6602 | * | ||
6603 | * This function is called in the SLI4 code path to reset the HBA's FCoE | ||
6604 | * function. The caller is not required to hold any lock. This routine | ||
6605 | * issues PCI function reset mailbox command to reset the FCoE function. | ||
6606 | * At the end of the function, it calls lpfc_hba_down_post function to | ||
6607 | * free any pending commands. | ||
6608 | **/ | ||
6609 | static void | ||
6610 | lpfc_sli4_hba_unset(struct lpfc_hba *phba) | ||
6611 | { | ||
6612 | int wait_cnt = 0; | ||
6613 | LPFC_MBOXQ_t *mboxq; | ||
6614 | |||
6615 | lpfc_stop_hba_timers(phba); | ||
6616 | phba->sli4_hba.intr_enable = 0; | ||
6617 | |||
6618 | /* | ||
6619 | * Gracefully wait out the potential current outstanding asynchronous | ||
6620 | * mailbox command. | ||
6621 | */ | ||
6622 | |||
6623 | /* First, block any pending async mailbox command from posted */ | ||
6624 | spin_lock_irq(&phba->hbalock); | ||
6625 | phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; | ||
6626 | spin_unlock_irq(&phba->hbalock); | ||
6627 | /* Now, trying to wait it out if we can */ | ||
6628 | while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { | ||
6629 | msleep(10); | ||
6630 | if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) | ||
6631 | break; | ||
6632 | } | ||
6633 | /* Forcefully release the outstanding mailbox command if timed out */ | ||
6634 | if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { | ||
6635 | spin_lock_irq(&phba->hbalock); | ||
6636 | mboxq = phba->sli.mbox_active; | ||
6637 | mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; | ||
6638 | __lpfc_mbox_cmpl_put(phba, mboxq); | ||
6639 | phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
6640 | phba->sli.mbox_active = NULL; | ||
6641 | spin_unlock_irq(&phba->hbalock); | ||
6642 | } | ||
6643 | |||
6644 | /* Tear down the queues in the HBA */ | ||
6645 | lpfc_sli4_queue_unset(phba); | ||
6646 | |||
6647 | /* Disable PCI subsystem interrupt */ | ||
6648 | lpfc_sli4_disable_intr(phba); | ||
6649 | |||
6650 | /* Stop kthread signal shall trigger work_done one more time */ | ||
6651 | kthread_stop(phba->worker_thread); | ||
6652 | |||
6653 | /* Stop the SLI4 device port */ | ||
6654 | phba->pport->work_port_events = 0; | ||
6655 | } | ||
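
lpfc_sli4_hba_unset() above also shows the bounded "grace period" idiom for an
in-flight mailbox command: block new posts first, poll the active flag with a
short sleep up to a fixed count, then force-complete whatever is still
outstanding. A generic sketch of that wait, assuming hypothetical my_* helpers
(not lpfc functions) that hide the flag and lock handling shown above:

#include <linux/delay.h>

#define MY_ACTIVE_MBOX_WAIT_CNT 100	/* 100 polls x 10 ms, roughly 1 second */

struct my_hba;					/* opaque in this sketch */
void my_block_new_mbox(struct my_hba *hba);		/* assumed */
int my_mbox_is_active(struct my_hba *hba);		/* assumed */
void my_force_mbox_completion(struct my_hba *hba);	/* assumed */

static void my_quiesce_mailbox(struct my_hba *hba)
{
	int wait_cnt = 0;

	my_block_new_mbox(hba);			/* no new commands may post */
	while (my_mbox_is_active(hba)) {	/* let the current one finish */
		msleep(10);
		if (++wait_cnt > MY_ACTIVE_MBOX_WAIT_CNT)
			break;
	}
	if (my_mbox_is_active(hba))		/* timed out: force completion */
		my_force_mbox_completion(hba);
}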
6656 | |||
6657 | /** | ||
3630 | * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. | 6658 | * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. |
3631 | * @pdev: pointer to PCI device | 6659 | * @pdev: pointer to PCI device |
3632 | * @pid: pointer to PCI device identifier | 6660 | * @pid: pointer to PCI device identifier |
@@ -4127,6 +7155,446 @@ lpfc_io_resume_s3(struct pci_dev *pdev) | |||
4127 | } | 7155 | } |
4128 | 7156 | ||
4129 | /** | 7157 | /** |
7158 | * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve | ||
7159 | * @phba: pointer to lpfc hba data structure. | ||
7160 | * | ||
7161 | * returns the number of ELS/CT IOCBs to reserve | ||
7162 | **/ | ||
7163 | int | ||
7164 | lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) | ||
7165 | { | ||
7166 | int max_xri = phba->sli4_hba.max_cfg_param.max_xri; | ||
7167 | |||
7168 | if (max_xri <= 100) | ||
7169 | return 4; | ||
7170 | else if (max_xri <= 256) | ||
7171 | return 8; | ||
7172 | else if (max_xri <= 512) | ||
7173 | return 16; | ||
7174 | else if (max_xri <= 1024) | ||
7175 | return 32; | ||
7176 | else | ||
7177 | return 48; | ||
7178 | } | ||
7179 | |||
7180 | /** | ||
7181 | * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys | ||
7182 | * @pdev: pointer to PCI device | ||
7183 | * @pid: pointer to PCI device identifier | ||
7184 | * | ||
7185 | * This routine is called from the kernel's PCI subsystem to device with | ||
7186 | * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is | ||
7187 | * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific | ||
7188 | * information of the device and driver to see if the driver can support | ||
7189 | * this kind of device. If the match is successful, the driver | ||
7190 | * core invokes this routine. If this routine determines it can claim the HBA, | ||
7191 | * it does all the initialization that it needs to do to handle the HBA | ||
7192 | * properly. | ||
7193 | * | ||
7194 | * Return code | ||
7195 | * 0 - driver can claim the device | ||
7196 | * negative value - driver can not claim the device | ||
7197 | **/ | ||
7198 | static int __devinit | ||
7199 | lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) | ||
7200 | { | ||
7201 | struct lpfc_hba *phba; | ||
7202 | struct lpfc_vport *vport = NULL; | ||
7203 | int error; | ||
7204 | uint32_t cfg_mode, intr_mode; | ||
7205 | int mcnt; | ||
7206 | |||
7207 | /* Allocate memory for HBA structure */ | ||
7208 | phba = lpfc_hba_alloc(pdev); | ||
7209 | if (!phba) | ||
7210 | return -ENOMEM; | ||
7211 | |||
7212 | /* Perform generic PCI device enabling operation */ | ||
7213 | error = lpfc_enable_pci_dev(phba); | ||
7214 | if (error) { | ||
7215 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7216 | "1409 Failed to enable pci device.\n"); | ||
7217 | goto out_free_phba; | ||
7218 | } | ||
7219 | |||
7220 | /* Set up SLI API function jump table for PCI-device group-1 HBAs */ | ||
7221 | error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); | ||
7222 | if (error) | ||
7223 | goto out_disable_pci_dev; | ||
7224 | |||
7225 | /* Set up SLI-4 specific device PCI memory space */ | ||
7226 | error = lpfc_sli4_pci_mem_setup(phba); | ||
7227 | if (error) { | ||
7228 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7229 | "1410 Failed to set up pci memory space.\n"); | ||
7230 | goto out_disable_pci_dev; | ||
7231 | } | ||
7232 | |||
7233 | /* Set up phase-1 common device driver resources */ | ||
7234 | error = lpfc_setup_driver_resource_phase1(phba); | ||
7235 | if (error) { | ||
7236 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7237 | "1411 Failed to set up driver resource.\n"); | ||
7238 | goto out_unset_pci_mem_s4; | ||
7239 | } | ||
7240 | |||
7241 | /* Set up SLI-4 Specific device driver resources */ | ||
7242 | error = lpfc_sli4_driver_resource_setup(phba); | ||
7243 | if (error) { | ||
7244 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7245 | "1412 Failed to set up driver resource.\n"); | ||
7246 | goto out_unset_pci_mem_s4; | ||
7247 | } | ||
7248 | |||
7249 | /* Initialize and populate the iocb list per host */ | ||
7250 | error = lpfc_init_iocb_list(phba, | ||
7251 | phba->sli4_hba.max_cfg_param.max_xri); | ||
7252 | if (error) { | ||
7253 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7254 | "1413 Failed to initialize iocb list.\n"); | ||
7255 | goto out_unset_driver_resource_s4; | ||
7256 | } | ||
7257 | |||
7258 | /* Set up common device driver resources */ | ||
7259 | error = lpfc_setup_driver_resource_phase2(phba); | ||
7260 | if (error) { | ||
7261 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7262 | "1414 Failed to set up driver resource.\n"); | ||
7263 | goto out_free_iocb_list; | ||
7264 | } | ||
7265 | |||
7266 | /* Create SCSI host to the physical port */ | ||
7267 | error = lpfc_create_shost(phba); | ||
7268 | if (error) { | ||
7269 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7270 | "1415 Failed to create scsi host.\n"); | ||
7271 | goto out_unset_driver_resource; | ||
7272 | } | ||
7273 | |||
7274 | /* Configure sysfs attributes */ | ||
7275 | vport = phba->pport; | ||
7276 | error = lpfc_alloc_sysfs_attr(vport); | ||
7277 | if (error) { | ||
7278 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7279 | "1416 Failed to allocate sysfs attr\n"); | ||
7280 | goto out_destroy_shost; | ||
7281 | } | ||
7282 | |||
7283 | /* Now, trying to enable interrupt and bring up the device */ | ||
7284 | cfg_mode = phba->cfg_use_msi; | ||
7285 | while (true) { | ||
7286 | /* Put device to a known state before enabling interrupt */ | ||
7287 | lpfc_stop_port(phba); | ||
7288 | /* Configure and enable interrupt */ | ||
7289 | intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); | ||
7290 | if (intr_mode == LPFC_INTR_ERROR) { | ||
7291 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7292 | "0426 Failed to enable interrupt.\n"); | ||
7293 | error = -ENODEV; | ||
7294 | goto out_free_sysfs_attr; | ||
7295 | } | ||
7296 | /* Set up SLI-4 HBA */ | ||
7297 | if (lpfc_sli4_hba_setup(phba)) { | ||
7298 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7299 | "1421 Failed to set up hba\n"); | ||
7300 | error = -ENODEV; | ||
7301 | goto out_disable_intr; | ||
7302 | } | ||
7303 | |||
7304 | /* Send NOP mbx cmds for non-INTx mode active interrupt test */ | ||
7305 | if (intr_mode != 0) | ||
7306 | mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, | ||
7307 | LPFC_ACT_INTR_CNT); | ||
7308 | |||
7309 | /* Check active interrupts received only for MSI/MSI-X */ | ||
7310 | if (intr_mode == 0 || | ||
7311 | phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { | ||
7312 | /* Log the current active interrupt mode */ | ||
7313 | phba->intr_mode = intr_mode; | ||
7314 | lpfc_log_intr_mode(phba, intr_mode); | ||
7315 | break; | ||
7316 | } | ||
7317 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
7318 | "0451 Configure interrupt mode (%d) " | ||
7319 | "failed active interrupt test.\n", | ||
7320 | intr_mode); | ||
7321 | /* Unset the previous SLI-4 HBA setup */ | ||
7322 | lpfc_sli4_unset_hba(phba); | ||
7323 | /* Try next level of interrupt mode */ | ||
7324 | cfg_mode = --intr_mode; | ||
7325 | } | ||
7326 | |||
7327 | /* Perform post initialization setup */ | ||
7328 | lpfc_post_init_setup(phba); | ||
7329 | |||
7330 | return 0; | ||
7331 | |||
7332 | out_disable_intr: | ||
7333 | lpfc_sli4_disable_intr(phba); | ||
7334 | out_free_sysfs_attr: | ||
7335 | lpfc_free_sysfs_attr(vport); | ||
7336 | out_destroy_shost: | ||
7337 | lpfc_destroy_shost(phba); | ||
7338 | out_unset_driver_resource: | ||
7339 | lpfc_unset_driver_resource_phase2(phba); | ||
7340 | out_free_iocb_list: | ||
7341 | lpfc_free_iocb_list(phba); | ||
7342 | out_unset_driver_resource_s4: | ||
7343 | lpfc_sli4_driver_resource_unset(phba); | ||
7344 | out_unset_pci_mem_s4: | ||
7345 | lpfc_sli4_pci_mem_unset(phba); | ||
7346 | out_disable_pci_dev: | ||
7347 | lpfc_disable_pci_dev(phba); | ||
7348 | out_free_phba: | ||
7349 | lpfc_hba_free(phba); | ||
7350 | return error; | ||
7351 | } | ||
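
The probe path above settles the interrupt mode empirically rather than trusting
the enable call alone: after each candidate mode is enabled and the port brought
up, it fires a burst of NOP mailbox commands and keeps the mode only if the
interrupt counter actually advanced; otherwise it unsets the HBA and steps down
one mode. A compressed sketch of that negotiation loop, with my_* helpers
standing in for the lpfc routines used above (my_enable_intr() as in the earlier
sketch, returning 2/1/0 or (uint32_t)-1 on error):

#include <linux/errno.h>
#include <linux/types.h>

#define MY_ACT_INTR_CNT 4	/* test interrupts we expect to observe */

struct my_hba;
void my_stop_port(struct my_hba *hba);				/* assumed */
uint32_t my_enable_intr(struct my_hba *hba, uint32_t cfg_mode);	/* assumed */
int my_hba_setup(struct my_hba *hba);				/* assumed */
void my_send_nop_mbox_cmds(struct my_hba *hba, int cnt);	/* assumed */
uint32_t my_intr_count(struct my_hba *hba);			/* assumed */
void my_unset_hba(struct my_hba *hba);				/* assumed */

static int my_bring_up(struct my_hba *hba, uint32_t cfg_mode)
{
	uint32_t mode;

	for (;;) {
		my_stop_port(hba);		/* known state before enabling */
		mode = my_enable_intr(hba, cfg_mode);
		if (mode == (uint32_t)-1)
			return -ENODEV;		/* no mode could be enabled */
		if (my_hba_setup(hba))
			return -ENODEV;
		if (mode != 0)			/* MSI/MSI-X: provoke interrupts */
			my_send_nop_mbox_cmds(hba, MY_ACT_INTR_CNT);
		if (mode == 0 || my_intr_count(hba) >= MY_ACT_INTR_CNT)
			return 0;		/* mode verified, keep it */
		my_unset_hba(hba);		/* tear down and try the next one */
		cfg_mode = --mode;
	}
}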
7352 | |||
7353 | /** | ||
7354 | * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem | ||
7355 | * @pdev: pointer to PCI device | ||
7356 | * | ||
7357 | * This routine is called from the kernel's PCI subsystem to device with | ||
7358 | * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is | ||
7359 | * removed from PCI bus, it performs all the necessary cleanup for the HBA | ||
7360 | * device to be removed from the PCI subsystem properly. | ||
7361 | **/ | ||
7362 | static void __devexit | ||
7363 | lpfc_pci_remove_one_s4(struct pci_dev *pdev) | ||
7364 | { | ||
7365 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
7366 | struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; | ||
7367 | struct lpfc_vport **vports; | ||
7368 | struct lpfc_hba *phba = vport->phba; | ||
7369 | int i; | ||
7370 | |||
7371 | /* Mark the device unloading flag */ | ||
7372 | spin_lock_irq(&phba->hbalock); | ||
7373 | vport->load_flag |= FC_UNLOADING; | ||
7374 | spin_unlock_irq(&phba->hbalock); | ||
7375 | |||
7376 | /* Free the HBA sysfs attributes */ | ||
7377 | lpfc_free_sysfs_attr(vport); | ||
7378 | |||
7379 | /* Release all the vports against this physical port */ | ||
7380 | vports = lpfc_create_vport_work_array(phba); | ||
7381 | if (vports != NULL) | ||
7382 | for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) | ||
7383 | fc_vport_terminate(vports[i]->fc_vport); | ||
7384 | lpfc_destroy_vport_work_array(phba, vports); | ||
7385 | |||
7386 | /* Remove FC host and then SCSI host with the physical port */ | ||
7387 | fc_remove_host(shost); | ||
7388 | scsi_remove_host(shost); | ||
7389 | |||
7390 | /* Perform cleanup on the physical port */ | ||
7391 | lpfc_cleanup(vport); | ||
7392 | |||
7393 | /* | ||
7394 | * Bring down the SLI Layer. This step disables all interrupts, | ||
7395 | * clears the rings, discards all mailbox commands, and resets | ||
7396 | * the HBA FCoE function. | ||
7397 | */ | ||
7398 | lpfc_debugfs_terminate(vport); | ||
7399 | lpfc_sli4_hba_unset(phba); | ||
7400 | |||
7401 | spin_lock_irq(&phba->hbalock); | ||
7402 | list_del_init(&vport->listentry); | ||
7403 | spin_unlock_irq(&phba->hbalock); | ||
7404 | |||
7405 | /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi | ||
7406 | * buffers are released to their corresponding pools here. | ||
7407 | */ | ||
7408 | lpfc_scsi_free(phba); | ||
7409 | lpfc_sli4_driver_resource_unset(phba); | ||
7410 | |||
7411 | /* Unmap adapter Control and Doorbell registers */ | ||
7412 | lpfc_sli4_pci_mem_unset(phba); | ||
7413 | |||
7414 | /* Release PCI resources and disable device's PCI function */ | ||
7415 | scsi_host_put(shost); | ||
7416 | lpfc_disable_pci_dev(phba); | ||
7417 | |||
7418 | /* Finally, free the driver's device data structure */ | ||
7419 | lpfc_hba_free(phba); | ||
7420 | |||
7421 | return; | ||
7422 | } | ||
7423 | |||
7424 | /** | ||
7425 | * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt | ||
7426 | * @pdev: pointer to PCI device | ||
7427 | * @msg: power management message | ||
7428 | * | ||
7429 | * This routine is called from the kernel's PCI subsystem to support system | ||
7430 | * Power Management (PM) to device with SLI-4 interface spec. When PM invokes | ||
7431 | * this method, it quiesces the device by stopping the driver's worker | ||
7432 | * thread for the device, turning off the device's interrupts and DMA, and | ||
7433 | * bringing the device offline. Note that as the driver implements the minimum PM | ||
7434 | * requirements to a power-aware driver's PM support for suspend/resume -- all | ||
7435 | * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() | ||
7436 | * method call will be treated as SUSPEND and the driver will fully | ||
7437 | * reinitialize its device during resume() method call, the driver will set | ||
7438 | * device to PCI_D3hot state in PCI config space instead of setting it | ||
7439 | * according to the @msg provided by the PM. | ||
7440 | * | ||
7441 | * Return code | ||
7442 | * 0 - driver suspended the device | ||
7443 | * Error otherwise | ||
7444 | **/ | ||
7445 | static int | ||
7446 | lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) | ||
7447 | { | ||
7448 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
7449 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
7450 | |||
7451 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
7452 | "0298 PCI device Power Management suspend.\n"); | ||
7453 | |||
7454 | /* Bring down the device */ | ||
7455 | lpfc_offline_prep(phba); | ||
7456 | lpfc_offline(phba); | ||
7457 | kthread_stop(phba->worker_thread); | ||
7458 | |||
7459 | /* Disable interrupt from device */ | ||
7460 | lpfc_sli4_disable_intr(phba); | ||
7461 | |||
7462 | /* Save device state to PCI config space */ | ||
7463 | pci_save_state(pdev); | ||
7464 | pci_set_power_state(pdev, PCI_D3hot); | ||
7465 | |||
7466 | return 0; | ||
7467 | } | ||
7468 | |||
7469 | /** | ||
7470 | * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt | ||
7471 | * @pdev: pointer to PCI device | ||
7472 | * | ||
7473 | * This routine is called from the kernel's PCI subsystem to support system | ||
7474 | * Power Management (PM) to device with SLI-4 interface spec. When PM invokes | ||
7475 | * this method, it restores the device's PCI config space state and fully | ||
7476 | * reinitializes the device and brings it online. Note that as the driver | ||
7477 | * implements the minimum PM requirements to a power-aware driver's PM for | ||
7478 | * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) | ||
7479 | * to the suspend() method call will be treated as SUSPEND and the driver | ||
7480 | * will fully reinitialize its device during resume() method call, the device | ||
7481 | * will be set to PCI_D0 directly in PCI config space before restoring the | ||
7482 | * state. | ||
7483 | * | ||
7484 | * Return code | ||
7485 | * 0 - driver resumed the device | ||
7486 | * Error otherwise | ||
7487 | **/ | ||
7488 | static int | ||
7489 | lpfc_pci_resume_one_s4(struct pci_dev *pdev) | ||
7490 | { | ||
7491 | struct Scsi_Host *shost = pci_get_drvdata(pdev); | ||
7492 | struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; | ||
7493 | uint32_t intr_mode; | ||
7494 | int error; | ||
7495 | |||
7496 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
7497 | "0292 PCI device Power Management resume.\n"); | ||
7498 | |||
7499 | /* Restore device state from PCI config space */ | ||
7500 | pci_set_power_state(pdev, PCI_D0); | ||
7501 | pci_restore_state(pdev); | ||
7502 | if (pdev->is_busmaster) | ||
7503 | pci_set_master(pdev); | ||
7504 | |||
7505 | /* Startup the kernel thread for this host adapter. */ | ||
7506 | phba->worker_thread = kthread_run(lpfc_do_work, phba, | ||
7507 | "lpfc_worker_%d", phba->brd_no); | ||
7508 | if (IS_ERR(phba->worker_thread)) { | ||
7509 | error = PTR_ERR(phba->worker_thread); | ||
7510 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7511 | "0293 PM resume failed to start worker " | ||
7512 | "thread: error=x%x.\n", error); | ||
7513 | return error; | ||
7514 | } | ||
7515 | |||
7516 | /* Configure and enable interrupt */ | ||
7517 | intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); | ||
7518 | if (intr_mode == LPFC_INTR_ERROR) { | ||
7519 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
7520 | "0294 PM resume Failed to enable interrupt\n"); | ||
7521 | return -EIO; | ||
7522 | } else | ||
7523 | phba->intr_mode = intr_mode; | ||
7524 | |||
7525 | /* Restart HBA and bring it online */ | ||
7526 | lpfc_sli_brdrestart(phba); | ||
7527 | lpfc_online(phba); | ||
7528 | |||
7529 | /* Log the current active interrupt mode */ | ||
7530 | lpfc_log_intr_mode(phba, phba->intr_mode); | ||
7531 | |||
7532 | return 0; | ||
7533 | } | ||
7534 | |||
7535 | /** | ||
7536 | * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device | ||
7537 | * @pdev: pointer to PCI device. | ||
7538 | * @state: the current PCI connection state. | ||
7539 | * | ||
7540 | * This routine is called from the PCI subsystem for error handling to device | ||
7541 | * with SLI-4 interface spec. This function is called by the PCI subsystem | ||
7542 | * after a PCI bus error affecting this device has been detected. When this | ||
7543 | * function is invoked, it will need to stop all the I/Os and interrupt(s) | ||
7544 | * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET | ||
7545 | * for the PCI subsystem to perform proper recovery as desired. | ||
7546 | * | ||
7547 | * Return codes | ||
7548 | * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery | ||
7549 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | ||
7550 | **/ | ||
7551 | static pci_ers_result_t | ||
7552 | lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) | ||
7553 | { | ||
7554 | return PCI_ERS_RESULT_NEED_RESET; | ||
7555 | } | ||
7556 | |||
7557 | /** | ||
7558 | * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch | ||
7559 | * @pdev: pointer to PCI device. | ||
7560 | * | ||
7561 | * This routine is called from the PCI subsystem for error handling to device | ||
7562 | * with SLI-4 interface spec. It is called after PCI bus has been reset to | ||
7563 | * restart the PCI card from scratch, as if from a cold-boot. During the | ||
7564 | * PCI subsystem error recovery, after the driver returns | ||
7565 | * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error | ||
7566 | * recovery and then call this routine before calling the .resume method to | ||
7567 | * recover the device. This function will initialize the HBA device, enable | ||
7568 | * the interrupt, but it will just put the HBA to offline state without | ||
7569 | * passing any I/O traffic. | ||
7570 | * | ||
7571 | * Return codes | ||
7572 | * PCI_ERS_RESULT_RECOVERED - the device has been recovered | ||
7573 | * PCI_ERS_RESULT_DISCONNECT - device could not be recovered | ||
7574 | */ | ||
7575 | static pci_ers_result_t | ||
7576 | lpfc_io_slot_reset_s4(struct pci_dev *pdev) | ||
7577 | { | ||
7578 | return PCI_ERS_RESULT_RECOVERED; | ||
7579 | } | ||
7580 | |||
7581 | /** | ||
7582 | * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device | ||
7583 | * @pdev: pointer to PCI device | ||
7584 | * | ||
7585 | * This routine is called from the PCI subsystem for error handling to device | ||
7586 | * with SLI-4 interface spec. It is called when kernel error recovery tells | ||
7587 | * the lpfc driver that it is ok to resume normal PCI operation after PCI bus | ||
7588 | * error recovery. After this call, traffic can start to flow from this device | ||
7589 | * again. | ||
7590 | **/ | ||
7591 | static void | ||
7592 | lpfc_io_resume_s4(struct pci_dev *pdev) | ||
7593 | { | ||
7594 | return; | ||
7595 | } | ||
7596 | |||
7597 | /** | ||
4130 | * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem | 7598 | * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem |
4131 | * @pdev: pointer to PCI device | 7599 | * @pdev: pointer to PCI device |
4132 | * @pid: pointer to PCI device identifier | 7600 | * @pid: pointer to PCI device identifier |
@@ -4154,6 +7622,10 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) | |||
4154 | return -ENODEV; | 7622 | return -ENODEV; |
4155 | 7623 | ||
4156 | switch (dev_id) { | 7624 | switch (dev_id) { |
7625 | case PCI_DEVICE_ID_TIGERSHARK: | ||
7626 | case PCI_DEVICE_ID_TIGERSHARK_S: | ||
7627 | rc = lpfc_pci_probe_one_s4(pdev, pid); | ||
7628 | break; | ||
4157 | default: | 7629 | default: |
4158 | rc = lpfc_pci_probe_one_s3(pdev, pid); | 7630 | rc = lpfc_pci_probe_one_s3(pdev, pid); |
4159 | break; | 7631 | break; |
@@ -4181,6 +7653,9 @@ lpfc_pci_remove_one(struct pci_dev *pdev) | |||
4181 | case LPFC_PCI_DEV_LP: | 7653 | case LPFC_PCI_DEV_LP: |
4182 | lpfc_pci_remove_one_s3(pdev); | 7654 | lpfc_pci_remove_one_s3(pdev); |
4183 | break; | 7655 | break; |
7656 | case LPFC_PCI_DEV_OC: | ||
7657 | lpfc_pci_remove_one_s4(pdev); | ||
7658 | break; | ||
4184 | default: | 7659 | default: |
4185 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7660 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
4186 | "1424 Invalid PCI device group: 0x%x\n", | 7661 | "1424 Invalid PCI device group: 0x%x\n", |
@@ -4215,6 +7690,9 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) | |||
4215 | case LPFC_PCI_DEV_LP: | 7690 | case LPFC_PCI_DEV_LP: |
4216 | rc = lpfc_pci_suspend_one_s3(pdev, msg); | 7691 | rc = lpfc_pci_suspend_one_s3(pdev, msg); |
4217 | break; | 7692 | break; |
7693 | case LPFC_PCI_DEV_OC: | ||
7694 | rc = lpfc_pci_suspend_one_s4(pdev, msg); | ||
7695 | break; | ||
4218 | default: | 7696 | default: |
4219 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7697 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
4220 | "1425 Invalid PCI device group: 0x%x\n", | 7698 | "1425 Invalid PCI device group: 0x%x\n", |
@@ -4248,6 +7726,9 @@ lpfc_pci_resume_one(struct pci_dev *pdev) | |||
4248 | case LPFC_PCI_DEV_LP: | 7726 | case LPFC_PCI_DEV_LP: |
4249 | rc = lpfc_pci_resume_one_s3(pdev); | 7727 | rc = lpfc_pci_resume_one_s3(pdev); |
4250 | break; | 7728 | break; |
7729 | case LPFC_PCI_DEV_OC: | ||
7730 | rc = lpfc_pci_resume_one_s4(pdev); | ||
7731 | break; | ||
4251 | default: | 7732 | default: |
4252 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7733 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
4253 | "1426 Invalid PCI device group: 0x%x\n", | 7734 | "1426 Invalid PCI device group: 0x%x\n", |
@@ -4283,6 +7764,9 @@ lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | |||
4283 | case LPFC_PCI_DEV_LP: | 7764 | case LPFC_PCI_DEV_LP: |
4284 | rc = lpfc_io_error_detected_s3(pdev, state); | 7765 | rc = lpfc_io_error_detected_s3(pdev, state); |
4285 | break; | 7766 | break; |
7767 | case LPFC_PCI_DEV_OC: | ||
7768 | rc = lpfc_io_error_detected_s4(pdev, state); | ||
7769 | break; | ||
4286 | default: | 7770 | default: |
4287 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7771 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
4288 | "1427 Invalid PCI device group: 0x%x\n", | 7772 | "1427 Invalid PCI device group: 0x%x\n", |
@@ -4317,6 +7801,9 @@ lpfc_io_slot_reset(struct pci_dev *pdev) | |||
4317 | case LPFC_PCI_DEV_LP: | 7801 | case LPFC_PCI_DEV_LP: |
4318 | rc = lpfc_io_slot_reset_s3(pdev); | 7802 | rc = lpfc_io_slot_reset_s3(pdev); |
4319 | break; | 7803 | break; |
7804 | case LPFC_PCI_DEV_OC: | ||
7805 | rc = lpfc_io_slot_reset_s4(pdev); | ||
7806 | break; | ||
4320 | default: | 7807 | default: |
4321 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7808 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
4322 | "1428 Invalid PCI device group: 0x%x\n", | 7809 | "1428 Invalid PCI device group: 0x%x\n", |
@@ -4346,6 +7833,9 @@ lpfc_io_resume(struct pci_dev *pdev) | |||
4346 | case LPFC_PCI_DEV_LP: | 7833 | case LPFC_PCI_DEV_LP: |
4347 | lpfc_io_resume_s3(pdev); | 7834 | lpfc_io_resume_s3(pdev); |
4348 | break; | 7835 | break; |
7836 | case LPFC_PCI_DEV_OC: | ||
7837 | lpfc_io_resume_s4(pdev); | ||
7838 | break; | ||
4349 | default: | 7839 | default: |
4350 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 7840 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
4351 | "1429 Invalid PCI device group: 0x%x\n", | 7841 | "1429 Invalid PCI device group: 0x%x\n", |
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 134fc7fc2127..7f5899b70bd2 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -28,8 +28,10 @@ | |||
28 | 28 | ||
29 | #include <scsi/scsi.h> | 29 | #include <scsi/scsi.h> |
30 | 30 | ||
31 | #include "lpfc_hw4.h" | ||
31 | #include "lpfc_hw.h" | 32 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 33 | #include "lpfc_sli.h" |
34 | #include "lpfc_sli4.h" | ||
33 | #include "lpfc_nl.h" | 35 | #include "lpfc_nl.h" |
34 | #include "lpfc_disc.h" | 36 | #include "lpfc_disc.h" |
35 | #include "lpfc_scsi.h" | 37 | #include "lpfc_scsi.h" |
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 35a976733398..516f4802f84e 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -28,8 +28,10 @@ | |||
28 | 28 | ||
29 | #include <scsi/scsi.h> | 29 | #include <scsi/scsi.h> |
30 | 30 | ||
31 | #include "lpfc_hw4.h" | ||
31 | #include "lpfc_hw.h" | 32 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 33 | #include "lpfc_sli.h" |
34 | #include "lpfc_sli4.h" | ||
33 | #include "lpfc_nl.h" | 35 | #include "lpfc_nl.h" |
34 | #include "lpfc_disc.h" | 36 | #include "lpfc_disc.h" |
35 | #include "lpfc_scsi.h" | 37 | #include "lpfc_scsi.h" |
@@ -45,7 +47,7 @@ | |||
45 | * @phba: HBA to allocate pools for | 47 | * @phba: HBA to allocate pools for |
46 | * | 48 | * |
47 | * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, | 49 | * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, |
48 | * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools | 50 | * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools |
49 | * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. | 51 | * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. |
50 | * | 52 | * |
51 | * Notes: Not interrupt-safe. Must be called with no locks held. If any | 53 | * Notes: Not interrupt-safe. Must be called with no locks held. If any |
@@ -56,19 +58,30 @@ | |||
56 | * -ENOMEM on failure (if any memory allocations fail) | 58 | * -ENOMEM on failure (if any memory allocations fail) |
57 | **/ | 59 | **/ |
58 | int | 60 | int |
59 | lpfc_mem_alloc(struct lpfc_hba * phba) | 61 | lpfc_mem_alloc(struct lpfc_hba *phba, int align) |
60 | { | 62 | { |
61 | struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; | 63 | struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; |
62 | int longs; | 64 | int longs; |
63 | int i; | 65 | int i; |
64 | 66 | ||
65 | phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", | 67 | if (phba->sli_rev == LPFC_SLI_REV4) |
66 | phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); | 68 | phba->lpfc_scsi_dma_buf_pool = |
69 | pci_pool_create("lpfc_scsi_dma_buf_pool", | ||
70 | phba->pcidev, | ||
71 | phba->cfg_sg_dma_buf_size, | ||
72 | phba->cfg_sg_dma_buf_size, | ||
73 | 0); | ||
74 | else | ||
75 | phba->lpfc_scsi_dma_buf_pool = | ||
76 | pci_pool_create("lpfc_scsi_dma_buf_pool", | ||
77 | phba->pcidev, phba->cfg_sg_dma_buf_size, | ||
78 | align, 0); | ||
67 | if (!phba->lpfc_scsi_dma_buf_pool) | 79 | if (!phba->lpfc_scsi_dma_buf_pool) |
68 | goto fail; | 80 | goto fail; |
69 | 81 | ||
70 | phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, | 82 | phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, |
71 | LPFC_BPL_SIZE, 8,0); | 83 | LPFC_BPL_SIZE, |
84 | align, 0); | ||
72 | if (!phba->lpfc_mbuf_pool) | 85 | if (!phba->lpfc_mbuf_pool) |
73 | goto fail_free_dma_buf_pool; | 86 | goto fail_free_dma_buf_pool; |
74 | 87 | ||
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba) | |||
97 | sizeof(struct lpfc_nodelist)); | 110 | sizeof(struct lpfc_nodelist)); |
98 | if (!phba->nlp_mem_pool) | 111 | if (!phba->nlp_mem_pool) |
99 | goto fail_free_mbox_pool; | 112 | goto fail_free_mbox_pool; |
100 | 113 | phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", | |
101 | phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, | 114 | phba->pcidev, |
102 | LPFC_BPL_SIZE, 8, 0); | 115 | LPFC_HDR_BUF_SIZE, align, 0); |
103 | if (!phba->lpfc_hbq_pool) | 116 | if (!phba->lpfc_hrb_pool) |
104 | goto fail_free_nlp_mem_pool; | 117 | goto fail_free_nlp_mem_pool; |
118 | phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", | ||
119 | phba->pcidev, | ||
120 | LPFC_DATA_BUF_SIZE, align, 0); | ||
121 | if (!phba->lpfc_drb_pool) | ||
122 | goto fail_free_hbq_pool; | ||
105 | 123 | ||
106 | /* vpi zero is reserved for the physical port so add 1 to max */ | 124 | /* vpi zero is reserved for the physical port so add 1 to max */ |
107 | longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; | 125 | longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; |
108 | phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); | 126 | phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); |
109 | if (!phba->vpi_bmask) | 127 | if (!phba->vpi_bmask) |
110 | goto fail_free_hbq_pool; | 128 | goto fail_free_dbq_pool; |
111 | 129 | ||
112 | return 0; | 130 | return 0; |
113 | 131 | ||
132 | fail_free_dbq_pool: | ||
133 | pci_pool_destroy(phba->lpfc_drb_pool); | ||
134 | phba->lpfc_drb_pool = NULL; | ||
114 | fail_free_hbq_pool: | 135 | fail_free_hbq_pool: |
115 | lpfc_sli_hbqbuf_free_all(phba); | 136 | pci_pool_destroy(phba->lpfc_hrb_pool); |
116 | pci_pool_destroy(phba->lpfc_hbq_pool); | 137 | phba->lpfc_hrb_pool = NULL; |
117 | fail_free_nlp_mem_pool: | 138 | fail_free_nlp_mem_pool: |
118 | mempool_destroy(phba->nlp_mem_pool); | 139 | mempool_destroy(phba->nlp_mem_pool); |
119 | phba->nlp_mem_pool = NULL; | 140 | phba->nlp_mem_pool = NULL; |
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba) | |||
136 | } | 157 | } |
137 | 158 | ||
138 | /** | 159 | /** |
139 | * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc | 160 | * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc |
140 | * @phba: HBA to free memory for | 161 | * @phba: HBA to free memory for |
141 | * | 162 | * |
142 | * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, | 163 | * Description: Free the memory allocated by lpfc_mem_alloc routine. This |
143 | lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and | 164 | routine is the counterpart of lpfc_mem_alloc. |
144 | * lpfc_nodelist. Also frees the VPI bitmask | ||
145 | * | 165 | * |
146 | * Returns: None | 166 | * Returns: None |
147 | **/ | 167 | **/ |
148 | void | 168 | void |
149 | lpfc_mem_free(struct lpfc_hba * phba) | 169 | lpfc_mem_free(struct lpfc_hba *phba) |
150 | { | 170 | { |
151 | struct lpfc_sli *psli = &phba->sli; | ||
152 | struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; | ||
153 | LPFC_MBOXQ_t *mbox, *next_mbox; | ||
154 | struct lpfc_dmabuf *mp; | ||
155 | int i; | 171 | int i; |
172 | struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; | ||
156 | 173 | ||
174 | /* Free VPI bitmask memory */ | ||
157 | kfree(phba->vpi_bmask); | 175 | kfree(phba->vpi_bmask); |
176 | |||
177 | /* Free HBQ pools */ | ||
158 | lpfc_sli_hbqbuf_free_all(phba); | 178 | lpfc_sli_hbqbuf_free_all(phba); |
179 | pci_pool_destroy(phba->lpfc_drb_pool); | ||
180 | phba->lpfc_drb_pool = NULL; | ||
181 | pci_pool_destroy(phba->lpfc_hrb_pool); | ||
182 | phba->lpfc_hrb_pool = NULL; | ||
183 | |||
184 | /* Free NLP memory pool */ | ||
185 | mempool_destroy(phba->nlp_mem_pool); | ||
186 | phba->nlp_mem_pool = NULL; | ||
187 | |||
188 | /* Free mbox memory pool */ | ||
189 | mempool_destroy(phba->mbox_mem_pool); | ||
190 | phba->mbox_mem_pool = NULL; | ||
191 | |||
192 | /* Free MBUF memory pool */ | ||
193 | for (i = 0; i < pool->current_count; i++) | ||
194 | pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, | ||
195 | pool->elements[i].phys); | ||
196 | kfree(pool->elements); | ||
197 | |||
198 | pci_pool_destroy(phba->lpfc_mbuf_pool); | ||
199 | phba->lpfc_mbuf_pool = NULL; | ||
159 | 200 | ||
201 | /* Free DMA buffer memory pool */ | ||
202 | pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); | ||
203 | phba->lpfc_scsi_dma_buf_pool = NULL; | ||
204 | |||
205 | return; | ||
206 | } | ||
207 | |||
208 | /** | ||
209 | * lpfc_mem_free_all - Frees all PCI and driver memory | ||
210 | * @phba: HBA to free memory for | ||
211 | * | ||
212 | * Description: Free memory from PCI and driver memory pools and also those | ||
213 | * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees | ||
214 | * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees | ||
215 | * the VPI bitmask. | ||
216 | * | ||
217 | * Returns: None | ||
218 | **/ | ||
219 | void | ||
220 | lpfc_mem_free_all(struct lpfc_hba *phba) | ||
221 | { | ||
222 | struct lpfc_sli *psli = &phba->sli; | ||
223 | LPFC_MBOXQ_t *mbox, *next_mbox; | ||
224 | struct lpfc_dmabuf *mp; | ||
225 | |||
226 | /* Free memory used in mailbox queue back to mailbox memory pool */ | ||
160 | list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { | 227 | list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { |
161 | mp = (struct lpfc_dmabuf *) (mbox->context1); | 228 | mp = (struct lpfc_dmabuf *) (mbox->context1); |
162 | if (mp) { | 229 | if (mp) { |
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba) | |||
166 | list_del(&mbox->list); | 233 | list_del(&mbox->list); |
167 | mempool_free(mbox, phba->mbox_mem_pool); | 234 | mempool_free(mbox, phba->mbox_mem_pool); |
168 | } | 235 | } |
236 | /* Free memory used in mailbox cmpl list back to mailbox memory pool */ | ||
169 | list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { | 237 | list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { |
170 | mp = (struct lpfc_dmabuf *) (mbox->context1); | 238 | mp = (struct lpfc_dmabuf *) (mbox->context1); |
171 | if (mp) { | 239 | if (mp) { |
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba) | |||
175 | list_del(&mbox->list); | 243 | list_del(&mbox->list); |
176 | mempool_free(mbox, phba->mbox_mem_pool); | 244 | mempool_free(mbox, phba->mbox_mem_pool); |
177 | } | 245 | } |
178 | 246 | /* Free the active mailbox command back to the mailbox memory pool */ | |
247 | spin_lock_irq(&phba->hbalock); | ||
179 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 248 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; |
249 | spin_unlock_irq(&phba->hbalock); | ||
180 | if (psli->mbox_active) { | 250 | if (psli->mbox_active) { |
181 | mbox = psli->mbox_active; | 251 | mbox = psli->mbox_active; |
182 | mp = (struct lpfc_dmabuf *) (mbox->context1); | 252 | mp = (struct lpfc_dmabuf *) (mbox->context1); |
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba) | |||
188 | psli->mbox_active = NULL; | 258 | psli->mbox_active = NULL; |
189 | } | 259 | } |
190 | 260 | ||
191 | for (i = 0; i < pool->current_count; i++) | 261 | /* Free and destroy all the allocated memory pools */ |
192 | pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, | 262 | lpfc_mem_free(phba); |
193 | pool->elements[i].phys); | ||
194 | kfree(pool->elements); | ||
195 | |||
196 | pci_pool_destroy(phba->lpfc_hbq_pool); | ||
197 | mempool_destroy(phba->nlp_mem_pool); | ||
198 | mempool_destroy(phba->mbox_mem_pool); | ||
199 | |||
200 | pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); | ||
201 | pci_pool_destroy(phba->lpfc_mbuf_pool); | ||
202 | |||
203 | phba->lpfc_hbq_pool = NULL; | ||
204 | phba->nlp_mem_pool = NULL; | ||
205 | phba->mbox_mem_pool = NULL; | ||
206 | phba->lpfc_scsi_dma_buf_pool = NULL; | ||
207 | phba->lpfc_mbuf_pool = NULL; | ||
208 | 263 | ||
209 | /* Free the iocb lookup array */ | 264 | /* Free the iocb lookup array */ |
210 | kfree(psli->iocbq_lookup); | 265 | kfree(psli->iocbq_lookup); |
211 | psli->iocbq_lookup = NULL; | 266 | psli->iocbq_lookup = NULL; |
267 | |||
268 | return; | ||
212 | } | 269 | } |
213 | 270 | ||
214 | /** | 271 | /** |
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) | |||
305 | * lpfc_els_hbq_alloc - Allocate an HBQ buffer | 362 | * lpfc_els_hbq_alloc - Allocate an HBQ buffer |
306 | * @phba: HBA to allocate HBQ buffer for | 363 | * @phba: HBA to allocate HBQ buffer for |
307 | * | 364 | * |
308 | * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI | 365 | * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI |
309 | * pool along a non-DMA-mapped container for it. | 366 | * pool along a non-DMA-mapped container for it. |
310 | * | 367 | * |
311 | * Notes: Not interrupt-safe. Must be called with no locks held. | 368 | * Notes: Not interrupt-safe. Must be called with no locks held. |
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) | |||
323 | if (!hbqbp) | 380 | if (!hbqbp) |
324 | return NULL; | 381 | return NULL; |
325 | 382 | ||
326 | hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, | 383 | hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, |
327 | &hbqbp->dbuf.phys); | 384 | &hbqbp->dbuf.phys); |
328 | if (!hbqbp->dbuf.virt) { | 385 | if (!hbqbp->dbuf.virt) { |
329 | kfree(hbqbp); | 386 | kfree(hbqbp); |
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) | |||
334 | } | 391 | } |
335 | 392 | ||
336 | /** | 393 | /** |
337 | * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc | 394 | * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc |
338 | * @phba: HBA buffer was allocated for | 395 | * @phba: HBA buffer was allocated for |
339 | * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc | 396 | * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc |
340 | * | 397 | * |
@@ -348,12 +405,73 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) | |||
348 | void | 405 | void |
349 | lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) | 406 | lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) |
350 | { | 407 | { |
351 | pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); | 408 | pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); |
352 | kfree(hbqbp); | 409 | kfree(hbqbp); |
353 | return; | 410 | return; |
354 | } | 411 | } |
355 | 412 | ||
356 | /** | 413 | /** |
414 | * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer | ||
415 | * @phba: HBA to allocate a receive buffer for | ||
416 | * | ||
417 | * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI | ||
418 | * pool along with a non-DMA-mapped container for it. | ||
419 | * | ||
420 | * Notes: Not interrupt-safe. Must be called with no locks held. | ||
421 | * | ||
422 | * Returns: | ||
423 | * pointer to HBQ on success | ||
424 | * NULL on failure | ||
425 | **/ | ||
426 | struct hbq_dmabuf * | ||
427 | lpfc_sli4_rb_alloc(struct lpfc_hba *phba) | ||
428 | { | ||
429 | struct hbq_dmabuf *dma_buf; | ||
430 | |||
431 | dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); | ||
432 | if (!dma_buf) | ||
433 | return NULL; | ||
434 | |||
435 | dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, | ||
436 | &dma_buf->hbuf.phys); | ||
437 | if (!dma_buf->hbuf.virt) { | ||
438 | kfree(dma_buf); | ||
439 | return NULL; | ||
440 | } | ||
441 | dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, | ||
442 | &dma_buf->dbuf.phys); | ||
443 | if (!dma_buf->dbuf.virt) { | ||
444 | pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, | ||
445 | dma_buf->hbuf.phys); | ||
446 | kfree(dma_buf); | ||
447 | return NULL; | ||
448 | } | ||
449 | dma_buf->size = LPFC_BPL_SIZE; | ||
450 | return dma_buf; | ||
451 | } | ||
452 | |||
453 | /** | ||
454 | * lpfc_sli4_rb_free - Frees a receive buffer | ||
455 | * @phba: HBA buffer was allocated for | ||
456 | * @dmab: DMA buffer container returned by lpfc_sli4_rb_alloc | ||
457 | * | ||
458 | * Description: Frees both the container and the DMA-mapped buffers returned by | ||
459 | * lpfc_sli4_rb_alloc. | ||
460 | * | ||
461 | * Notes: Can be called with or without locks held. | ||
462 | * | ||
463 | * Returns: None | ||
464 | **/ | ||
465 | void | ||
466 | lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) | ||
467 | { | ||
468 | pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); | ||
469 | pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); | ||
470 | kfree(dmab); | ||
471 | return; | ||
472 | } | ||
473 | |||
474 | /** | ||
357 | * lpfc_in_buf_free - Free a DMA buffer | 475 | * lpfc_in_buf_free - Free a DMA buffer |
358 | * @phba: HBA buffer is associated with | 476 | * @phba: HBA buffer is associated with |
359 | * @mp: Buffer to free | 477 | * @mp: Buffer to free |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 08cdc77af41c..6ba5a72f6049 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -28,8 +28,10 @@ | |||
28 | #include <scsi/scsi_host.h> | 28 | #include <scsi/scsi_host.h> |
29 | #include <scsi/scsi_transport_fc.h> | 29 | #include <scsi/scsi_transport_fc.h> |
30 | 30 | ||
31 | #include "lpfc_hw4.h" | ||
31 | #include "lpfc_hw.h" | 32 | #include "lpfc_hw.h" |
32 | #include "lpfc_sli.h" | 33 | #include "lpfc_sli.h" |
34 | #include "lpfc_sli4.h" | ||
33 | #include "lpfc_nl.h" | 35 | #include "lpfc_nl.h" |
34 | #include "lpfc_disc.h" | 36 | #include "lpfc_disc.h" |
35 | #include "lpfc_scsi.h" | 37 | #include "lpfc_scsi.h" |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a226c053c0f4..9af2db355bc6 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -31,8 +31,10 @@ | |||
31 | #include <scsi/scsi_transport_fc.h> | 31 | #include <scsi/scsi_transport_fc.h> |
32 | 32 | ||
33 | #include "lpfc_version.h" | 33 | #include "lpfc_version.h" |
34 | #include "lpfc_hw4.h" | ||
34 | #include "lpfc_hw.h" | 35 | #include "lpfc_hw.h" |
35 | #include "lpfc_sli.h" | 36 | #include "lpfc_sli.h" |
37 | #include "lpfc_sli4.h" | ||
36 | #include "lpfc_nl.h" | 38 | #include "lpfc_nl.h" |
37 | #include "lpfc_disc.h" | 39 | #include "lpfc_disc.h" |
38 | #include "lpfc_scsi.h" | 40 | #include "lpfc_scsi.h" |
@@ -57,6 +59,8 @@ static char *dif_op_str[] = { | |||
57 | "SCSI_PROT_READ_CONVERT", | 59 | "SCSI_PROT_READ_CONVERT", |
58 | "SCSI_PROT_WRITE_CONVERT" | 60 | "SCSI_PROT_WRITE_CONVERT" |
59 | }; | 61 | }; |
62 | static void | ||
63 | lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); | ||
60 | 64 | ||
61 | static void | 65 | static void |
62 | lpfc_debug_save_data(struct scsi_cmnd *cmnd) | 66 | lpfc_debug_save_data(struct scsi_cmnd *cmnd) |
@@ -565,6 +569,8 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) | |||
565 | } | 569 | } |
566 | iocb->ulpClass = CLASS3; | 570 | iocb->ulpClass = CLASS3; |
567 | psb->status = IOSTAT_SUCCESS; | 571 | psb->status = IOSTAT_SUCCESS; |
572 | /* Put it back into the SCSI buffer list */ | ||
573 | lpfc_release_scsi_buf_s4(phba, psb); | ||
568 | 574 | ||
569 | } | 575 | } |
570 | 576 | ||
@@ -572,6 +578,271 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) | |||
572 | } | 578 | } |
573 | 579 | ||
574 | /** | 580 | /** |
581 | * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort | ||
582 | * @phba: pointer to lpfc hba data structure. | ||
583 | * @axri: pointer to the fcp xri abort wcqe structure. | ||
584 | * | ||
585 | * This routine is invoked by the worker thread to process a SLI4 fast-path | ||
586 | * FCP aborted xri. | ||
587 | **/ | ||
588 | void | ||
589 | lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, | ||
590 | struct sli4_wcqe_xri_aborted *axri) | ||
591 | { | ||
592 | uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); | ||
593 | struct lpfc_scsi_buf *psb, *next_psb; | ||
594 | unsigned long iflag = 0; | ||
595 | |||
596 | spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); | ||
597 | list_for_each_entry_safe(psb, next_psb, | ||
598 | &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { | ||
599 | if (psb->cur_iocbq.sli4_xritag == xri) { | ||
600 | list_del(&psb->list); | ||
601 | psb->status = IOSTAT_SUCCESS; | ||
602 | spin_unlock_irqrestore( | ||
603 | &phba->sli4_hba.abts_scsi_buf_list_lock, | ||
604 | iflag); | ||
605 | lpfc_release_scsi_buf_s4(phba, psb); | ||
606 | return; | ||
607 | } | ||
608 | } | ||
609 | spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, | ||
610 | iflag); | ||
611 | } | ||
612 | |||
613 | /** | ||
614 | * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block | ||
615 | * @phba: pointer to lpfc hba data structure. | ||
616 | * | ||
617 | * This routine walks the list of scsi buffers that have been allocated and | ||
618 | * reposts them to the HBA by using SGL block post. This is needed after a | ||
619 | * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine | ||
620 | * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list | ||
621 | * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. | ||
622 | * | ||
623 | * Returns: 0 = success, non-zero failure. | ||
624 | **/ | ||
625 | int | ||
626 | lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) | ||
627 | { | ||
628 | struct lpfc_scsi_buf *psb; | ||
629 | int index, status, bcnt = 0, rcnt = 0, rc = 0; | ||
630 | LIST_HEAD(sblist); | ||
631 | |||
632 | for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { | ||
633 | psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; | ||
634 | if (psb) { | ||
635 | /* Remove from SCSI buffer list */ | ||
636 | list_del(&psb->list); | ||
637 | /* Add it to a local SCSI buffer list */ | ||
638 | list_add_tail(&psb->list, &sblist); | ||
639 | if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { | ||
640 | bcnt = rcnt; | ||
641 | rcnt = 0; | ||
642 | } | ||
643 | } else | ||
644 | /* A hole present in the XRI array, need to skip */ | ||
645 | bcnt = rcnt; | ||
646 | |||
647 | if (index == phba->sli4_hba.scsi_xri_cnt - 1) | ||
648 | /* End of XRI array for SCSI buffer, complete */ | ||
649 | bcnt = rcnt; | ||
650 | |||
651 | /* Continue until collect up to a nembed page worth of sgls */ | ||
652 | if (bcnt == 0) | ||
653 | continue; | ||
654 | /* Now, post the SCSI buffer list sgls as a block */ | ||
655 | status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); | ||
656 | /* Reset SCSI buffer count for next round of posting */ | ||
657 | bcnt = 0; | ||
658 | while (!list_empty(&sblist)) { | ||
659 | list_remove_head(&sblist, psb, struct lpfc_scsi_buf, | ||
660 | list); | ||
661 | if (status) { | ||
662 | /* Put this back on the abort scsi list */ | ||
663 | psb->status = IOSTAT_LOCAL_REJECT; | ||
664 | psb->result = IOERR_ABORT_REQUESTED; | ||
665 | rc++; | ||
666 | } else | ||
667 | psb->status = IOSTAT_SUCCESS; | ||
668 | /* Put it back into the SCSI buffer list */ | ||
669 | lpfc_release_scsi_buf_s4(phba, psb); | ||
670 | } | ||
671 | } | ||
672 | return rc; | ||
673 | } | ||
674 | |||
675 | /** | ||
676 | * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec | ||
677 | * @vport: The virtual port for which this call being executed. | ||
678 | * @num_to_alloc: The requested number of buffers to allocate. | ||
679 | * | ||
680 | * This routine allocates a scsi buffer for device with SLI-4 interface spec, | ||
681 | * the scsi buffer contains all the necessary information needed to initiate | ||
682 | * a SCSI I/O. | ||
683 | * | ||
684 | * Return codes: | ||
685 | * int - number of scsi buffers that were allocated. | ||
686 | * 0 = failure, less than num_to_alloc is a partial failure. | ||
687 | **/ | ||
688 | static int | ||
689 | lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) | ||
690 | { | ||
691 | struct lpfc_hba *phba = vport->phba; | ||
692 | struct lpfc_scsi_buf *psb; | ||
693 | struct sli4_sge *sgl; | ||
694 | IOCB_t *iocb; | ||
695 | dma_addr_t pdma_phys_fcp_cmd; | ||
696 | dma_addr_t pdma_phys_fcp_rsp; | ||
697 | dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; | ||
698 | uint16_t iotag, last_xritag = NO_XRI; | ||
699 | int status = 0, index; | ||
700 | int bcnt; | ||
701 | int non_sequential_xri = 0; | ||
702 | int rc = 0; | ||
703 | LIST_HEAD(sblist); | ||
704 | |||
705 | for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { | ||
706 | psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); | ||
707 | if (!psb) | ||
708 | break; | ||
709 | |||
710 | /* | ||
711 | * Get memory from the pci pool to map the virt space to pci bus | ||
712 | * space for an I/O. The DMA buffer includes space for the | ||
713 | * struct fcp_cmnd, struct fcp_rsp and the number of bde's | ||
714 | * necessary to support the sg_tablesize. | ||
715 | */ | ||
716 | psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, | ||
717 | GFP_KERNEL, &psb->dma_handle); | ||
718 | if (!psb->data) { | ||
719 | kfree(psb); | ||
720 | break; | ||
721 | } | ||
722 | |||
723 | /* Initialize virtual ptrs to dma_buf region. */ | ||
724 | memset(psb->data, 0, phba->cfg_sg_dma_buf_size); | ||
725 | |||
726 | /* Allocate iotag for psb->cur_iocbq. */ | ||
727 | iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); | ||
728 | if (iotag == 0) { | ||
729 | kfree(psb); | ||
730 | break; | ||
731 | } | ||
732 | |||
733 | psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); | ||
734 | if (psb->cur_iocbq.sli4_xritag == NO_XRI) { | ||
735 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, | ||
736 | psb->data, psb->dma_handle); | ||
737 | kfree(psb); | ||
738 | break; | ||
739 | } | ||
740 | if (last_xritag != NO_XRI | ||
741 | && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { | ||
742 | non_sequential_xri = 1; | ||
743 | } else | ||
744 | list_add_tail(&psb->list, &sblist); | ||
745 | last_xritag = psb->cur_iocbq.sli4_xritag; | ||
746 | |||
747 | index = phba->sli4_hba.scsi_xri_cnt++; | ||
748 | psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; | ||
749 | |||
750 | psb->fcp_bpl = psb->data; | ||
751 | psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) | ||
752 | - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); | ||
753 | psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + | ||
754 | sizeof(struct fcp_cmnd)); | ||
755 | |||
756 | /* Initialize local short-hand pointers. */ | ||
757 | sgl = (struct sli4_sge *)psb->fcp_bpl; | ||
758 | pdma_phys_bpl = psb->dma_handle; | ||
759 | pdma_phys_fcp_cmd = | ||
760 | (psb->dma_handle + phba->cfg_sg_dma_buf_size) | ||
761 | - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); | ||
762 | pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); | ||
763 | |||
764 | /* | ||
765 | * The first two bdes are the FCP_CMD and FCP_RSP. The balance | ||
766 | * are sg list bdes. Initialize the first two and leave the | ||
767 | * rest for queuecommand. | ||
768 | */ | ||
769 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); | ||
770 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); | ||
771 | bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd)); | ||
772 | bf_set(lpfc_sli4_sge_last, sgl, 0); | ||
773 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
774 | sgl->word3 = cpu_to_le32(sgl->word3); | ||
775 | sgl++; | ||
776 | |||
777 | /* Setup the physical region for the FCP RSP */ | ||
778 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); | ||
779 | sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); | ||
780 | bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp)); | ||
781 | bf_set(lpfc_sli4_sge_last, sgl, 1); | ||
782 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
783 | sgl->word3 = cpu_to_le32(sgl->word3); | ||
784 | |||
785 | /* | ||
786 | * Since the IOCB for the FCP I/O is built into this | ||
787 | * lpfc_scsi_buf, initialize it with all known data now. | ||
788 | */ | ||
789 | iocb = &psb->cur_iocbq.iocb; | ||
790 | iocb->un.fcpi64.bdl.ulpIoTag32 = 0; | ||
791 | iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; | ||
792 | /* Setting the BPL size to 2 * sizeof BDE may not be correct. | ||
793 | * We are setting the bpl to point to our sgl. An sgl's | ||
794 | * entries are 16 bytes; a bpl's entries are 12 bytes. | ||
795 | */ | ||
796 | iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); | ||
797 | iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd); | ||
798 | iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd); | ||
799 | iocb->ulpBdeCount = 1; | ||
800 | iocb->ulpLe = 1; | ||
801 | iocb->ulpClass = CLASS3; | ||
802 | if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) | ||
803 | pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; | ||
804 | else | ||
805 | pdma_phys_bpl1 = 0; | ||
806 | psb->dma_phys_bpl = pdma_phys_bpl; | ||
807 | phba->sli4_hba.lpfc_scsi_psb_array[index] = psb; | ||
808 | if (non_sequential_xri) { | ||
809 | status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl, | ||
810 | pdma_phys_bpl1, | ||
811 | psb->cur_iocbq.sli4_xritag); | ||
812 | if (status) { | ||
813 | /* Put this back on the abort scsi list */ | ||
814 | psb->status = IOSTAT_LOCAL_REJECT; | ||
815 | psb->result = IOERR_ABORT_REQUESTED; | ||
816 | rc++; | ||
817 | } else | ||
818 | psb->status = IOSTAT_SUCCESS; | ||
819 | /* Put it back into the SCSI buffer list */ | ||
820 | lpfc_release_scsi_buf_s4(phba, psb); | ||
821 | break; | ||
822 | } | ||
823 | } | ||
824 | if (bcnt) { | ||
825 | status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); | ||
826 | /* Reset SCSI buffer count for next round of posting */ | ||
827 | while (!list_empty(&sblist)) { | ||
828 | list_remove_head(&sblist, psb, struct lpfc_scsi_buf, | ||
829 | list); | ||
830 | if (status) { | ||
831 | /* Put this back on the abort scsi list */ | ||
832 | psb->status = IOSTAT_LOCAL_REJECT; | ||
833 | psb->result = IOERR_ABORT_REQUESTED; | ||
834 | rc++; | ||
835 | } else | ||
836 | psb->status = IOSTAT_SUCCESS; | ||
837 | /* Put it back into the SCSI buffer list */ | ||
838 | lpfc_release_scsi_buf_s4(phba, psb); | ||
839 | } | ||
840 | } | ||
841 | |||
842 | return bcnt + non_sequential_xri - rc; | ||
843 | } | ||
844 | |||
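Aside: lpfc_new_scsi_buf_s4() carves one DMA allocation into an SGL area at the front and the fcp_cmnd/fcp_rsp pair packed at the tail. A user-space sketch of the same pointer arithmetic (the sizes are made-up stand-ins, not the real lpfc structure sizes):

    #include <stdio.h>
    #include <stdlib.h>

    #define BUF_SIZE      2048          /* models phba->cfg_sg_dma_buf_size */
    #define FCP_CMND_SIZE 32            /* models sizeof(struct fcp_cmnd) */
    #define FCP_RSP_SIZE  48            /* models sizeof(struct fcp_rsp) */

    int main(void)
    {
        unsigned char *data = calloc(1, BUF_SIZE);
        if (!data)
            return 1;

        /* The SGL (BPL) lives at the start of the region ... */
        unsigned char *bpl = data;
        /* ... while fcp_cmnd and fcp_rsp are packed at the very end. */
        unsigned char *cmnd = data + BUF_SIZE - (FCP_CMND_SIZE + FCP_RSP_SIZE);
        unsigned char *rsp  = cmnd + FCP_CMND_SIZE;

        printf("bpl offset  = %td\n", bpl - data);
        printf("cmnd offset = %td\n", cmnd - data);
        printf("rsp offset  = %td\n", rsp - data);
        free(data);
        return 0;
    }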
845 | /** | ||
575 | * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator | 846 | * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator |
576 | * @vport: The virtual port for which this call is being executed. | 847 | * @vport: The virtual port for which this call is being executed. |
577 | * @num_to_allocate: The requested number of buffers to allocate. | 848 | * @num_to_allocate: The requested number of buffers to allocate. |
@@ -638,6 +909,39 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | |||
638 | } | 909 | } |
639 | 910 | ||
640 | /** | 911 | /** |
912 | * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list. | ||
913 | * @phba: The Hba for which this call is being executed. | ||
914 | * @psb: The scsi buffer which is being released. | ||
915 | * | ||
916 | * This routine releases the @psb scsi buffer by adding it to the tail of | ||
917 | * the @phba lpfc_scsi_buf_list. For SLI4, XRIs are tied to the scsi buffer | ||
918 | * and cannot be reused for at least RA_TOV if the I/O was | ||
919 | * aborted. | ||
920 | **/ | ||
921 | static void | ||
922 | lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | ||
923 | { | ||
924 | unsigned long iflag = 0; | ||
925 | |||
926 | if (psb->status == IOSTAT_LOCAL_REJECT | ||
927 | && psb->result == IOERR_ABORT_REQUESTED) { | ||
928 | spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, | ||
929 | iflag); | ||
930 | psb->pCmd = NULL; | ||
931 | list_add_tail(&psb->list, | ||
932 | &phba->sli4_hba.lpfc_abts_scsi_buf_list); | ||
933 | spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, | ||
934 | iflag); | ||
935 | } else { | ||
936 | |||
937 | spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); | ||
938 | psb->pCmd = NULL; | ||
939 | list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); | ||
940 | spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); | ||
941 | } | ||
942 | } | ||
943 | |||
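Aside: the release path above splits on completion status: a buffer whose I/O was aborted is parked on the abts list (its XRI cannot be reused until RA_TOV expires), while everything else goes straight back to the free pool. A tiny sketch of just that decision (names are illustrative, not the driver's types):

    #include <stdio.h>

    enum io_status { IO_OK, IO_ABORTED };

    /* Model of the release decision: aborted buffers are quarantined,
     * everything else returns to the free pool immediately. */
    static const char *release_target(enum io_status status)
    {
        return (status == IO_ABORTED) ? "lpfc_abts_scsi_buf_list"
                                      : "lpfc_scsi_buf_list";
    }

    int main(void)
    {
        printf("ok buffer      -> %s\n", release_target(IO_OK));
        printf("aborted buffer -> %s\n", release_target(IO_ABORTED));
        return 0;
    }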
944 | /** | ||
641 | * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. | 945 | * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list. |
642 | * @phba: The Hba for which this call is being executed. | 946 | * @phba: The Hba for which this call is being executed. |
643 | * @psb: The scsi buffer which is being released. | 947 | * @psb: The scsi buffer which is being released. |
@@ -1455,6 +1759,115 @@ out: | |||
1455 | } | 1759 | } |
1456 | 1760 | ||
1457 | /** | 1761 | /** |
1762 | * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec | ||
1763 | * @phba: The Hba for which this call is being executed. | ||
1764 | * @lpfc_cmd: The scsi buffer which is going to be mapped. | ||
1765 | * | ||
1766 | * This routine does the pci dma mapping for the scatter-gather list of the | ||
1767 | * scsi cmnd field of @lpfc_cmd for a device with the SLI-4 interface spec. | ||
1768 | * | ||
1769 | * Return codes: | ||
1770 | * 1 - Error | ||
1771 | * 0 - Success | ||
1772 | **/ | ||
1773 | static int | ||
1774 | lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | ||
1775 | { | ||
1776 | struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; | ||
1777 | struct scatterlist *sgel = NULL; | ||
1778 | struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; | ||
1779 | struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; | ||
1780 | IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; | ||
1781 | dma_addr_t physaddr; | ||
1782 | uint32_t num_bde = 0; | ||
1783 | uint32_t dma_len; | ||
1784 | uint32_t dma_offset = 0; | ||
1785 | int nseg; | ||
1786 | |||
1787 | /* | ||
1788 | * There are three possibilities here - use scatter-gather segment, use | ||
1789 | * the single mapping, or neither. Start the lpfc command prep by | ||
1790 | * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first | ||
1791 | * data bde entry. | ||
1792 | */ | ||
1793 | if (scsi_sg_count(scsi_cmnd)) { | ||
1794 | /* | ||
1795 | * The driver stores the segment count returned from pci_map_sg | ||
1796 | * because this is a count of dma-mappings used to map the use_sg | ||
1797 | * pages. They are not guaranteed to be the same for those | ||
1798 | * architectures that implement an IOMMU. | ||
1799 | */ | ||
1800 | |||
1801 | nseg = scsi_dma_map(scsi_cmnd); | ||
1802 | if (unlikely(!nseg)) | ||
1803 | return 1; | ||
1804 | sgl += 1; | ||
1805 | /* clear the last flag in the fcp_rsp map entry */ | ||
1806 | sgl->word2 = le32_to_cpu(sgl->word2); | ||
1807 | bf_set(lpfc_sli4_sge_last, sgl, 0); | ||
1808 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
1809 | sgl += 1; | ||
1810 | |||
1811 | lpfc_cmd->seg_cnt = nseg; | ||
1812 | if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { | ||
1813 | printk(KERN_ERR "%s: Too many sg segments from " | ||
1814 | "dma_map_sg. Config %d, seg_cnt %d\n", | ||
1815 | __func__, phba->cfg_sg_seg_cnt, | ||
1816 | lpfc_cmd->seg_cnt); | ||
1817 | scsi_dma_unmap(scsi_cmnd); | ||
1818 | return 1; | ||
1819 | } | ||
1820 | |||
1821 | /* | ||
1822 | * The driver established a maximum scatter-gather segment count | ||
1823 | * during probe that limits the number of sg elements in any | ||
1824 | * single scsi command. Just run through the seg_cnt and format | ||
1825 | * the sge's. | ||
1826 | * When using SLI-3 the driver will try to fit all the BDEs into | ||
1827 | * the IOCB. If it can't then the BDEs get added to a BPL as it | ||
1828 | * does for SLI-2 mode. | ||
1829 | */ | ||
1830 | scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { | ||
1831 | physaddr = sg_dma_address(sgel); | ||
1832 | dma_len = sg_dma_len(sgel); | ||
1833 | bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel)); | ||
1834 | sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); | ||
1835 | sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); | ||
1836 | if ((num_bde + 1) == nseg) | ||
1837 | bf_set(lpfc_sli4_sge_last, sgl, 1); | ||
1838 | else | ||
1839 | bf_set(lpfc_sli4_sge_last, sgl, 0); | ||
1840 | bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); | ||
1841 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
1842 | sgl->word3 = cpu_to_le32(sgl->word3); | ||
1843 | dma_offset += dma_len; | ||
1844 | sgl++; | ||
1845 | } | ||
1846 | } else { | ||
1847 | sgl += 1; | ||
1848 | /* clear the last flag in the fcp_rsp map entry */ | ||
1849 | sgl->word2 = le32_to_cpu(sgl->word2); | ||
1850 | bf_set(lpfc_sli4_sge_last, sgl, 1); | ||
1851 | sgl->word2 = cpu_to_le32(sgl->word2); | ||
1852 | } | ||
1853 | |||
1854 | /* | ||
1855 | * Finish initializing those IOCB fields that are dependent on the | ||
1856 | * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is | ||
1857 | * explicitly reinitialized because | ||
1858 | * all iocb memory resources are reused. | ||
1859 | */ | ||
1860 | fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); | ||
1861 | |||
1862 | /* | ||
1863 | * Due to difference in data length between DIF/non-DIF paths, | ||
1864 | * we need to set word 4 of IOCB here | ||
1865 | */ | ||
1866 | iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); | ||
1867 | return 0; | ||
1868 | } | ||
1869 | |||
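Aside: in the DMA-prep loop above only the final data SGE gets its "last" bit set; every other entry leaves it clear, and the running dma_offset is carried into each entry. A stand-alone sketch of that marking pattern (plain struct fields stand in for the real bit-field macros):

    #include <stdio.h>

    #define NSEG 4

    struct sge_model {
        unsigned long addr;             /* models the SGE address words */
        unsigned int  len;              /* models lpfc_sli4_sge_len */
        unsigned int  offset;           /* models lpfc_sli4_sge_offset */
        int           last;             /* models lpfc_sli4_sge_last */
    };

    int main(void)
    {
        struct sge_model sgl[NSEG];
        unsigned int dma_offset = 0;

        for (int i = 0; i < NSEG; i++) {
            sgl[i].addr   = 0x1000 + dma_offset;   /* fake DMA address */
            sgl[i].len    = 512;                   /* fake segment length */
            sgl[i].offset = dma_offset;
            sgl[i].last   = (i + 1 == NSEG);       /* set only on the final SGE */
            dma_offset   += sgl[i].len;
        }

        for (int i = 0; i < NSEG; i++)
            printf("sge %d: addr=0x%lx len=%u off=%u last=%d\n",
                   i, sgl[i].addr, sgl[i].len, sgl[i].offset, sgl[i].last);
        return 0;
    }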
1870 | /** | ||
1458 | * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer | 1871 | * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer |
1459 | * @phba: The Hba for which this call is being executed. | 1872 | * @phba: The Hba for which this call is being executed. |
1460 | * @lpfc_cmd: The scsi buffer which is going to be mapped. | 1873 | * @lpfc_cmd: The scsi buffer which is going to be mapped. |
@@ -1590,6 +2003,22 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | |||
1590 | } | 2003 | } |
1591 | 2004 | ||
1592 | /** | 2005 | /** |
2006 | * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev | ||
2007 | * @phba: The Hba for which this call is being executed. | ||
2008 | * @psb: The scsi buffer which is going to be un-mapped. | ||
2009 | * | ||
2010 | * This routine does DMA un-mapping of the scatter gather list of the scsi | ||
2011 | * command field of @psb for a device with the SLI-4 interface spec. If we have | ||
2012 | * to remove the sgl for this scsi buffer then we will do it here. For now | ||
2013 | * we should be able to just call the sli3 unprep routine. | ||
2014 | **/ | ||
2015 | static void | ||
2016 | lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | ||
2017 | { | ||
2018 | lpfc_scsi_unprep_dma_buf_s3(phba, psb); | ||
2019 | } | ||
2020 | |||
2021 | /** | ||
1593 | * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list | 2022 | * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list |
1594 | * @phba: The Hba for which this call is being executed. | 2023 | * @phba: The Hba for which this call is being executed. |
1595 | * @psb: The scsi buffer which is going to be un-mapped. | 2024 | * @psb: The scsi buffer which is going to be un-mapped. |
@@ -2129,6 +2558,29 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
2129 | } | 2558 | } |
2130 | 2559 | ||
2131 | /** | 2560 | /** |
2561 | * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev | ||
2562 | * @vport: The virtual port for which this call is being executed. | ||
2563 | * @lpfc_cmd: The scsi command which needs to send. | ||
2564 | * @pnode: Pointer to lpfc_nodelist. | ||
2565 | * | ||
2566 | * This routine initializes the fcp_cmnd and iocb data structures from the | ||
2567 | * scsi command to transfer for a device with the SLI-4 interface spec. | ||
2568 | **/ | ||
2569 | static void | ||
2570 | lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | ||
2571 | struct lpfc_nodelist *pnode) | ||
2572 | { | ||
2573 | /* | ||
2574 | * The prep cmnd routines do not touch the sgl or its | ||
2575 | * entries. We may not have to do anything different. | ||
2576 | * I will leave this function in place until we can | ||
2577 | * run some IO through the driver and determine if changes | ||
2578 | * are needed. | ||
2579 | */ | ||
2580 | return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode); | ||
2581 | } | ||
2582 | |||
2583 | /** | ||
2132 | * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit | 2584 | * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit |
2133 | * @vport: The virtual port for which this call is being executed. | 2585 | * @vport: The virtual port for which this call is being executed. |
2134 | * @lpfc_cmd: The scsi command which needs to send. | 2586 | * @lpfc_cmd: The scsi command which needs to send. |
@@ -2209,6 +2661,37 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, | |||
2209 | } | 2661 | } |
2210 | 2662 | ||
2211 | /** | 2663 | /** |
2664 | * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit | ||
2665 | * @vport: The virtual port for which this call is being executed. | ||
2666 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | ||
2667 | * @lun: Logical unit number. | ||
2668 | * @task_mgmt_cmd: SCSI task management command. | ||
2669 | * | ||
2670 | * This routine creates FCP information unit corresponding to @task_mgmt_cmd | ||
2671 | * for device with SLI-4 interface spec. | ||
2672 | * | ||
2673 | * Return codes: | ||
2674 | * 0 - Error | ||
2675 | * 1 - Success | ||
2676 | **/ | ||
2677 | static int | ||
2678 | lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, | ||
2679 | struct lpfc_scsi_buf *lpfc_cmd, | ||
2680 | unsigned int lun, | ||
2681 | uint8_t task_mgmt_cmd) | ||
2682 | { | ||
2683 | /* | ||
2684 | * The prep cmnd routines do not touch the sgl or its | ||
2685 | * entries. We may not have to do anything different. | ||
2686 | * I will leave this function in place until we can | ||
2687 | * run some IO through the driver and determine if changes | ||
2688 | * are needed. | ||
2689 | */ | ||
2690 | return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun, | ||
2691 | task_mgmt_cmd); | ||
2692 | } | ||
2693 | |||
2694 | /** | ||
2212 | * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info | 2695 | * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info |
2213 | * @vport: The virtual port for which this call is being executed. | 2696 | * @vport: The virtual port for which this call is being executed. |
2214 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. | 2697 | * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. |
@@ -2257,6 +2740,15 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | |||
2257 | lpfc_scsi_prep_task_mgmt_cmd_s3; | 2740 | lpfc_scsi_prep_task_mgmt_cmd_s3; |
2258 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; | 2741 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; |
2259 | break; | 2742 | break; |
2743 | case LPFC_PCI_DEV_OC: | ||
2744 | phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; | ||
2745 | phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; | ||
2746 | phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4; | ||
2747 | phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4; | ||
2748 | phba->lpfc_scsi_prep_task_mgmt_cmd = | ||
2749 | lpfc_scsi_prep_task_mgmt_cmd_s4; | ||
2750 | phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; | ||
2751 | break; | ||
2260 | default: | 2752 | default: |
2261 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 2753 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
2262 | "1418 Invalid HBA PCI-device group: 0x%x\n", | 2754 | "1418 Invalid HBA PCI-device group: 0x%x\n", |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index c7c440d5fa29..65dfc8bd5b49 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h | |||
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf { | |||
140 | struct fcp_rsp *fcp_rsp; | 140 | struct fcp_rsp *fcp_rsp; |
141 | struct ulp_bde64 *fcp_bpl; | 141 | struct ulp_bde64 *fcp_bpl; |
142 | 142 | ||
143 | dma_addr_t dma_phys_bpl; | ||
144 | |||
143 | /* cur_iocbq has phys of the dma-able buffer. | 145 | /* cur_iocbq has phys of the dma-able buffer. |
144 | * Iotag is in here | 146 | * Iotag is in here |
145 | */ | 147 | */ |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index e2d07d97fa8b..706bb22a6e8e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -29,9 +29,12 @@ | |||
29 | #include <scsi/scsi_device.h> | 29 | #include <scsi/scsi_device.h> |
30 | #include <scsi/scsi_host.h> | 30 | #include <scsi/scsi_host.h> |
31 | #include <scsi/scsi_transport_fc.h> | 31 | #include <scsi/scsi_transport_fc.h> |
32 | #include <scsi/fc/fc_fs.h> | ||
32 | 33 | ||
34 | #include "lpfc_hw4.h" | ||
33 | #include "lpfc_hw.h" | 35 | #include "lpfc_hw.h" |
34 | #include "lpfc_sli.h" | 36 | #include "lpfc_sli.h" |
37 | #include "lpfc_sli4.h" | ||
35 | #include "lpfc_nl.h" | 38 | #include "lpfc_nl.h" |
36 | #include "lpfc_disc.h" | 39 | #include "lpfc_disc.h" |
37 | #include "lpfc_scsi.h" | 40 | #include "lpfc_scsi.h" |
@@ -121,6 +124,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) | |||
121 | } | 124 | } |
122 | 125 | ||
123 | /** | 126 | /** |
127 | * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. | ||
128 | * @phba: Pointer to HBA context object. | ||
129 | * @xritag: XRI value. | ||
130 | * | ||
131 | * This function clears the sglq pointer from the array of active | ||
132 | * sglq's. The xritag that is passed in is used to index into the | ||
133 | * array. Before the xritag can be used it needs to be adjusted | ||
134 | * by subtracting the xribase. | ||
135 | * | ||
136 | * Returns sglq pointer = success, NULL = Failure. | ||
137 | **/ | ||
138 | static struct lpfc_sglq * | ||
139 | __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) | ||
140 | { | ||
141 | uint16_t adj_xri; | ||
142 | struct lpfc_sglq *sglq; | ||
143 | adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; | ||
144 | if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) | ||
145 | return NULL; | ||
146 | sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri]; | ||
147 | phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL; | ||
148 | return sglq; | ||
149 | } | ||
150 | |||
151 | /** | ||
152 | * __lpfc_get_active_sglq - Get the active sglq for this XRI. | ||
153 | * @phba: Pointer to HBA context object. | ||
154 | * @xritag: XRI value. | ||
155 | * | ||
156 | * This function returns the sglq pointer from the array of active | ||
157 | * sglq's. The xritag that is passed in is used to index into the | ||
158 | * array. Before the xritag can be used it needs to be adjusted | ||
159 | * by subtracting the xribase. | ||
160 | * | ||
161 | * Returns sglq pointer = success, NULL = Failure. | ||
162 | **/ | ||
163 | static struct lpfc_sglq * | ||
164 | __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) | ||
165 | { | ||
166 | uint16_t adj_xri; | ||
167 | struct lpfc_sglq *sglq; | ||
168 | adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; | ||
169 | if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) | ||
170 | return NULL; | ||
171 | sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri]; | ||
172 | return sglq; | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * __lpfc_sli_get_sglq - Allocates an sglq object from the sgl pool | ||
177 | * @phba: Pointer to HBA context object. | ||
178 | * | ||
179 | * This function is called with hbalock held. It | ||
180 | * gets a new driver sglq object from the sglq list. If the | ||
181 | * list is not empty then it is successful and returns a pointer to the newly | ||
182 | * allocated sglq object; otherwise it returns NULL. | ||
183 | **/ | ||
184 | static struct lpfc_sglq * | ||
185 | __lpfc_sli_get_sglq(struct lpfc_hba *phba) | ||
186 | { | ||
187 | struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; | ||
188 | struct lpfc_sglq *sglq = NULL; | ||
189 | uint16_t adj_xri; | ||
190 | list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); | ||
191 | adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; | ||
192 | phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; | ||
193 | return sglq; | ||
194 | } | ||
195 | |||
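Aside: all three sglq helpers above translate a hardware XRI tag into an array index by subtracting the configured xri_base and bounds-checking the result. A stand-alone sketch of that translation (the base and limit are made-up values, and the low-bound check is added here purely for illustration):

    #include <stdio.h>

    #define XRI_BASE 100                /* stand-in for max_cfg_param.xri_base */
    #define MAX_XRI  16                 /* stand-in for max_cfg_param.max_xri */

    /* Translate a hardware XRI tag into an index into the active-sglq array,
     * rejecting tags that fall outside the configured range. */
    static int xri_to_index(unsigned int xritag)
    {
        if (xritag < XRI_BASE || xritag - XRI_BASE > MAX_XRI)
            return -1;
        return (int)(xritag - XRI_BASE);
    }

    int main(void)
    {
        printf("xri 105 -> index %d\n", xri_to_index(105));
        printf("xri 999 -> index %d\n", xri_to_index(999));
        return 0;
    }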
196 | /** | ||
124 | * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool | 197 | * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool |
125 | * @phba: Pointer to HBA context object. | 198 | * @phba: Pointer to HBA context object. |
126 | * | 199 | * |
@@ -298,6 +371,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) | |||
298 | case CMD_GEN_REQUEST64_CR: | 371 | case CMD_GEN_REQUEST64_CR: |
299 | case CMD_GEN_REQUEST64_CX: | 372 | case CMD_GEN_REQUEST64_CX: |
300 | case CMD_XMIT_ELS_RSP64_CX: | 373 | case CMD_XMIT_ELS_RSP64_CX: |
374 | case DSSCMD_IWRITE64_CR: | ||
375 | case DSSCMD_IWRITE64_CX: | ||
376 | case DSSCMD_IREAD64_CR: | ||
377 | case DSSCMD_IREAD64_CX: | ||
378 | case DSSCMD_INVALIDATE_DEK: | ||
379 | case DSSCMD_SET_KEK: | ||
380 | case DSSCMD_GET_KEK_ID: | ||
381 | case DSSCMD_GEN_XFER: | ||
301 | type = LPFC_SOL_IOCB; | 382 | type = LPFC_SOL_IOCB; |
302 | break; | 383 | break; |
303 | case CMD_ABORT_XRI_CN: | 384 | case CMD_ABORT_XRI_CN: |
@@ -2629,6 +2710,56 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) | |||
2629 | return retval; | 2710 | return retval; |
2630 | } | 2711 | } |
2631 | 2712 | ||
2713 | /** | ||
2714 | * lpfc_sli_brdready_s4 - Check for sli4 host ready status | ||
2715 | * @phba: Pointer to HBA context object. | ||
2716 | * @mask: Bit mask to be checked. | ||
2717 | * | ||
2718 | * This function checks the host status register to check if the HBA is | ||
2719 | * ready. This function will wait in a loop for the HBA to be ready. | ||
2720 | * If the HBA is not ready, the function will reset the HBA PCI | ||
2721 | * function again. The function returns 1 when the HBA fails to be ready; | ||
2722 | * otherwise it returns zero. | ||
2723 | **/ | ||
2724 | static int | ||
2725 | lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) | ||
2726 | { | ||
2727 | uint32_t status; | ||
2728 | int retval = 0; | ||
2729 | |||
2730 | /* Read the HBA Host Status Register */ | ||
2731 | status = lpfc_sli4_post_status_check(phba); | ||
2732 | |||
2733 | if (status) { | ||
2734 | phba->pport->port_state = LPFC_VPORT_UNKNOWN; | ||
2735 | lpfc_sli_brdrestart(phba); | ||
2736 | status = lpfc_sli4_post_status_check(phba); | ||
2737 | } | ||
2738 | |||
2739 | /* Check to see if any errors occurred during init */ | ||
2740 | if (status) { | ||
2741 | phba->link_state = LPFC_HBA_ERROR; | ||
2742 | retval = 1; | ||
2743 | } else | ||
2744 | phba->sli4_hba.intr_enable = 0; | ||
2745 | |||
2746 | return retval; | ||
2747 | } | ||
2748 | |||
2749 | /** | ||
2750 | * lpfc_sli_brdready - Wrapper func for checking the hba readiness | ||
2751 | * @phba: Pointer to HBA context object. | ||
2752 | * @mask: Bit mask to be checked. | ||
2753 | * | ||
2754 | * This routine wraps the actual SLI3 or SLI4 hba readiness check routine, | ||
2755 | * invoked through the API jump table function pointer in the lpfc_hba struct. | ||
2756 | **/ | ||
2757 | int | ||
2758 | lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) | ||
2759 | { | ||
2760 | return phba->lpfc_sli_brdready(phba, mask); | ||
2761 | } | ||
2762 | |||
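Aside: lpfc_sli_brdready() is one of several thin wrappers in this patch that do nothing but call through a per-device-group function pointer installed at setup time (an _s3 routine for SLI-3 hardware, an _s4 routine for SLI-4). A compact model of that jump-table dispatch (illustrative types, not the lpfc_hba layout):

    #include <stdio.h>

    /* Minimal model of the SLI-3/SLI-4 API jump table: the hba object
     * carries a function pointer that the thin wrapper simply calls. */
    struct hba_model {
        int (*brdready)(struct hba_model *hba, unsigned int mask);
    };

    static int brdready_s3(struct hba_model *hba, unsigned int mask)
    {
        (void)hba; (void)mask;
        return 0;                       /* pretend the SLI-3 path reports ready */
    }

    static int brdready_s4(struct hba_model *hba, unsigned int mask)
    {
        (void)hba; (void)mask;
        return 0;                       /* pretend the SLI-4 path reports ready */
    }

    static int brdready(struct hba_model *hba, unsigned int mask)
    {
        return hba->brdready(hba, mask);        /* wrapper = one indirect call */
    }

    int main(void)
    {
        struct hba_model s3 = { .brdready = brdready_s3 };
        struct hba_model s4 = { .brdready = brdready_s4 };

        printf("s3 ready check -> %d\n", brdready(&s3, 0));
        printf("s4 ready check -> %d\n", brdready(&s4, 0));
        return 0;
    }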
2632 | #define BARRIER_TEST_PATTERN (0xdeadbeef) | 2763 | #define BARRIER_TEST_PATTERN (0xdeadbeef) |
2633 | 2764 | ||
2634 | /** | 2765 | /** |
@@ -2863,7 +2994,66 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) | |||
2863 | } | 2994 | } |
2864 | 2995 | ||
2865 | /** | 2996 | /** |
2866 | * lpfc_sli_brdrestart - Restart the HBA | 2997 | * lpfc_sli4_brdreset - Reset a sli-4 HBA |
2998 | * @phba: Pointer to HBA context object. | ||
2999 | * | ||
3000 | * This function resets a SLI4 HBA. This function disables PCI layer parity | ||
3001 | * checking while resetting the device. The caller is not required to hold | ||
3002 | * any locks. | ||
3003 | * | ||
3004 | * This function returns 0 always. | ||
3005 | **/ | ||
3006 | int | ||
3007 | lpfc_sli4_brdreset(struct lpfc_hba *phba) | ||
3008 | { | ||
3009 | struct lpfc_sli *psli = &phba->sli; | ||
3010 | uint16_t cfg_value; | ||
3011 | uint8_t qindx; | ||
3012 | |||
3013 | /* Reset HBA */ | ||
3014 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
3015 | "0295 Reset HBA Data: x%x x%x\n", | ||
3016 | phba->pport->port_state, psli->sli_flag); | ||
3017 | |||
3018 | /* perform board reset */ | ||
3019 | phba->fc_eventTag = 0; | ||
3020 | phba->pport->fc_myDID = 0; | ||
3021 | phba->pport->fc_prevDID = 0; | ||
3022 | |||
3023 | /* Turn off parity checking and serr during the physical reset */ | ||
3024 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); | ||
3025 | pci_write_config_word(phba->pcidev, PCI_COMMAND, | ||
3026 | (cfg_value & | ||
3027 | ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); | ||
3028 | |||
3029 | spin_lock_irq(&phba->hbalock); | ||
3030 | psli->sli_flag &= ~(LPFC_PROCESS_LA); | ||
3031 | phba->fcf.fcf_flag = 0; | ||
3032 | /* Clean up the child queue list for the CQs */ | ||
3033 | list_del_init(&phba->sli4_hba.mbx_wq->list); | ||
3034 | list_del_init(&phba->sli4_hba.els_wq->list); | ||
3035 | list_del_init(&phba->sli4_hba.hdr_rq->list); | ||
3036 | list_del_init(&phba->sli4_hba.dat_rq->list); | ||
3037 | list_del_init(&phba->sli4_hba.mbx_cq->list); | ||
3038 | list_del_init(&phba->sli4_hba.els_cq->list); | ||
3039 | list_del_init(&phba->sli4_hba.rxq_cq->list); | ||
3040 | for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) | ||
3041 | list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); | ||
3042 | for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) | ||
3043 | list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); | ||
3044 | spin_unlock_irq(&phba->hbalock); | ||
3045 | |||
3046 | /* Now physically reset the device */ | ||
3047 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT, | ||
3048 | "0389 Performing PCI function reset!\n"); | ||
3049 | /* Perform FCoE PCI function reset */ | ||
3050 | lpfc_pci_function_reset(phba); | ||
3051 | |||
3052 | return 0; | ||
3053 | } | ||
3054 | |||
3055 | /** | ||
3056 | * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba | ||
2867 | * @phba: Pointer to HBA context object. | 3057 | * @phba: Pointer to HBA context object. |
2868 | * | 3058 | * |
2869 | * This function is called in the SLI initialization code path to | 3059 | * This function is called in the SLI initialization code path to |
@@ -2875,8 +3065,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) | |||
2875 | * The function does not guarantee completion of MBX_RESTART mailbox | 3065 | * The function does not guarantee completion of MBX_RESTART mailbox |
2876 | * command before the return of this function. | 3066 | * command before the return of this function. |
2877 | **/ | 3067 | **/ |
2878 | int | 3068 | static int |
2879 | lpfc_sli_brdrestart(struct lpfc_hba *phba) | 3069 | lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) |
2880 | { | 3070 | { |
2881 | MAILBOX_t *mb; | 3071 | MAILBOX_t *mb; |
2882 | struct lpfc_sli *psli; | 3072 | struct lpfc_sli *psli; |
@@ -2915,7 +3105,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |||
2915 | lpfc_sli_brdreset(phba); | 3105 | lpfc_sli_brdreset(phba); |
2916 | phba->pport->stopped = 0; | 3106 | phba->pport->stopped = 0; |
2917 | phba->link_state = LPFC_INIT_START; | 3107 | phba->link_state = LPFC_INIT_START; |
2918 | 3108 | phba->hba_flag = 0; | |
2919 | spin_unlock_irq(&phba->hbalock); | 3109 | spin_unlock_irq(&phba->hbalock); |
2920 | 3110 | ||
2921 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); | 3111 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); |
@@ -2930,6 +3120,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) | |||
2930 | } | 3120 | } |
2931 | 3121 | ||
2932 | /** | 3122 | /** |
3123 | * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba | ||
3124 | * @phba: Pointer to HBA context object. | ||
3125 | * | ||
3126 | * This function is called in the SLI initialization code path to restart | ||
3127 | * a SLI4 HBA. The caller is not required to hold any lock. | ||
3128 | * At the end of the function, it calls lpfc_hba_down_post function to | ||
3129 | * free any pending commands. | ||
3130 | **/ | ||
3131 | static int | ||
3132 | lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) | ||
3133 | { | ||
3134 | struct lpfc_sli *psli = &phba->sli; | ||
3135 | |||
3136 | |||
3137 | /* Restart HBA */ | ||
3138 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, | ||
3139 | "0296 Restart HBA Data: x%x x%x\n", | ||
3140 | phba->pport->port_state, psli->sli_flag); | ||
3141 | |||
3142 | lpfc_sli4_brdreset(phba); | ||
3143 | |||
3144 | spin_lock_irq(&phba->hbalock); | ||
3145 | phba->pport->stopped = 0; | ||
3146 | phba->link_state = LPFC_INIT_START; | ||
3147 | phba->hba_flag = 0; | ||
3148 | spin_unlock_irq(&phba->hbalock); | ||
3149 | |||
3150 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); | ||
3151 | psli->stats_start = get_seconds(); | ||
3152 | |||
3153 | lpfc_hba_down_post(phba); | ||
3154 | |||
3155 | return 0; | ||
3156 | } | ||
3157 | |||
3158 | /** | ||
3159 | * lpfc_sli_brdrestart - Wrapper func for restarting hba | ||
3160 | * @phba: Pointer to HBA context object. | ||
3161 | * | ||
3162 | * This routine wraps the actual SLI3 or SLI4 hba restart routine, invoked | ||
3163 | * through the API jump table function pointer in the lpfc_hba struct. | ||
3164 | **/ | ||
3165 | int | ||
3166 | lpfc_sli_brdrestart(struct lpfc_hba *phba) | ||
3167 | { | ||
3168 | return phba->lpfc_sli_brdrestart(phba); | ||
3169 | } | ||
3170 | |||
3171 | /** | ||
2933 | * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart | 3172 | * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart |
2934 | * @phba: Pointer to HBA context object. | 3173 | * @phba: Pointer to HBA context object. |
2935 | * | 3174 | * |
@@ -3353,6 +3592,488 @@ lpfc_sli_hba_setup_error: | |||
3353 | return rc; | 3592 | return rc; |
3354 | } | 3593 | } |
3355 | 3594 | ||
3595 | /** | ||
3596 | * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region | ||
3597 | * @phba: Pointer to HBA context object. | ||
3598 | * @mboxq: mailbox pointer. | ||
3599 | * This function issues a dump mailbox command to read config region | ||
3600 | * 23, parses the records in the region, and populates the driver | ||
3601 | * data structures. | ||
3602 | **/ | ||
3603 | static int | ||
3604 | lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, | ||
3605 | LPFC_MBOXQ_t *mboxq) | ||
3606 | { | ||
3607 | struct lpfc_dmabuf *mp; | ||
3608 | struct lpfc_mqe *mqe; | ||
3609 | uint32_t data_length; | ||
3610 | int rc; | ||
3611 | |||
3612 | /* Program the default value of vlan_id and fc_map */ | ||
3613 | phba->valid_vlan = 0; | ||
3614 | phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; | ||
3615 | phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; | ||
3616 | phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; | ||
3617 | |||
3618 | mqe = &mboxq->u.mqe; | ||
3619 | if (lpfc_dump_fcoe_param(phba, mboxq)) | ||
3620 | return -ENOMEM; | ||
3621 | |||
3622 | mp = (struct lpfc_dmabuf *) mboxq->context1; | ||
3623 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
3624 | |||
3625 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
3626 | "(%d):2571 Mailbox cmd x%x Status x%x " | ||
3627 | "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " | ||
3628 | "x%x x%x x%x x%x x%x x%x x%x x%x x%x " | ||
3629 | "CQ: x%x x%x x%x x%x\n", | ||
3630 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
3631 | bf_get(lpfc_mqe_command, mqe), | ||
3632 | bf_get(lpfc_mqe_status, mqe), | ||
3633 | mqe->un.mb_words[0], mqe->un.mb_words[1], | ||
3634 | mqe->un.mb_words[2], mqe->un.mb_words[3], | ||
3635 | mqe->un.mb_words[4], mqe->un.mb_words[5], | ||
3636 | mqe->un.mb_words[6], mqe->un.mb_words[7], | ||
3637 | mqe->un.mb_words[8], mqe->un.mb_words[9], | ||
3638 | mqe->un.mb_words[10], mqe->un.mb_words[11], | ||
3639 | mqe->un.mb_words[12], mqe->un.mb_words[13], | ||
3640 | mqe->un.mb_words[14], mqe->un.mb_words[15], | ||
3641 | mqe->un.mb_words[16], mqe->un.mb_words[50], | ||
3642 | mboxq->mcqe.word0, | ||
3643 | mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, | ||
3644 | mboxq->mcqe.trailer); | ||
3645 | |||
3646 | if (rc) { | ||
3647 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | ||
3648 | kfree(mp); | ||
3649 | return -EIO; | ||
3650 | } | ||
3651 | data_length = mqe->un.mb_words[5]; | ||
3652 | if (data_length > DMP_FCOEPARAM_RGN_SIZE) | ||
3653 | return -EIO; | ||
3654 | |||
3655 | lpfc_parse_fcoe_conf(phba, mp->virt, data_length); | ||
3656 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | ||
3657 | kfree(mp); | ||
3658 | return 0; | ||
3659 | } | ||
3660 | |||
3661 | /** | ||
3662 | * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data | ||
3663 | * @phba: pointer to lpfc hba data structure. | ||
3664 | * @mboxq: pointer to the LPFC_MBOXQ_t structure. | ||
3665 | * @vpd: pointer to the memory to hold resulting port vpd data. | ||
3666 | * @vpd_size: On input, the number of bytes allocated to @vpd. | ||
3667 | * On output, the number of data bytes in @vpd. | ||
3668 | * | ||
3669 | * This routine executes a READ_REV SLI4 mailbox command. In | ||
3670 | * addition, this routine gets the port vpd data. | ||
3671 | * | ||
3672 | * Return codes | ||
3673 | * 0 - successful | ||
3674 | * ENOMEM - could not allocate memory. | ||
3675 | **/ | ||
3676 | static int | ||
3677 | lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, | ||
3678 | uint8_t *vpd, uint32_t *vpd_size) | ||
3679 | { | ||
3680 | int rc = 0; | ||
3681 | uint32_t dma_size; | ||
3682 | struct lpfc_dmabuf *dmabuf; | ||
3683 | struct lpfc_mqe *mqe; | ||
3684 | |||
3685 | dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); | ||
3686 | if (!dmabuf) | ||
3687 | return -ENOMEM; | ||
3688 | |||
3689 | /* | ||
3690 | * Get a DMA buffer for the vpd data resulting from the READ_REV | ||
3691 | * mailbox command. | ||
3692 | */ | ||
3693 | dma_size = *vpd_size; | ||
3694 | dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, | ||
3695 | dma_size, | ||
3696 | &dmabuf->phys, | ||
3697 | GFP_KERNEL); | ||
3698 | if (!dmabuf->virt) { | ||
3699 | kfree(dmabuf); | ||
3700 | return -ENOMEM; | ||
3701 | } | ||
3702 | memset(dmabuf->virt, 0, dma_size); | ||
3703 | |||
3704 | /* | ||
3705 | * The SLI4 implementation of READ_REV conflicts at word1, | ||
3706 | * bits 31:16 and SLI4 adds vpd functionality not present | ||
3707 | * in SLI3. This code corrects the conflicts. | ||
3708 | */ | ||
3709 | lpfc_read_rev(phba, mboxq); | ||
3710 | mqe = &mboxq->u.mqe; | ||
3711 | mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); | ||
3712 | mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); | ||
3713 | mqe->un.read_rev.word1 &= 0x0000FFFF; | ||
3714 | bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); | ||
3715 | bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); | ||
3716 | |||
3717 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
3718 | if (rc) { | ||
3719 | dma_free_coherent(&phba->pcidev->dev, dma_size, | ||
3720 | dmabuf->virt, dmabuf->phys); | ||
3721 | return -EIO; | ||
3722 | } | ||
3723 | |||
3724 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
3725 | "(%d):0380 Mailbox cmd x%x Status x%x " | ||
3726 | "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " | ||
3727 | "x%x x%x x%x x%x x%x x%x x%x x%x x%x " | ||
3728 | "CQ: x%x x%x x%x x%x\n", | ||
3729 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
3730 | bf_get(lpfc_mqe_command, mqe), | ||
3731 | bf_get(lpfc_mqe_status, mqe), | ||
3732 | mqe->un.mb_words[0], mqe->un.mb_words[1], | ||
3733 | mqe->un.mb_words[2], mqe->un.mb_words[3], | ||
3734 | mqe->un.mb_words[4], mqe->un.mb_words[5], | ||
3735 | mqe->un.mb_words[6], mqe->un.mb_words[7], | ||
3736 | mqe->un.mb_words[8], mqe->un.mb_words[9], | ||
3737 | mqe->un.mb_words[10], mqe->un.mb_words[11], | ||
3738 | mqe->un.mb_words[12], mqe->un.mb_words[13], | ||
3739 | mqe->un.mb_words[14], mqe->un.mb_words[15], | ||
3740 | mqe->un.mb_words[16], mqe->un.mb_words[50], | ||
3741 | mboxq->mcqe.word0, | ||
3742 | mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, | ||
3743 | mboxq->mcqe.trailer); | ||
3744 | |||
3745 | /* | ||
3746 | * The available vpd length cannot be bigger than the | ||
3747 | * DMA buffer passed to the port. Catch the less than | ||
3748 | * case and update the caller's size. | ||
3749 | */ | ||
3750 | if (mqe->un.read_rev.avail_vpd_len < *vpd_size) | ||
3751 | *vpd_size = mqe->un.read_rev.avail_vpd_len; | ||
3752 | |||
3753 | lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size); | ||
3754 | dma_free_coherent(&phba->pcidev->dev, dma_size, | ||
3755 | dmabuf->virt, dmabuf->phys); | ||
3756 | kfree(dmabuf); | ||
3757 | return 0; | ||
3758 | } | ||
3759 | |||
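Aside: lpfc_sli4_read_rev() never copies more VPD than the caller's buffer holds; it clamps the port-reported length to the supplied size before the bcopy. The clamp itself is just a min(), shown here as a stand-alone sketch with arbitrary sizes:

    #include <stdio.h>

    /* The port reports how much VPD it actually has; the driver must never
     * copy more than the caller's buffer can hold. */
    static unsigned int clamp_vpd_len(unsigned int avail, unsigned int bufsize)
    {
        return (avail < bufsize) ? avail : bufsize;
    }

    int main(void)
    {
        printf("avail 512,  buf 4096 -> copy %u bytes\n", clamp_vpd_len(512, 4096));
        printf("avail 8192, buf 4096 -> copy %u bytes\n", clamp_vpd_len(8192, 4096));
        return 0;
    }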
3760 | /** | ||
3761 | * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues | ||
3762 | * @phba: pointer to lpfc hba data structure. | ||
3763 | * | ||
3764 | * This routine is called to explicitly arm the SLI4 device's completion and | ||
3765 | * event queues. | ||
3766 | **/ | ||
3767 | static void | ||
3768 | lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) | ||
3769 | { | ||
3770 | uint8_t fcp_eqidx; | ||
3771 | |||
3772 | lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); | ||
3773 | lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); | ||
3774 | lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM); | ||
3775 | for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) | ||
3776 | lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], | ||
3777 | LPFC_QUEUE_REARM); | ||
3778 | lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); | ||
3779 | for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) | ||
3780 | lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], | ||
3781 | LPFC_QUEUE_REARM); | ||
3782 | } | ||
3783 | |||
3784 | /** | ||
3785 | * lpfc_sli4_hba_setup - SLI4 device initialization PCI function | ||
3786 | * @phba: Pointer to HBA context object. | ||
3787 | * | ||
3788 | * This function is the main SLI4 device initialization PCI function. This | ||
3789 | * function is called by the HBA initialization code, HBA reset code and | ||
3790 | * HBA error attention handler code. Caller is not required to hold any | ||
3791 | * locks. | ||
3792 | **/ | ||
3793 | int | ||
3794 | lpfc_sli4_hba_setup(struct lpfc_hba *phba) | ||
3795 | { | ||
3796 | int rc; | ||
3797 | LPFC_MBOXQ_t *mboxq; | ||
3798 | struct lpfc_mqe *mqe; | ||
3799 | uint8_t *vpd; | ||
3800 | uint32_t vpd_size; | ||
3801 | uint32_t ftr_rsp = 0; | ||
3802 | struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); | ||
3803 | struct lpfc_vport *vport = phba->pport; | ||
3804 | struct lpfc_dmabuf *mp; | ||
3805 | |||
3806 | /* Perform a PCI function reset to start from clean */ | ||
3807 | rc = lpfc_pci_function_reset(phba); | ||
3808 | if (unlikely(rc)) | ||
3809 | return -ENODEV; | ||
3810 | |||
3811 | /* Check the HBA Host Status Register for readiness */ | ||
3812 | rc = lpfc_sli4_post_status_check(phba); | ||
3813 | if (unlikely(rc)) | ||
3814 | return -ENODEV; | ||
3815 | else { | ||
3816 | spin_lock_irq(&phba->hbalock); | ||
3817 | phba->sli.sli_flag |= LPFC_SLI_ACTIVE; | ||
3818 | spin_unlock_irq(&phba->hbalock); | ||
3819 | } | ||
3820 | |||
3821 | /* | ||
3822 | * Allocate a single mailbox container for initializing the | ||
3823 | * port. | ||
3824 | */ | ||
3825 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | ||
3826 | if (!mboxq) | ||
3827 | return -ENOMEM; | ||
3828 | |||
3829 | /* | ||
3830 | * Continue initialization with default values even if driver failed | ||
3831 | * to read FCoE param config regions | ||
3832 | */ | ||
3833 | if (lpfc_sli4_read_fcoe_params(phba, mboxq)) | ||
3834 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, | ||
3835 | "2570 Failed to read FCoE parameters \n"); | ||
3836 | |||
3837 | /* Issue READ_REV to collect vpd and FW information. */ | ||
3838 | vpd_size = PAGE_SIZE; | ||
3839 | vpd = kzalloc(vpd_size, GFP_KERNEL); | ||
3840 | if (!vpd) { | ||
3841 | rc = -ENOMEM; | ||
3842 | goto out_free_mbox; | ||
3843 | } | ||
3844 | |||
3845 | rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); | ||
3846 | if (unlikely(rc)) | ||
3847 | goto out_free_vpd; | ||
3848 | |||
3849 | mqe = &mboxq->u.mqe; | ||
3850 | if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, | ||
3851 | &mqe->un.read_rev) != LPFC_SLI_REV4) || | ||
3852 | (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { | ||
3853 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
3854 | "0376 READ_REV Error. SLI Level %d " | ||
3855 | "FCoE enabled %d\n", | ||
3856 | bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), | ||
3857 | bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)); | ||
3858 | rc = -EIO; | ||
3859 | goto out_free_vpd; | ||
3860 | } | ||
3861 | /* Single threaded at this point, no need for lock */ | ||
3862 | spin_lock_irq(&phba->hbalock); | ||
3863 | phba->hba_flag |= HBA_FCOE_SUPPORT; | ||
3864 | spin_unlock_irq(&phba->hbalock); | ||
3865 | /* | ||
3866 | * Evaluate the read rev and vpd data. Populate the driver | ||
3867 | * state with the results. If this routine fails, the failure | ||
3868 | * is not fatal as the driver will use generic values. | ||
3869 | */ | ||
3870 | rc = lpfc_parse_vpd(phba, vpd, vpd_size); | ||
3871 | if (unlikely(!rc)) { | ||
3872 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
3873 | "0377 Error %d parsing vpd. " | ||
3874 | "Using defaults.\n", rc); | ||
3875 | rc = 0; | ||
3876 | } | ||
3877 | |||
3878 | /* By now, we should have determined the SLI revision; hard code it for now */ | ||
3879 | phba->sli_rev = LPFC_SLI_REV4; | ||
3880 | |||
3881 | /* | ||
3882 | * Discover the port's supported feature set and match it against the | ||
3883 | * hosts requests. | ||
3884 | */ | ||
3885 | lpfc_request_features(phba, mboxq); | ||
3886 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
3887 | if (unlikely(rc)) { | ||
3888 | rc = -EIO; | ||
3889 | goto out_free_vpd; | ||
3890 | } | ||
3891 | |||
3892 | /* | ||
3893 | * The port must support FCP initiator mode as this is the | ||
3894 | * only mode running in the host. | ||
3895 | */ | ||
3896 | if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { | ||
3897 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, | ||
3898 | "0378 No support for fcpi mode.\n"); | ||
3899 | ftr_rsp++; | ||
3900 | } | ||
3901 | |||
3902 | /* | ||
3903 | * If the port cannot support the host's requested features | ||
3904 | * then turn off the global config parameters to disable the | ||
3905 | * feature in the driver. This is not a fatal error. | ||
3906 | */ | ||
3907 | if ((phba->cfg_enable_bg) && | ||
3908 | !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) | ||
3909 | ftr_rsp++; | ||
3910 | |||
3911 | if (phba->max_vpi && phba->cfg_enable_npiv && | ||
3912 | !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) | ||
3913 | ftr_rsp++; | ||
3914 | |||
3915 | if (ftr_rsp) { | ||
3916 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, | ||
3917 | "0379 Feature Mismatch Data: x%08x %08x " | ||
3918 | "x%x x%x x%x\n", mqe->un.req_ftrs.word2, | ||
3919 | mqe->un.req_ftrs.word3, phba->cfg_enable_bg, | ||
3920 | phba->cfg_enable_npiv, phba->max_vpi); | ||
3921 | if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) | ||
3922 | phba->cfg_enable_bg = 0; | ||
3923 | if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) | ||
3924 | phba->cfg_enable_npiv = 0; | ||
3925 | } | ||
3926 | |||
3927 | /* These SLI3 features are assumed in SLI4 */ | ||
3928 | spin_lock_irq(&phba->hbalock); | ||
3929 | phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); | ||
3930 | spin_unlock_irq(&phba->hbalock); | ||
3931 | |||
3932 | /* Read the port's service parameters. */ | ||
3933 | lpfc_read_sparam(phba, mboxq, vport->vpi); | ||
3934 | mboxq->vport = vport; | ||
3935 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); | ||
3936 | mp = (struct lpfc_dmabuf *) mboxq->context1; | ||
3937 | if (rc == MBX_SUCCESS) { | ||
3938 | memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); | ||
3939 | rc = 0; | ||
3940 | } | ||
3941 | |||
3942 | /* | ||
3943 | * This memory was allocated by the lpfc_read_sparam routine. Release | ||
3944 | * it to the mbuf pool. | ||
3945 | */ | ||
3946 | lpfc_mbuf_free(phba, mp->virt, mp->phys); | ||
3947 | kfree(mp); | ||
3948 | mboxq->context1 = NULL; | ||
3949 | if (unlikely(rc)) { | ||
3950 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
3951 | "0382 READ_SPARAM command failed " | ||
3952 | "status %d, mbxStatus x%x\n", | ||
3953 | rc, bf_get(lpfc_mqe_status, mqe)); | ||
3954 | phba->link_state = LPFC_HBA_ERROR; | ||
3955 | rc = -EIO; | ||
3956 | goto out_free_vpd; | ||
3957 | } | ||
3958 | |||
3959 | if (phba->cfg_soft_wwnn) | ||
3960 | u64_to_wwn(phba->cfg_soft_wwnn, | ||
3961 | vport->fc_sparam.nodeName.u.wwn); | ||
3962 | if (phba->cfg_soft_wwpn) | ||
3963 | u64_to_wwn(phba->cfg_soft_wwpn, | ||
3964 | vport->fc_sparam.portName.u.wwn); | ||
3965 | memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, | ||
3966 | sizeof(struct lpfc_name)); | ||
3967 | memcpy(&vport->fc_portname, &vport->fc_sparam.portName, | ||
3968 | sizeof(struct lpfc_name)); | ||
3969 | |||
3970 | /* Update the fc_host data structures with new wwn. */ | ||
3971 | fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); | ||
3972 | fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); | ||
3973 | |||
3974 | /* Register SGL pool to the device using non-embedded mailbox command */ | ||
3975 | rc = lpfc_sli4_post_sgl_list(phba); | ||
3976 | if (unlikely(rc)) { | ||
3977 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
3978 | "0582 Error %d during sgl post operation", rc); | ||
3979 | rc = -ENODEV; | ||
3980 | goto out_free_vpd; | ||
3981 | } | ||
3982 | |||
3983 | /* Register SCSI SGL pool to the device */ | ||
3984 | rc = lpfc_sli4_repost_scsi_sgl_list(phba); | ||
3985 | if (unlikely(rc)) { | ||
3986 | lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, | ||
3987 | "0383 Error %d during scsi sgl post opeation", | ||
3988 | rc); | ||
3989 | /* Some Scsi buffers were moved to the abort scsi list */ | ||
3990 | /* A pci function reset will repost them */ | ||
3991 | rc = -ENODEV; | ||
3992 | goto out_free_vpd; | ||
3993 | } | ||
3994 | |||
3995 | /* Post the rpi header region to the device. */ | ||
3996 | rc = lpfc_sli4_post_all_rpi_hdrs(phba); | ||
3997 | if (unlikely(rc)) { | ||
3998 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
3999 | "0393 Error %d during rpi post operation\n", | ||
4000 | rc); | ||
4001 | rc = -ENODEV; | ||
4002 | goto out_free_vpd; | ||
4003 | } | ||
4004 | /* Temporary initialization of lpfc_fip_flag to non-fip */ | ||
4005 | bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); | ||
4006 | |||
4007 | /* Set up all the queues to the device */ | ||
4008 | rc = lpfc_sli4_queue_setup(phba); | ||
4009 | if (unlikely(rc)) { | ||
4010 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
4011 | "0381 Error %d during queue setup.\n ", rc); | ||
4012 | goto out_stop_timers; | ||
4013 | } | ||
4014 | |||
4015 | /* Arm the CQs and then EQs on device */ | ||
4016 | lpfc_sli4_arm_cqeq_intr(phba); | ||
4017 | |||
4018 | /* Indicate device interrupt mode */ | ||
4019 | phba->sli4_hba.intr_enable = 1; | ||
4020 | |||
4021 | /* Allow asynchronous mailbox command to go through */ | ||
4022 | spin_lock_irq(&phba->hbalock); | ||
4023 | phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; | ||
4024 | spin_unlock_irq(&phba->hbalock); | ||
4025 | |||
4026 | /* Post receive buffers to the device */ | ||
4027 | lpfc_sli4_rb_setup(phba); | ||
4028 | |||
4029 | /* Start the ELS watchdog timer */ | ||
4030 | /* | ||
4031 | * The driver for SLI4 is not yet ready to process timeouts | ||
4032 | * or interrupts. Once it is, the comment bars can be removed. | ||
4033 | */ | ||
4034 | /* mod_timer(&vport->els_tmofunc, | ||
4035 | * jiffies + HZ * (phba->fc_ratov*2)); */ | ||
4036 | |||
4037 | /* Start heart beat timer */ | ||
4038 | mod_timer(&phba->hb_tmofunc, | ||
4039 | jiffies + HZ * LPFC_HB_MBOX_INTERVAL); | ||
4040 | phba->hb_outstanding = 0; | ||
4041 | phba->last_completion_time = jiffies; | ||
4042 | |||
4043 | /* Start error attention (ERATT) polling timer */ | ||
4044 | mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); | ||
4045 | |||
4046 | /* | ||
4047 | * The port is ready, set the host's link state to LINK_DOWN | ||
4048 | * in preparation for link interrupts. | ||
4049 | */ | ||
4050 | lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed); | ||
4051 | mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; | ||
4052 | lpfc_set_loopback_flag(phba); | ||
4053 | /* Change driver state to LPFC_LINK_DOWN right before init link */ | ||
4054 | spin_lock_irq(&phba->hbalock); | ||
4055 | phba->link_state = LPFC_LINK_DOWN; | ||
4056 | spin_unlock_irq(&phba->hbalock); | ||
4057 | rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); | ||
4058 | if (unlikely(rc != MBX_NOT_FINISHED)) { | ||
4059 | kfree(vpd); | ||
4060 | return 0; | ||
4061 | } else | ||
4062 | rc = -EIO; | ||
4063 | |||
4064 | /* Unset all the queues set up in this routine when error out */ | ||
4065 | if (rc) | ||
4066 | lpfc_sli4_queue_unset(phba); | ||
4067 | |||
4068 | out_stop_timers: | ||
4069 | if (rc) | ||
4070 | lpfc_stop_hba_timers(phba); | ||
4071 | out_free_vpd: | ||
4072 | kfree(vpd); | ||
4073 | out_free_mbox: | ||
4074 | mempool_free(mboxq, phba->mbox_mem_pool); | ||
4075 | return rc; | ||
4076 | } | ||
3356 | 4077 | ||
3357 | /** | 4078 | /** |
3358 | * lpfc_mbox_timeout - Timeout call back function for mbox timer | 4079 | * lpfc_mbox_timeout - Timeout call back function for mbox timer |
@@ -3812,13 +4533,420 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, | |||
3812 | 4533 | ||
3813 | out_not_finished: | 4534 | out_not_finished: |
3814 | if (processing_queue) { | 4535 | if (processing_queue) { |
3815 | pmbox->mb.mbxStatus = MBX_NOT_FINISHED; | 4536 | pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; |
3816 | lpfc_mbox_cmpl_put(phba, pmbox); | 4537 | lpfc_mbox_cmpl_put(phba, pmbox); |
3817 | } | 4538 | } |
3818 | return MBX_NOT_FINISHED; | 4539 | return MBX_NOT_FINISHED; |
3819 | } | 4540 | } |
3820 | 4541 | ||
3821 | /** | 4542 | /** |
4543 | * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox | ||
4544 | * @phba: Pointer to HBA context object. | ||
4545 | * @mboxq: Pointer to mailbox object. | ||
4546 | * | ||
4547 | * The function posts a mailbox to the port. The mailbox is expected | ||
4548 | * to be completely filled in and ready for the port to operate on it. | ||
4549 | * This routine executes a synchronous completion operation on the | ||
4550 | * mailbox by polling for its completion. | ||
4551 | * | ||
4552 | * The caller must not be holding any locks when calling this routine. | ||
4553 | * | ||
4554 | * Returns: | ||
4555 | * MBX_SUCCESS - mailbox posted successfully | ||
4556 | * Any of the MBX error values. | ||
4557 | **/ | ||
4558 | static int | ||
4559 | lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | ||
4560 | { | ||
4561 | int rc = MBX_SUCCESS; | ||
4562 | unsigned long iflag; | ||
4563 | uint32_t db_ready; | ||
4564 | uint32_t mcqe_status; | ||
4565 | uint32_t mbx_cmnd; | ||
4566 | unsigned long timeout; | ||
4567 | struct lpfc_sli *psli = &phba->sli; | ||
4568 | struct lpfc_mqe *mb = &mboxq->u.mqe; | ||
4569 | struct lpfc_bmbx_create *mbox_rgn; | ||
4570 | struct dma_address *dma_address; | ||
4571 | struct lpfc_register bmbx_reg; | ||
4572 | |||
4573 | /* | ||
4574 | * Only one mailbox can be active to the bootstrap mailbox region | ||
4575 | * at a time and there is no queueing provided. | ||
4576 | */ | ||
4577 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
4578 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { | ||
4579 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
4580 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
4581 | "(%d):2532 Mailbox command x%x (x%x) " | ||
4582 | "cannot issue Data: x%x x%x\n", | ||
4583 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4584 | mboxq->u.mb.mbxCommand, | ||
4585 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4586 | psli->sli_flag, MBX_POLL); | ||
4587 | return MBXERR_ERROR; | ||
4588 | } | ||
4589 | /* The server grabs the token and owns it until release */ | ||
4590 | psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; | ||
4591 | phba->sli.mbox_active = mboxq; | ||
4592 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
4593 | |||
4594 | /* | ||
4595 | * Initialize the bootstrap memory region to avoid stale data areas | ||
4596 | * in the mailbox post. Then copy the caller's mailbox contents to | ||
4597 | * the bmbx mailbox region. | ||
4598 | */ | ||
4599 | mbx_cmnd = bf_get(lpfc_mqe_command, mb); | ||
4600 | memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); | ||
4601 | lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, | ||
4602 | sizeof(struct lpfc_mqe)); | ||
4603 | |||
4604 | /* Post the high mailbox dma address to the port and wait for ready. */ | ||
4605 | dma_address = &phba->sli4_hba.bmbx.dma_address; | ||
4606 | writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); | ||
4607 | |||
4608 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) | ||
4609 | * 1000) + jiffies; | ||
4610 | do { | ||
4611 | bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); | ||
4612 | db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); | ||
4613 | if (!db_ready) | ||
4614 | msleep(2); | ||
4615 | |||
4616 | if (time_after(jiffies, timeout)) { | ||
4617 | rc = MBXERR_ERROR; | ||
4618 | goto exit; | ||
4619 | } | ||
4620 | } while (!db_ready); | ||
4621 | |||
4622 | /* Post the low mailbox dma address to the port. */ | ||
4623 | writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); | ||
4624 | timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) | ||
4625 | * 1000) + jiffies; | ||
4626 | do { | ||
4627 | bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); | ||
4628 | db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); | ||
4629 | if (!db_ready) | ||
4630 | msleep(2); | ||
4631 | |||
4632 | if (time_after(jiffies, timeout)) { | ||
4633 | rc = MBXERR_ERROR; | ||
4634 | goto exit; | ||
4635 | } | ||
4636 | } while (!db_ready); | ||
4637 | |||
4638 | /* | ||
4639 | * Read the CQ to ensure the mailbox has completed. | ||
4640 | * If so, update the mailbox status so that the upper layers | ||
4641 | * can complete the request normally. | ||
4642 | */ | ||
4643 | lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, | ||
4644 | sizeof(struct lpfc_mqe)); | ||
4645 | mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; | ||
4646 | lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, | ||
4647 | sizeof(struct lpfc_mcqe)); | ||
4648 | mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); | ||
4649 | |||
4650 | /* Prefix the mailbox status with range x4000 to note SLI4 status. */ | ||
4651 | if (mcqe_status != MB_CQE_STATUS_SUCCESS) { | ||
4652 | bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); | ||
4653 | rc = MBXERR_ERROR; | ||
4654 | } | ||
4655 | |||
4656 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
4657 | "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " | ||
4658 | "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" | ||
4659 | " x%x x%x CQ: x%x x%x x%x x%x\n", | ||
4660 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4661 | mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4662 | bf_get(lpfc_mqe_status, mb), | ||
4663 | mb->un.mb_words[0], mb->un.mb_words[1], | ||
4664 | mb->un.mb_words[2], mb->un.mb_words[3], | ||
4665 | mb->un.mb_words[4], mb->un.mb_words[5], | ||
4666 | mb->un.mb_words[6], mb->un.mb_words[7], | ||
4667 | mb->un.mb_words[8], mb->un.mb_words[9], | ||
4668 | mb->un.mb_words[10], mb->un.mb_words[11], | ||
4669 | mb->un.mb_words[12], mboxq->mcqe.word0, | ||
4670 | mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, | ||
4671 | mboxq->mcqe.trailer); | ||
4672 | exit: | ||
4673 | /* We are holding the token, no lock needed when releasing it */ | ||
4674 | spin_lock_irqsave(&phba->hbalock, iflag); | ||
4675 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
4676 | phba->sli.mbox_active = NULL; | ||
4677 | spin_unlock_irqrestore(&phba->hbalock, iflag); | ||
4678 | return rc; | ||
4679 | } | ||
4680 | |||
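Editorial note: the two doorbell-ready polling loops in lpfc_sli4_post_sync_mbox() above are identical. Purely as a hedged sketch (not part of this patch), they could be factored into one helper that reuses the readl()/bf_get() accessors and jiffies-based timeout already shown; the helper name is hypothetical.

static int lpfc_bmbx_wait_ready_sketch(struct lpfc_hba *phba,
				       unsigned long timeout)
{
	struct lpfc_register bmbx_reg;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		if (bf_get(lpfc_bmbx_rdy, &bmbx_reg))
			return 0;	/* port consumed the posted address */
		msleep(2);
	} while (!time_after(jiffies, timeout));

	return -ETIMEDOUT;	/* ready bit never asserted before timeout */
}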
4681 | /** | ||
4682 | * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware | ||
4683 | * @phba: Pointer to HBA context object. | ||
4684 | * @mboxq: Pointer to mailbox object. | ||
4685 | * @flag: Flag indicating how the mailbox needs to be processed. | ||
4686 | * | ||
4687 | * This function is called by discovery code and HBA management code to submit | ||
4688 | * a mailbox command to firmware with SLI-4 interface spec. | ||
4689 | * | ||
4690 | * Returns a mailbox completion code; the caller owns the mailbox command | ||
4691 | * after the function returns. | ||
4692 | **/ | ||
4693 | static int | ||
4694 | lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, | ||
4695 | uint32_t flag) | ||
4696 | { | ||
4697 | struct lpfc_sli *psli = &phba->sli; | ||
4698 | unsigned long iflags; | ||
4699 | int rc; | ||
4700 | |||
4701 | /* Detect polling mode and jump to a handler */ | ||
4702 | if (!phba->sli4_hba.intr_enable) { | ||
4703 | if (flag == MBX_POLL) | ||
4704 | rc = lpfc_sli4_post_sync_mbox(phba, mboxq); | ||
4705 | else | ||
4706 | rc = -EIO; | ||
4707 | if (rc != MBX_SUCCESS) | ||
4708 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
4709 | "(%d):2541 Mailbox command x%x " | ||
4710 | "(x%x) cannot issue Data: x%x x%x\n", | ||
4711 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4712 | mboxq->u.mb.mbxCommand, | ||
4713 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4714 | psli->sli_flag, flag); | ||
4715 | return rc; | ||
4716 | } else if (flag == MBX_POLL) { | ||
4717 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
4718 | "(%d):2542 Mailbox command x%x (x%x) " | ||
4719 | "cannot issue Data: x%x x%x\n", | ||
4720 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4721 | mboxq->u.mb.mbxCommand, | ||
4722 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4723 | psli->sli_flag, flag); | ||
4724 | return -EIO; | ||
4725 | } | ||
4726 | |||
4727 | /* Now handle the interrupt mode asynchronous mailbox command */ | ||
4728 | rc = lpfc_mbox_cmd_check(phba, mboxq); | ||
4729 | if (rc) { | ||
4730 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
4731 | "(%d):2543 Mailbox command x%x (x%x) " | ||
4732 | "cannot issue Data: x%x x%x\n", | ||
4733 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4734 | mboxq->u.mb.mbxCommand, | ||
4735 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4736 | psli->sli_flag, flag); | ||
4737 | goto out_not_finished; | ||
4738 | } | ||
4739 | rc = lpfc_mbox_dev_check(phba); | ||
4740 | if (unlikely(rc)) { | ||
4741 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
4742 | "(%d):2544 Mailbox command x%x (x%x) " | ||
4743 | "cannot issue Data: x%x x%x\n", | ||
4744 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4745 | mboxq->u.mb.mbxCommand, | ||
4746 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4747 | psli->sli_flag, flag); | ||
4748 | goto out_not_finished; | ||
4749 | } | ||
4750 | |||
4751 | /* Put the mailbox command to the driver internal FIFO */ | ||
4752 | psli->slistat.mbox_busy++; | ||
4753 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
4754 | lpfc_mbox_put(phba, mboxq); | ||
4755 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
4756 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
4757 | "(%d):0354 Mbox cmd issue - Enqueue Data: " | ||
4758 | "x%x (x%x) x%x x%x x%x\n", | ||
4759 | mboxq->vport ? mboxq->vport->vpi : 0xffffff, | ||
4760 | bf_get(lpfc_mqe_command, &mboxq->u.mqe), | ||
4761 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4762 | phba->pport->port_state, | ||
4763 | psli->sli_flag, MBX_NOWAIT); | ||
4764 | /* Wake up worker thread to transport mailbox command from head */ | ||
4765 | lpfc_worker_wake_up(phba); | ||
4766 | |||
4767 | return MBX_BUSY; | ||
4768 | |||
4769 | out_not_finished: | ||
4770 | return MBX_NOT_FINISHED; | ||
4771 | } | ||
4772 | |||
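For illustration only, a hedged sketch of how a caller might drive the two modes accepted above, based on the flags and return codes visible in this function (MBX_POLL before interrupts are enabled, MBX_NOWAIT with a completion handler afterwards); my_mbox_cmpl and the example wrapper are hypothetical.

static int example_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	if (!phba->sli4_hba.intr_enable) {
		/* Polling is the only usable mode before interrupts are up. */
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		return (rc == MBX_SUCCESS) ? 0 : -EIO;
	}

	/* Interrupt mode: enqueue; the worker thread posts it later. */
	mboxq->mbox_cmpl = my_mbox_cmpl;	/* hypothetical completion handler */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		return -EIO;	/* never queued; caller still owns mboxq */

	return 0;		/* MBX_BUSY: completion handler runs later */
}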
4773 | /** | ||
4774 | * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device | ||
4775 | * @phba: Pointer to HBA context object. | ||
4776 | * | ||
4777 | * This function is called by worker thread to send a mailbox command to | ||
4778 | * SLI4 HBA firmware. | ||
4779 | * | ||
4780 | **/ | ||
4781 | int | ||
4782 | lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) | ||
4783 | { | ||
4784 | struct lpfc_sli *psli = &phba->sli; | ||
4785 | LPFC_MBOXQ_t *mboxq; | ||
4786 | int rc = MBX_SUCCESS; | ||
4787 | unsigned long iflags; | ||
4788 | struct lpfc_mqe *mqe; | ||
4789 | uint32_t mbx_cmnd; | ||
4790 | |||
4791 | /* Check interrupt mode before posting the async mailbox command */ | ||
4792 | if (unlikely(!phba->sli4_hba.intr_enable)) | ||
4793 | return MBX_NOT_FINISHED; | ||
4794 | |||
4795 | /* Check for mailbox command service token */ | ||
4796 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
4797 | if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { | ||
4798 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
4799 | return MBX_NOT_FINISHED; | ||
4800 | } | ||
4801 | if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { | ||
4802 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
4803 | return MBX_NOT_FINISHED; | ||
4804 | } | ||
4805 | if (unlikely(phba->sli.mbox_active)) { | ||
4806 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
4807 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
4808 | "0384 There is pending active mailbox cmd\n"); | ||
4809 | return MBX_NOT_FINISHED; | ||
4810 | } | ||
4811 | /* Take the mailbox command service token */ | ||
4812 | psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; | ||
4813 | |||
4814 | /* Get the next mailbox command from head of queue */ | ||
4815 | mboxq = lpfc_mbox_get(phba); | ||
4816 | |||
4817 | /* If no more mailbox commands are waiting to post, we're done */ | ||
4818 | if (!mboxq) { | ||
4819 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
4820 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
4821 | return MBX_SUCCESS; | ||
4822 | } | ||
4823 | phba->sli.mbox_active = mboxq; | ||
4824 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
4825 | |||
4826 | /* Check device readiness for posting mailbox command */ | ||
4827 | rc = lpfc_mbox_dev_check(phba); | ||
4828 | if (unlikely(rc)) | ||
4829 | /* Driver cleanup routine will clean up the pending mailbox */ | ||
4830 | goto out_not_finished; | ||
4831 | |||
4832 | /* Prepare the mbox command to be posted */ | ||
4833 | mqe = &mboxq->u.mqe; | ||
4834 | mbx_cmnd = bf_get(lpfc_mqe_command, mqe); | ||
4835 | |||
4836 | /* Start timer for the mbox_tmo and log some mailbox post messages */ | ||
4837 | mod_timer(&psli->mbox_tmo, (jiffies + | ||
4838 | (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); | ||
4839 | |||
4840 | lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, | ||
4841 | "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " | ||
4842 | "x%x x%x\n", | ||
4843 | mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, | ||
4844 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4845 | phba->pport->port_state, psli->sli_flag); | ||
4846 | |||
4847 | if (mbx_cmnd != MBX_HEARTBEAT) { | ||
4848 | if (mboxq->vport) { | ||
4849 | lpfc_debugfs_disc_trc(mboxq->vport, | ||
4850 | LPFC_DISC_TRC_MBOX_VPORT, | ||
4851 | "MBOX Send vport: cmd:x%x mb:x%x x%x", | ||
4852 | mbx_cmnd, mqe->un.mb_words[0], | ||
4853 | mqe->un.mb_words[1]); | ||
4854 | } else { | ||
4855 | lpfc_debugfs_disc_trc(phba->pport, | ||
4856 | LPFC_DISC_TRC_MBOX, | ||
4857 | "MBOX Send: cmd:x%x mb:x%x x%x", | ||
4858 | mbx_cmnd, mqe->un.mb_words[0], | ||
4859 | mqe->un.mb_words[1]); | ||
4860 | } | ||
4861 | } | ||
4862 | psli->slistat.mbox_cmd++; | ||
4863 | |||
4864 | /* Post the mailbox command to the port */ | ||
4865 | rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); | ||
4866 | if (rc != MBX_SUCCESS) { | ||
4867 | lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, | ||
4868 | "(%d):2533 Mailbox command x%x (x%x) " | ||
4869 | "cannot issue Data: x%x x%x\n", | ||
4870 | mboxq->vport ? mboxq->vport->vpi : 0, | ||
4871 | mboxq->u.mb.mbxCommand, | ||
4872 | lpfc_sli4_mbox_opcode_get(phba, mboxq), | ||
4873 | psli->sli_flag, MBX_NOWAIT); | ||
4874 | goto out_not_finished; | ||
4875 | } | ||
4876 | |||
4877 | return rc; | ||
4878 | |||
4879 | out_not_finished: | ||
4880 | spin_lock_irqsave(&phba->hbalock, iflags); | ||
4881 | mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; | ||
4882 | __lpfc_mbox_cmpl_put(phba, mboxq); | ||
4883 | /* Release the token */ | ||
4884 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | ||
4885 | phba->sli.mbox_active = NULL; | ||
4886 | spin_unlock_irqrestore(&phba->hbalock, iflags); | ||
4887 | |||
4888 | return MBX_NOT_FINISHED; | ||
4889 | } | ||
4890 | |||
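A brief hedged sketch of the handoff this routine completes: lpfc_sli_issue_mbox_s4() enqueues the command and wakes the worker thread, and the worker (which services many other events in the real driver) then drains the FIFO through this routine. The wrapper below is illustrative only.

static void example_worker_service_mbox(struct lpfc_hba *phba)
{
	/* MBX_SUCCESS: posted one command or the FIFO was empty;
	 * MBX_NOT_FINISHED: the port cannot take a command right now.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4)
		(void)lpfc_sli4_post_async_mbox(phba);
}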
4891 | /** | ||
4892 | * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command | ||
4893 | * @phba: Pointer to HBA context object. | ||
4894 | * @pmbox: Pointer to mailbox object. | ||
4895 | * @flag: Flag indicating how the mailbox needs to be processed. | ||
4896 | * | ||
4897 | * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from | ||
4898 | * the API jump table function pointer from the lpfc_hba struct. | ||
4899 | * | ||
4900 | * Returns a mailbox completion code; the caller owns the mailbox command | ||
4901 | * after the function returns. | ||
4902 | **/ | ||
4903 | int | ||
4904 | lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) | ||
4905 | { | ||
4906 | return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); | ||
4907 | } | ||
4908 | |||
4909 | /** | ||
4910 | * lpfc_mbox_api_table_setup - Set up mbox API function jump table | ||
4911 | * @phba: The hba struct for which this call is being executed. | ||
4912 | * @dev_grp: The HBA PCI-Device group number. | ||
4913 | * | ||
4914 | * This routine sets up the mbox interface API function jump table in @phba | ||
4915 | * struct. | ||
4916 | * Returns: 0 - success, -ENODEV - failure. | ||
4917 | **/ | ||
4918 | int | ||
4919 | lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) | ||
4920 | { | ||
4921 | |||
4922 | switch (dev_grp) { | ||
4923 | case LPFC_PCI_DEV_LP: | ||
4924 | phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; | ||
4925 | phba->lpfc_sli_handle_slow_ring_event = | ||
4926 | lpfc_sli_handle_slow_ring_event_s3; | ||
4927 | phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; | ||
4928 | phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; | ||
4929 | phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; | ||
4930 | break; | ||
4931 | case LPFC_PCI_DEV_OC: | ||
4932 | phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; | ||
4933 | phba->lpfc_sli_handle_slow_ring_event = | ||
4934 | lpfc_sli_handle_slow_ring_event_s4; | ||
4935 | phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; | ||
4936 | phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; | ||
4937 | phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; | ||
4938 | break; | ||
4939 | default: | ||
4940 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
4941 | "1420 Invalid HBA PCI-device group: 0x%x\n", | ||
4942 | dev_grp); | ||
4943 | return -ENODEV; | ||
4944 | break; | ||
4945 | } | ||
4946 | return 0; | ||
4947 | } | ||
4948 | |||
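A hedged sketch of how the jump table above is meant to be used: the device group is resolved once at probe time, after which revision-agnostic code calls through thin wrappers such as lpfc_sli_issue_mbox(); the example function is hypothetical and error handling is elided.

static int example_setup_and_issue(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
				   uint8_t dev_grp)
{
	/* Install the SLI3 or SLI4 entry points once, at probe time. */
	if (lpfc_mbox_api_table_setup(phba, dev_grp))
		return -ENODEV;

	/* From here on, callers stay revision-agnostic. */
	return lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
}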
4949 | /** | ||
3822 | * __lpfc_sli_ringtx_put - Add an iocb to the txq | 4950 | * __lpfc_sli_ringtx_put - Add an iocb to the txq |
3823 | * @phba: Pointer to HBA context object. | 4951 | * @phba: Pointer to HBA context object. |
3824 | * @pring: Pointer to driver SLI ring object. | 4952 | * @pring: Pointer to driver SLI ring object. |
@@ -4501,28 +5629,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) | |||
4501 | 5629 | ||
4502 | /* Return any active mbox cmds */ | 5630 | /* Return any active mbox cmds */ |
4503 | del_timer_sync(&psli->mbox_tmo); | 5631 | del_timer_sync(&psli->mbox_tmo); |
4504 | spin_lock_irqsave(&phba->hbalock, flags); | ||
4505 | 5632 | ||
4506 | spin_lock(&phba->pport->work_port_lock); | 5633 | spin_lock_irqsave(&phba->pport->work_port_lock, flags); |
4507 | phba->pport->work_port_events &= ~WORKER_MBOX_TMO; | 5634 | phba->pport->work_port_events &= ~WORKER_MBOX_TMO; |
4508 | spin_unlock(&phba->pport->work_port_lock); | 5635 | spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); |
4509 | 5636 | ||
4510 | /* Return any pending or completed mbox cmds */ | 5637 | return 1; |
4511 | list_splice_init(&phba->sli.mboxq, &completions); | 5638 | } |
4512 | if (psli->mbox_active) { | 5639 | |
4513 | list_add_tail(&psli->mbox_active->list, &completions); | 5640 | /** |
4514 | psli->mbox_active = NULL; | 5641 | * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA |
4515 | psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; | 5642 | * @phba: Pointer to HBA context object. |
4516 | } | 5643 | * |
4517 | list_splice_init(&phba->sli.mboxq_cmpl, &completions); | 5644 | * This function cleans up all queues, iocb, buffers, mailbox commands while |
4518 | spin_unlock_irqrestore(&phba->hbalock, flags); | 5645 | * shutting down the SLI4 HBA FCoE function. This function is called with no |
5646 | * lock held and always returns 1. | ||
5647 | * | ||
5648 | * This function does the following to cleanup driver FCoE function resources: | ||
5649 | * - Free discovery resources for each virtual port | ||
5650 | * - Cleanup any pending fabric iocbs | ||
5651 | * - Iterate through the iocb txq and free each entry in the list. | ||
5652 | * - Free up any buffer posted to the HBA. | ||
5653 | * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc. | ||
5654 | * - Free mailbox commands in the mailbox queue. | ||
5655 | **/ | ||
5656 | int | ||
5657 | lpfc_sli4_hba_down(struct lpfc_hba *phba) | ||
5658 | { | ||
5659 | /* Stop the SLI4 device port */ | ||
5660 | lpfc_stop_port(phba); | ||
5661 | |||
5662 | /* Tear down the queues in the HBA */ | ||
5663 | lpfc_sli4_queue_unset(phba); | ||
5664 | |||
5665 | /* unregister default FCFI from the HBA */ | ||
5666 | lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); | ||
4519 | 5667 | ||
4520 | while (!list_empty(&completions)) { | ||
4521 | list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); | ||
4522 | pmb->mb.mbxStatus = MBX_NOT_FINISHED; | ||
4523 | if (pmb->mbox_cmpl) | ||
4524 | pmb->mbox_cmpl(phba,pmb); | ||
4525 | } | ||
4526 | return 1; | 5668 | return 1; |
4527 | } | 5669 | } |
4528 | 5670 | ||
@@ -4853,7 +5995,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4853 | iabt = &abtsiocbp->iocb; | 5995 | iabt = &abtsiocbp->iocb; |
4854 | iabt->un.acxri.abortType = ABORT_TYPE_ABTS; | 5996 | iabt->un.acxri.abortType = ABORT_TYPE_ABTS; |
4855 | iabt->un.acxri.abortContextTag = icmd->ulpContext; | 5997 | iabt->un.acxri.abortContextTag = icmd->ulpContext; |
4856 | iabt->un.acxri.abortIoTag = icmd->ulpIoTag; | 5998 | if (phba->sli_rev == LPFC_SLI_REV4) |
5999 | iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; | ||
6000 | else | ||
6001 | iabt->un.acxri.abortIoTag = icmd->ulpIoTag; | ||
4857 | iabt->ulpLe = 1; | 6002 | iabt->ulpLe = 1; |
4858 | iabt->ulpClass = icmd->ulpClass; | 6003 | iabt->ulpClass = icmd->ulpClass; |
4859 | 6004 | ||
@@ -4869,7 +6014,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
4869 | "abort cmd iotag x%x\n", | 6014 | "abort cmd iotag x%x\n", |
4870 | iabt->un.acxri.abortContextTag, | 6015 | iabt->un.acxri.abortContextTag, |
4871 | iabt->un.acxri.abortIoTag, abtsiocbp->iotag); | 6016 | iabt->un.acxri.abortIoTag, abtsiocbp->iotag); |
4872 | retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); | 6017 | retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); |
4873 | 6018 | ||
4874 | if (retval) | 6019 | if (retval) |
4875 | __lpfc_sli_release_iocbq(phba, abtsiocbp); | 6020 | __lpfc_sli_release_iocbq(phba, abtsiocbp); |
@@ -5052,7 +6197,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
5052 | cmd = &iocbq->iocb; | 6197 | cmd = &iocbq->iocb; |
5053 | abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; | 6198 | abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; |
5054 | abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; | 6199 | abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; |
5055 | abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; | 6200 | if (phba->sli_rev == LPFC_SLI_REV4) |
6201 | abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; | ||
6202 | else | ||
6203 | abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; | ||
5056 | abtsiocb->iocb.ulpLe = 1; | 6204 | abtsiocb->iocb.ulpLe = 1; |
5057 | abtsiocb->iocb.ulpClass = cmd->ulpClass; | 6205 | abtsiocb->iocb.ulpClass = cmd->ulpClass; |
5058 | abtsiocb->vport = phba->pport; | 6206 | abtsiocb->vport = phba->pport; |
@@ -5064,7 +6212,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
5064 | 6212 | ||
5065 | /* Setup callback routine and issue the command. */ | 6213 | /* Setup callback routine and issue the command. */ |
5066 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; | 6214 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; |
5067 | ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); | 6215 | ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, |
6216 | abtsiocb, 0); | ||
5068 | if (ret_val == IOCB_ERROR) { | 6217 | if (ret_val == IOCB_ERROR) { |
5069 | lpfc_sli_release_iocbq(phba, abtsiocb); | 6218 | lpfc_sli_release_iocbq(phba, abtsiocb); |
5070 | errcnt++; | 6219 | errcnt++; |
@@ -5145,7 +6294,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, | |||
5145 | **/ | 6294 | **/ |
5146 | int | 6295 | int |
5147 | lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | 6296 | lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, |
5148 | struct lpfc_sli_ring *pring, | 6297 | uint32_t ring_number, |
5149 | struct lpfc_iocbq *piocb, | 6298 | struct lpfc_iocbq *piocb, |
5150 | struct lpfc_iocbq *prspiocbq, | 6299 | struct lpfc_iocbq *prspiocbq, |
5151 | uint32_t timeout) | 6300 | uint32_t timeout) |
@@ -5176,7 +6325,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, | |||
5176 | readl(phba->HCregaddr); /* flush */ | 6325 | readl(phba->HCregaddr); /* flush */ |
5177 | } | 6326 | } |
5178 | 6327 | ||
5179 | retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); | 6328 | retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0); |
5180 | if (retval == IOCB_SUCCESS) { | 6329 | if (retval == IOCB_SUCCESS) { |
5181 | timeout_req = timeout * HZ; | 6330 | timeout_req = timeout * HZ; |
5182 | timeleft = wait_event_timeout(done_q, | 6331 | timeleft = wait_event_timeout(done_q, |
@@ -5385,6 +6534,58 @@ lpfc_sli_eratt_read(struct lpfc_hba *phba) | |||
5385 | } | 6534 | } |
5386 | 6535 | ||
5387 | /** | 6536 | /** |
6537 | * lpfc_sli4_eratt_read - read sli-4 error attention events | ||
6538 | * @phba: Pointer to HBA context. | ||
6539 | * | ||
6540 | * This function is called to read the SLI4 device error attention registers | ||
6541 | * for possible error attention events. The caller must hold the hostlock | ||
6542 | * with spin_lock_irq(). | ||
6543 | * | ||
6544 | * This function returns 1 when there is Error Attention in the Host Attention | ||
6545 | * Register and returns 0 otherwise. | ||
6546 | **/ | ||
6547 | static int | ||
6548 | lpfc_sli4_eratt_read(struct lpfc_hba *phba) | ||
6549 | { | ||
6550 | uint32_t uerr_sta_hi, uerr_sta_lo; | ||
6551 | uint32_t onlnreg0, onlnreg1; | ||
6552 | |||
6553 | /* For now, use the SLI4 device internal unrecoverable error | ||
6554 | * registers for error attention. This can be changed later. | ||
6555 | */ | ||
6556 | onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); | ||
6557 | onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); | ||
6558 | if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { | ||
6559 | uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); | ||
6560 | uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); | ||
6561 | if (uerr_sta_lo || uerr_sta_hi) { | ||
6562 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
6563 | "1423 HBA Unrecoverable error: " | ||
6564 | "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " | ||
6565 | "online0_reg=0x%x, online1_reg=0x%x\n", | ||
6566 | uerr_sta_lo, uerr_sta_hi, | ||
6567 | onlnreg0, onlnreg1); | ||
6568 | /* TEMP: as the driver error recovery logic is not | ||
6569 | * fully developed, we just log the error message | ||
6570 | * and the device error attention action is now | ||
6571 | * temporarily disabled. | ||
6572 | */ | ||
6573 | return 0; | ||
6574 | phba->work_status[0] = uerr_sta_lo; | ||
6575 | phba->work_status[1] = uerr_sta_hi; | ||
6576 | spin_lock_irq(&phba->hbalock); | ||
6577 | /* Set the driver HA work bitmap */ | ||
6578 | phba->work_ha |= HA_ERATT; | ||
6579 | /* Indicate polling handles this ERATT */ | ||
6580 | phba->hba_flag |= HBA_ERATT_HANDLED; | ||
6581 | spin_unlock_irq(&phba->hbalock); | ||
6582 | return 1; | ||
6583 | } | ||
6584 | } | ||
6585 | return 0; | ||
6586 | } | ||
6587 | |||
6588 | /** | ||
5388 | * lpfc_sli_check_eratt - check error attention events | 6589 | * lpfc_sli_check_eratt - check error attention events |
5389 | * @phba: Pointer to HBA context. | 6590 | * @phba: Pointer to HBA context. |
5390 | * | 6591 | * |
@@ -5434,6 +6635,10 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) | |||
5434 | /* Read chip Host Attention (HA) register */ | 6635 | /* Read chip Host Attention (HA) register */ |
5435 | ha_copy = lpfc_sli_eratt_read(phba); | 6636 | ha_copy = lpfc_sli_eratt_read(phba); |
5436 | break; | 6637 | break; |
6638 | case LPFC_SLI_REV4: | ||
6639 | /* Read device Unrecoverable Error (UERR) registers */ | ||
6640 | ha_copy = lpfc_sli4_eratt_read(phba); | ||
6641 | break; | ||
5437 | default: | 6642 | default: |
5438 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 6643 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
5439 | "0299 Invalid SLI revision (%d)\n", | 6644 | "0299 Invalid SLI revision (%d)\n", |
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 883938652a6a..e6c88ee8ee96 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd { | |||
29 | LPFC_CTX_HOST | 29 | LPFC_CTX_HOST |
30 | } lpfc_ctx_cmd; | 30 | } lpfc_ctx_cmd; |
31 | 31 | ||
32 | /* This structure is used to carry the needed response IOCB states */ | ||
33 | struct lpfc_sli4_rspiocb_info { | ||
34 | uint8_t hw_status; | ||
35 | uint8_t bfield; | ||
36 | #define LPFC_XB 0x1 | ||
37 | #define LPFC_PV 0x2 | ||
38 | uint8_t priority; | ||
39 | uint8_t reserved; | ||
40 | }; | ||
41 | |||
32 | /* This structure is used to handle IOCB requests / responses */ | 42 | /* This structure is used to handle IOCB requests / responses */ |
33 | struct lpfc_iocbq { | 43 | struct lpfc_iocbq { |
34 | /* lpfc_iocbqs are used in double linked lists */ | 44 | /* lpfc_iocbqs are used in double linked lists */ |
35 | struct list_head list; | 45 | struct list_head list; |
36 | struct list_head clist; | 46 | struct list_head clist; |
37 | uint16_t iotag; /* pre-assigned IO tag */ | 47 | uint16_t iotag; /* pre-assigned IO tag */ |
38 | uint16_t rsvd1; | 48 | uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ |
39 | 49 | ||
40 | IOCB_t iocb; /* IOCB cmd */ | 50 | IOCB_t iocb; /* IOCB cmd */ |
41 | uint8_t retry; /* retry counter for IOCB cmd - if needed */ | 51 | uint8_t retry; /* retry counter for IOCB cmd - if needed */ |
@@ -65,7 +75,7 @@ struct lpfc_iocbq { | |||
65 | struct lpfc_iocbq *); | 75 | struct lpfc_iocbq *); |
66 | void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, | 76 | void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, |
67 | struct lpfc_iocbq *); | 77 | struct lpfc_iocbq *); |
68 | 78 | struct lpfc_sli4_rspiocb_info sli4_info; | |
69 | }; | 79 | }; |
70 | 80 | ||
71 | #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ | 81 | #define SLI_IOCB_RET_IOCB 1 /* Return IOCB if cmd ring full */ |
@@ -81,14 +91,18 @@ struct lpfc_iocbq { | |||
81 | typedef struct lpfcMboxq { | 91 | typedef struct lpfcMboxq { |
82 | /* MBOXQs are used in single linked lists */ | 92 | /* MBOXQs are used in single linked lists */ |
83 | struct list_head list; /* ptr to next mailbox command */ | 93 | struct list_head list; /* ptr to next mailbox command */ |
84 | MAILBOX_t mb; /* Mailbox cmd */ | 94 | union { |
85 | struct lpfc_vport *vport;/* virutal port pointer */ | 95 | MAILBOX_t mb; /* Mailbox cmd */ |
96 | struct lpfc_mqe mqe; | ||
97 | } u; | ||
98 | struct lpfc_vport *vport;/* virtual port pointer */ | ||
86 | void *context1; /* caller context information */ | 99 | void *context1; /* caller context information */ |
87 | void *context2; /* caller context information */ | 100 | void *context2; /* caller context information */ |
88 | 101 | ||
89 | void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); | 102 | void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *); |
90 | uint8_t mbox_flag; | 103 | uint8_t mbox_flag; |
91 | 104 | struct lpfc_mcqe mcqe; | |
105 | struct lpfc_mbx_nembed_sge_virt *sge_array; | ||
92 | } LPFC_MBOXQ_t; | 106 | } LPFC_MBOXQ_t; |
93 | 107 | ||
94 | #define MBX_POLL 1 /* poll mailbox till command done, then | 108 | #define MBX_POLL 1 /* poll mailbox till command done, then |
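A hedged sketch of how the new u union in LPFC_MBOXQ_t is addressed: legacy SLI3 paths keep reading the MAILBOX_t image while SLI4 paths read the MQE image of the same storage, mirroring the bf_get(lpfc_mqe_command, ...) and u.mb.mbxCommand usage in lpfc_sli.c above; the helper name is hypothetical.

static uint32_t example_mbox_command(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return bf_get(lpfc_mqe_command, &mboxq->u.mqe);

	return mboxq->u.mb.mbxCommand;	/* SLI3 mailbox image */
}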
@@ -234,6 +248,7 @@ struct lpfc_sli { | |||
234 | #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ | 248 | #define LPFC_PROCESS_LA 0x400 /* Able to process link attention */ |
235 | #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ | 249 | #define LPFC_BLOCK_MGMT_IO 0x800 /* Don't allow mgmt mbx or iocb cmds */ |
236 | #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ | 250 | #define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */ |
251 | #define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */ | ||
237 | 252 | ||
238 | struct lpfc_sli_ring ring[LPFC_MAX_RING]; | 253 | struct lpfc_sli_ring ring[LPFC_MAX_RING]; |
239 | int fcp_ring; /* ring used for FCP initiator commands */ | 254 | int fcp_ring; /* ring used for FCP initiator commands */ |
@@ -261,6 +276,8 @@ struct lpfc_sli { | |||
261 | 276 | ||
262 | #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox | 277 | #define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox |
263 | command */ | 278 | command */ |
279 | #define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox | ||
280 | command */ | ||
264 | #define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write | 281 | #define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write |
265 | * or erase cmds. This is especially | 282 | * or erase cmds. This is especially |
266 | * long because of the potential of | 283 | * long because of the potential of |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h new file mode 100644 index 000000000000..5196b46608d7 --- /dev/null +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -0,0 +1,467 @@ | |||
1 | /******************************************************************* | ||
2 | * This file is part of the Emulex Linux Device Driver for * | ||
3 | * Fibre Channel Host Bus Adapters. * | ||
4 | * Copyright (C) 2009 Emulex. All rights reserved. * | ||
5 | * EMULEX and SLI are trademarks of Emulex. * | ||
6 | * www.emulex.com * | ||
7 | * * | ||
8 | * This program is free software; you can redistribute it and/or * | ||
9 | * modify it under the terms of version 2 of the GNU General * | ||
10 | * Public License as published by the Free Software Foundation. * | ||
11 | * This program is distributed in the hope that it will be useful. * | ||
12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | ||
13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | ||
15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | ||
16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | ||
17 | * more details, a copy of which can be found in the file COPYING * | ||
18 | * included with this package. * | ||
19 | *******************************************************************/ | ||
20 | |||
21 | #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 | ||
22 | #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 | ||
23 | #define LPFC_GET_QE_REL_INT 32 | ||
24 | #define LPFC_RPI_LOW_WATER_MARK 10 | ||
25 | /* Number of SGL entries that can be posted in a 4KB non-embedded mbox command */ | ||
26 | #define LPFC_NEMBED_MBOX_SGL_CNT 254 | ||
27 | |||
28 | /* Multi-queue arrangement for fast-path FCP work queues */ | ||
29 | #define LPFC_FN_EQN_MAX 8 | ||
30 | #define LPFC_SP_EQN_DEF 1 | ||
31 | #define LPFC_FP_EQN_DEF 1 | ||
32 | #define LPFC_FP_EQN_MIN 1 | ||
33 | #define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) | ||
34 | |||
35 | #define LPFC_FN_WQN_MAX 32 | ||
36 | #define LPFC_SP_WQN_DEF 1 | ||
37 | #define LPFC_FP_WQN_DEF 4 | ||
38 | #define LPFC_FP_WQN_MIN 1 | ||
39 | #define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF) | ||
40 | |||
41 | /* | ||
42 | * Provide the default FCF Record attributes used by the driver | ||
43 | * when non-FIP mode is configured and there are no other default | ||
44 | * FCF Record attributes. | ||
45 | */ | ||
46 | #define LPFC_FCOE_FCF_DEF_INDEX 0 | ||
47 | #define LPFC_FCOE_FCF_GET_FIRST 0xFFFF | ||
48 | #define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF | ||
49 | |||
50 | /* First 3 bytes of default FCF MAC are specified by FC_MAP */ | ||
51 | #define LPFC_FCOE_FCF_MAC3 0xFF | ||
52 | #define LPFC_FCOE_FCF_MAC4 0xFF | ||
53 | #define LPFC_FCOE_FCF_MAC5 0xFE | ||
54 | #define LPFC_FCOE_FCF_MAP0 0x0E | ||
55 | #define LPFC_FCOE_FCF_MAP1 0xFC | ||
56 | #define LPFC_FCOE_FCF_MAP2 0x00 | ||
57 | #define LPFC_FCOE_MAX_RCV_SIZE 0x5AC | ||
58 | #define LPFC_FCOE_FKA_ADV_PER 0 | ||
59 | #define LPFC_FCOE_FIP_PRIORITY 0x80 | ||
60 | |||
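For clarity, a hedged illustration of how the defines above compose the default FCF MAC address: the FC_MAP bytes provide octets 0-2 and LPFC_FCOE_FCF_MAC3-5 provide octets 3-5, i.e. 0E:FC:00:FF:FF:FE; the helper is hypothetical.

static inline void example_default_fcf_mac(uint8_t mac[6])
{
	mac[0] = LPFC_FCOE_FCF_MAP0;	/* FC_MAP, per the comment above */
	mac[1] = LPFC_FCOE_FCF_MAP1;
	mac[2] = LPFC_FCOE_FCF_MAP2;
	mac[3] = LPFC_FCOE_FCF_MAC3;
	mac[4] = LPFC_FCOE_FCF_MAC4;
	mac[5] = LPFC_FCOE_FCF_MAC5;
}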
61 | enum lpfc_sli4_queue_type { | ||
62 | LPFC_EQ, | ||
63 | LPFC_GCQ, | ||
64 | LPFC_MCQ, | ||
65 | LPFC_WCQ, | ||
66 | LPFC_RCQ, | ||
67 | LPFC_MQ, | ||
68 | LPFC_WQ, | ||
69 | LPFC_HRQ, | ||
70 | LPFC_DRQ | ||
71 | }; | ||
72 | |||
73 | /* The queue sub-type defines the functional purpose of the queue */ | ||
74 | enum lpfc_sli4_queue_subtype { | ||
75 | LPFC_NONE, | ||
76 | LPFC_MBOX, | ||
77 | LPFC_FCP, | ||
78 | LPFC_ELS, | ||
79 | LPFC_USOL | ||
80 | }; | ||
81 | |||
82 | union sli4_qe { | ||
83 | void *address; | ||
84 | struct lpfc_eqe *eqe; | ||
85 | struct lpfc_cqe *cqe; | ||
86 | struct lpfc_mcqe *mcqe; | ||
87 | struct lpfc_wcqe_complete *wcqe_complete; | ||
88 | struct lpfc_wcqe_release *wcqe_release; | ||
89 | struct sli4_wcqe_xri_aborted *wcqe_xri_aborted; | ||
90 | struct lpfc_rcqe_complete *rcqe_complete; | ||
91 | struct lpfc_mqe *mqe; | ||
92 | union lpfc_wqe *wqe; | ||
93 | struct lpfc_rqe *rqe; | ||
94 | }; | ||
95 | |||
96 | struct lpfc_queue { | ||
97 | struct list_head list; | ||
98 | enum lpfc_sli4_queue_type type; | ||
99 | enum lpfc_sli4_queue_subtype subtype; | ||
100 | struct lpfc_hba *phba; | ||
101 | struct list_head child_list; | ||
102 | uint32_t entry_count; /* Number of entries to support on the queue */ | ||
103 | uint32_t entry_size; /* Size of each queue entry. */ | ||
104 | uint32_t queue_id; /* Queue ID assigned by the hardware */ | ||
105 | struct list_head page_list; | ||
106 | uint32_t page_count; /* Number of pages allocated for this queue */ | ||
107 | |||
108 | uint32_t host_index; /* The host's index for putting or getting */ | ||
109 | uint32_t hba_index; /* The last known hba index for get or put */ | ||
110 | union sli4_qe qe[1]; /* array to index entries (must be last) */ | ||
111 | }; | ||
112 | |||
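A hedged editor's sketch of the circular-index convention implied by host_index and hba_index in struct lpfc_queue: the host advances its index modulo entry_count and must not lap the index last reported by the hardware. This is an illustration of the bookkeeping only, not the driver's queue-put routine.

static inline int example_queue_next_index(struct lpfc_queue *q)
{
	uint32_t next = (q->host_index + 1) % q->entry_count;

	if (next == q->hba_index)
		return -ENOMEM;		/* full from the host's point of view */

	q->host_index = next;
	return next;
}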
113 | struct lpfc_cq_event { | ||
114 | struct list_head list; | ||
115 | union { | ||
116 | struct lpfc_mcqe mcqe_cmpl; | ||
117 | struct lpfc_acqe_link acqe_link; | ||
118 | struct lpfc_acqe_fcoe acqe_fcoe; | ||
119 | struct lpfc_acqe_dcbx acqe_dcbx; | ||
120 | struct lpfc_rcqe rcqe_cmpl; | ||
121 | struct sli4_wcqe_xri_aborted wcqe_axri; | ||
122 | } cqe; | ||
123 | }; | ||
124 | |||
125 | struct lpfc_sli4_link { | ||
126 | uint8_t speed; | ||
127 | uint8_t duplex; | ||
128 | uint8_t status; | ||
129 | uint8_t physical; | ||
130 | uint8_t fault; | ||
131 | }; | ||
132 | |||
133 | struct lpfc_fcf { | ||
134 | uint8_t fabric_name[8]; | ||
135 | uint8_t mac_addr[6]; | ||
136 | uint16_t fcf_indx; | ||
137 | uint16_t fcfi; | ||
138 | uint32_t fcf_flag; | ||
139 | #define FCF_AVAILABLE 0x01 /* FCF available for discovery */ | ||
140 | #define FCF_REGISTERED 0x02 /* FCF registered with FW */ | ||
141 | #define FCF_DISCOVERED 0x04 /* FCF discovery started */ | ||
142 | #define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */ | ||
143 | #define FCF_IN_USE 0x10 /* At least one discovery completed */ | ||
144 | #define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */ | ||
145 | uint32_t priority; | ||
146 | uint32_t addr_mode; | ||
147 | uint16_t vlan_id; | ||
148 | }; | ||
149 | |||
150 | #define LPFC_REGION23_SIGNATURE "RG23" | ||
151 | #define LPFC_REGION23_VERSION 1 | ||
152 | #define LPFC_REGION23_LAST_REC 0xff | ||
153 | struct lpfc_fip_param_hdr { | ||
154 | uint8_t type; | ||
155 | #define FCOE_PARAM_TYPE 0xA0 | ||
156 | uint8_t length; | ||
157 | #define FCOE_PARAM_LENGTH 2 | ||
158 | uint8_t parm_version; | ||
159 | #define FIPP_VERSION 0x01 | ||
160 | uint8_t parm_flags; | ||
161 | #define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 | ||
162 | #define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 | ||
163 | #define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags | ||
164 | #define FIPP_MODE_ON 0x2 | ||
165 | #define FIPP_MODE_OFF 0x0 | ||
166 | #define FIPP_VLAN_VALID 0x1 | ||
167 | }; | ||
168 | |||
169 | struct lpfc_fcoe_params { | ||
170 | uint8_t fc_map[3]; | ||
171 | uint8_t reserved1; | ||
172 | uint16_t vlan_tag; | ||
173 | uint8_t reserved[2]; | ||
174 | }; | ||
175 | |||
176 | struct lpfc_fcf_conn_hdr { | ||
177 | uint8_t type; | ||
178 | #define FCOE_CONN_TBL_TYPE 0xA1 | ||
179 | uint8_t length; /* words */ | ||
180 | uint8_t reserved[2]; | ||
181 | }; | ||
182 | |||
183 | struct lpfc_fcf_conn_rec { | ||
184 | uint16_t flags; | ||
185 | #define FCFCNCT_VALID 0x0001 | ||
186 | #define FCFCNCT_BOOT 0x0002 | ||
187 | #define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */ | ||
188 | #define FCFCNCT_FBNM_VALID 0x0008 | ||
189 | #define FCFCNCT_SWNM_VALID 0x0010 | ||
190 | #define FCFCNCT_VLAN_VALID 0x0020 | ||
191 | #define FCFCNCT_AM_VALID 0x0040 | ||
192 | #define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */ | ||
193 | #define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */ | ||
194 | |||
195 | uint16_t vlan_tag; | ||
196 | uint8_t fabric_name[8]; | ||
197 | uint8_t switch_name[8]; | ||
198 | }; | ||
199 | |||
200 | struct lpfc_fcf_conn_entry { | ||
201 | struct list_head list; | ||
202 | struct lpfc_fcf_conn_rec conn_rec; | ||
203 | }; | ||
204 | |||
205 | /* | ||
206 | * Define the host's bootstrap mailbox. This structure contains | ||
207 | * the member attributes needed to create, use, and destroy the | ||
208 | * bootstrap mailbox region. | ||
209 | * | ||
210 | * The macro definitions for the bmbx data structure are defined | ||
211 | * in lpfc_hw4.h with the register definition. | ||
212 | */ | ||
213 | struct lpfc_bmbx { | ||
214 | struct lpfc_dmabuf *dmabuf; | ||
215 | struct dma_address dma_address; | ||
216 | void *avirt; | ||
217 | dma_addr_t aphys; | ||
218 | uint32_t bmbx_size; | ||
219 | }; | ||
220 | |||
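A hedged sketch of how the dma_address member relates to the two BMBXregaddr writes in lpfc_sli4_post_sync_mbox(): the 64-bit physical address of the bootstrap region is carried as high and low 32-bit words. Any alignment or flag bits the hardware expects in those words are ignored here; the helper is hypothetical.

static inline void example_fill_bmbx_dma_address(struct lpfc_bmbx *bmbx)
{
	bmbx->dma_address.addr_hi = upper_32_bits(bmbx->aphys);
	bmbx->dma_address.addr_lo = lower_32_bits(bmbx->aphys);
}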
221 | #define LPFC_EQE_SIZE LPFC_EQE_SIZE_4 | ||
222 | |||
223 | #define LPFC_EQE_SIZE_4B 4 | ||
224 | #define LPFC_EQE_SIZE_16B 16 | ||
225 | #define LPFC_CQE_SIZE 16 | ||
226 | #define LPFC_WQE_SIZE 64 | ||
227 | #define LPFC_MQE_SIZE 256 | ||
228 | #define LPFC_RQE_SIZE 8 | ||
229 | |||
230 | #define LPFC_EQE_DEF_COUNT 1024 | ||
231 | #define LPFC_CQE_DEF_COUNT 256 | ||
232 | #define LPFC_WQE_DEF_COUNT 64 | ||
233 | #define LPFC_MQE_DEF_COUNT 16 | ||
234 | #define LPFC_RQE_DEF_COUNT 512 | ||
235 | |||
236 | #define LPFC_QUEUE_NOARM false | ||
237 | #define LPFC_QUEUE_REARM true | ||
238 | |||
239 | |||
240 | /* | ||
241 | * SLI4 CT field defines | ||
242 | */ | ||
243 | #define SLI4_CT_RPI 0 | ||
244 | #define SLI4_CT_VPI 1 | ||
245 | #define SLI4_CT_VFI 2 | ||
246 | #define SLI4_CT_FCFI 3 | ||
247 | |||
248 | #define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 | ||
249 | |||
250 | /* | ||
251 | * SLI4 specific data structures | ||
252 | */ | ||
253 | struct lpfc_max_cfg_param { | ||
254 | uint16_t max_xri; | ||
255 | uint16_t xri_base; | ||
256 | uint16_t xri_used; | ||
257 | uint16_t max_rpi; | ||
258 | uint16_t rpi_base; | ||
259 | uint16_t rpi_used; | ||
260 | uint16_t max_vpi; | ||
261 | uint16_t vpi_base; | ||
262 | uint16_t vpi_used; | ||
263 | uint16_t max_vfi; | ||
264 | uint16_t vfi_base; | ||
265 | uint16_t vfi_used; | ||
266 | uint16_t max_fcfi; | ||
267 | uint16_t fcfi_base; | ||
268 | uint16_t fcfi_used; | ||
269 | uint16_t max_eq; | ||
270 | uint16_t max_rq; | ||
271 | uint16_t max_cq; | ||
272 | uint16_t max_wq; | ||
273 | }; | ||
274 | |||
275 | struct lpfc_hba; | ||
276 | /* SLI4 HBA multi-fcp queue handler struct */ | ||
277 | struct lpfc_fcp_eq_hdl { | ||
278 | uint32_t idx; | ||
279 | struct lpfc_hba *phba; | ||
280 | }; | ||
281 | |||
282 | /* SLI4 HBA data structure entries */ | ||
283 | struct lpfc_sli4_hba { | ||
284 | void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for | ||
285 | PCI BAR0, config space registers */ | ||
286 | void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for | ||
287 | PCI BAR1, control registers */ | ||
288 | void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for | ||
289 | PCI BAR2, doorbell registers */ | ||
290 | /* BAR0 PCI config space register memory map */ | ||
291 | void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ | ||
292 | void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ | ||
293 | void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ | ||
294 | void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ | ||
295 | #define LPFC_ONLINE_NERR 0xFFFFFFFF | ||
296 | void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ | ||
297 | /* BAR1 FCoE function CSR register memory map */ | ||
298 | void __iomem *STAregaddr; /* Address to HST_STATE register */ | ||
299 | void __iomem *ISRregaddr; /* Address to HST_ISR register */ | ||
300 | void __iomem *IMRregaddr; /* Address to HST_IMR register */ | ||
301 | void __iomem *ISCRregaddr; /* Address to HST_ISCR register */ | ||
302 | /* BAR2 VF-0 doorbell register memory map */ | ||
303 | void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */ | ||
304 | void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */ | ||
305 | void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */ | ||
306 | void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ | ||
307 | void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ | ||
308 | |||
309 | struct msix_entry *msix_entries; | ||
310 | uint32_t cfg_eqn; | ||
311 | struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ | ||
312 | /* Pointers to the constructed SLI4 queues */ | ||
313 | struct lpfc_queue **fp_eq; /* Fast-path event queue */ | ||
314 | struct lpfc_queue *sp_eq; /* Slow-path event queue */ | ||
315 | struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ | ||
316 | struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ | ||
317 | struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ | ||
318 | struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ | ||
319 | struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ | ||
320 | struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ | ||
321 | struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ | ||
322 | struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ | ||
323 | struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */ | ||
324 | |||
325 | /* Setup information for various queue parameters */ | ||
326 | int eq_esize; | ||
327 | int eq_ecount; | ||
328 | int cq_esize; | ||
329 | int cq_ecount; | ||
330 | int wq_esize; | ||
331 | int wq_ecount; | ||
332 | int mq_esize; | ||
333 | int mq_ecount; | ||
334 | int rq_esize; | ||
335 | int rq_ecount; | ||
336 | #define LPFC_SP_EQ_MAX_INTR_SEC 10000 | ||
337 | #define LPFC_FP_EQ_MAX_INTR_SEC 10000 | ||
338 | |||
339 | uint32_t intr_enable; | ||
340 | struct lpfc_bmbx bmbx; | ||
341 | struct lpfc_max_cfg_param max_cfg_param; | ||
342 | uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ | ||
343 | uint16_t next_rpi; | ||
344 | uint16_t scsi_xri_max; | ||
345 | uint16_t scsi_xri_cnt; | ||
346 | struct list_head lpfc_free_sgl_list; | ||
347 | struct list_head lpfc_sgl_list; | ||
348 | struct lpfc_sglq **lpfc_els_sgl_array; | ||
349 | struct list_head lpfc_abts_els_sgl_list; | ||
350 | struct lpfc_scsi_buf **lpfc_scsi_psb_array; | ||
351 | struct list_head lpfc_abts_scsi_buf_list; | ||
352 | uint32_t total_sglq_bufs; | ||
353 | struct lpfc_sglq **lpfc_sglq_active_list; | ||
354 | struct list_head lpfc_rpi_hdr_list; | ||
355 | unsigned long *rpi_bmask; | ||
356 | uint16_t rpi_count; | ||
357 | struct lpfc_sli4_flags sli4_flags; | ||
358 | struct list_head sp_rspiocb_work_queue; | ||
359 | struct list_head sp_cqe_event_pool; | ||
360 | struct list_head sp_asynce_work_queue; | ||
361 | struct list_head sp_fcp_xri_aborted_work_queue; | ||
362 | struct list_head sp_els_xri_aborted_work_queue; | ||
363 | struct list_head sp_unsol_work_queue; | ||
364 | struct lpfc_sli4_link link_state; | ||
365 | spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ | ||
366 | spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ | ||
367 | }; | ||
368 | |||
369 | enum lpfc_sge_type { | ||
370 | GEN_BUFF_TYPE, | ||
371 | SCSI_BUFF_TYPE | ||
372 | }; | ||
373 | |||
374 | struct lpfc_sglq { | ||
375 | /* lpfc_sglqs are used in double linked lists */ | ||
376 | struct list_head list; | ||
377 | struct list_head clist; | ||
378 | enum lpfc_sge_type buff_type; /* is this a scsi sgl */ | ||
379 | uint16_t iotag; /* pre-assigned IO tag */ | ||
380 | uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ | ||
381 | struct sli4_sge *sgl; /* pre-assigned SGL */ | ||
382 | void *virt; /* virtual address. */ | ||
383 | dma_addr_t phys; /* physical address */ | ||
384 | }; | ||
385 | |||
386 | struct lpfc_rpi_hdr { | ||
387 | struct list_head list; | ||
388 | uint32_t len; | ||
389 | struct lpfc_dmabuf *dmabuf; | ||
390 | uint32_t page_count; | ||
391 | uint32_t start_rpi; | ||
392 | }; | ||
393 | |||
394 | /* | ||
395 | * SLI4 specific function prototypes | ||
396 | */ | ||
397 | int lpfc_pci_function_reset(struct lpfc_hba *); | ||
398 | int lpfc_sli4_hba_setup(struct lpfc_hba *); | ||
399 | int lpfc_sli4_hba_down(struct lpfc_hba *); | ||
400 | int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, | ||
401 | uint8_t, uint32_t, bool); | ||
402 | void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); | ||
403 | void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); | ||
404 | void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, | ||
405 | struct lpfc_mbx_sge *); | ||
406 | |||
407 | void lpfc_sli4_hba_reset(struct lpfc_hba *); | ||
408 | struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, | ||
409 | uint32_t); | ||
410 | void lpfc_sli4_queue_free(struct lpfc_queue *); | ||
411 | uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); | ||
412 | uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, | ||
413 | struct lpfc_queue *, uint32_t, uint32_t); | ||
414 | uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, | ||
415 | struct lpfc_queue *, uint32_t); | ||
416 | uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, | ||
417 | struct lpfc_queue *, uint32_t); | ||
418 | uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, | ||
419 | struct lpfc_queue *, struct lpfc_queue *, uint32_t); | ||
420 | uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); | ||
421 | uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); | ||
422 | uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); | ||
423 | uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *); | ||
424 | uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, | ||
425 | struct lpfc_queue *); | ||
426 | int lpfc_sli4_queue_setup(struct lpfc_hba *); | ||
427 | void lpfc_sli4_queue_unset(struct lpfc_hba *); | ||
428 | int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); | ||
429 | int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); | ||
430 | int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *); | ||
431 | uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); | ||
432 | int lpfc_sli4_post_async_mbox(struct lpfc_hba *); | ||
433 | int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); | ||
434 | int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); | ||
435 | struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); | ||
436 | struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); | ||
437 | void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); | ||
438 | void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); | ||
439 | int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *); | ||
440 | int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *); | ||
441 | int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *); | ||
442 | struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); | ||
443 | void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); | ||
444 | int lpfc_sli4_alloc_rpi(struct lpfc_hba *); | ||
445 | void lpfc_sli4_free_rpi(struct lpfc_hba *, int); | ||
446 | void lpfc_sli4_remove_rpis(struct lpfc_hba *); | ||
447 | void lpfc_sli4_async_event_proc(struct lpfc_hba *); | ||
448 | int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); | ||
449 | void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); | ||
450 | void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); | ||
451 | void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, | ||
452 | struct sli4_wcqe_xri_aborted *); | ||
453 | void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, | ||
454 | struct sli4_wcqe_xri_aborted *); | ||
455 | int lpfc_sli4_brdreset(struct lpfc_hba *); | ||
456 | int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); | ||
457 | void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); | ||
458 | int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); | ||
459 | int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t); | ||
460 | uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); | ||
461 | uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); | ||
462 | void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); | ||
463 | int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); | ||
464 | void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); | ||
465 | int lpfc_sli4_post_status_check(struct lpfc_hba *); | ||
466 | uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); | ||
467 | |||
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 917ad56b0aff..59e67f7ee531 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -32,8 +32,10 @@ | |||
32 | #include <scsi/scsi_device.h> | 32 | #include <scsi/scsi_device.h> |
33 | #include <scsi/scsi_host.h> | 33 | #include <scsi/scsi_host.h> |
34 | #include <scsi/scsi_transport_fc.h> | 34 | #include <scsi/scsi_transport_fc.h> |
35 | #include "lpfc_hw4.h" | ||
35 | #include "lpfc_hw.h" | 36 | #include "lpfc_hw.h" |
36 | #include "lpfc_sli.h" | 37 | #include "lpfc_sli.h" |
38 | #include "lpfc_sli4.h" | ||
37 | #include "lpfc_nl.h" | 39 | #include "lpfc_nl.h" |
38 | #include "lpfc_disc.h" | 40 | #include "lpfc_disc.h" |
39 | #include "lpfc_scsi.h" | 41 | #include "lpfc_scsi.h" |
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba) | |||
89 | vpi = 0; | 91 | vpi = 0; |
90 | else | 92 | else |
91 | set_bit(vpi, phba->vpi_bmask); | 93 | set_bit(vpi, phba->vpi_bmask); |
94 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
95 | phba->sli4_hba.max_cfg_param.vpi_used++; | ||
92 | spin_unlock_irq(&phba->hbalock); | 96 | spin_unlock_irq(&phba->hbalock); |
93 | return vpi; | 97 | return vpi; |
94 | } | 98 | } |
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba) | |||
96 | static void | 100 | static void |
97 | lpfc_free_vpi(struct lpfc_hba *phba, int vpi) | 101 | lpfc_free_vpi(struct lpfc_hba *phba, int vpi) |
98 | { | 102 | { |
103 | if (vpi == 0) | ||
104 | return; | ||
99 | spin_lock_irq(&phba->hbalock); | 105 | spin_lock_irq(&phba->hbalock); |
100 | clear_bit(vpi, phba->vpi_bmask); | 106 | clear_bit(vpi, phba->vpi_bmask); |
107 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
108 | phba->sli4_hba.max_cfg_param.vpi_used--; | ||
101 | spin_unlock_irq(&phba->hbalock); | 109 | spin_unlock_irq(&phba->hbalock); |
102 | } | 110 | } |
103 | 111 | ||
@@ -308,6 +316,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) | |||
308 | goto error_out; | 316 | goto error_out; |
309 | } | 317 | } |
310 | 318 | ||
319 | /* | ||
320 | * In SLI4, the vpi must be activated before it can be used | ||
321 | * by the port. | ||
322 | */ | ||
323 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
324 | rc = lpfc_sli4_init_vpi(phba, vpi); | ||
325 | if (rc) { | ||
326 | lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, | ||
327 | "1838 Failed to INIT_VPI on vpi %d " | ||
328 | "status %d\n", vpi, rc); | ||
329 | rc = VPORT_NORESOURCES; | ||
330 | lpfc_free_vpi(phba, vpi); | ||
331 | goto error_out; | ||
332 | } | ||
333 | } | ||
311 | 334 | ||
312 | /* Assign an unused board number */ | 335 | /* Assign an unused board number */ |
313 | if ((instance = lpfc_get_instance()) < 0) { | 336 | if ((instance = lpfc_get_instance()) < 0) { |