author    David S. Miller <davem@davemloft.net>  2016-10-03 23:22:52 -0400
committer David S. Miller <davem@davemloft.net>  2016-10-03 23:22:52 -0400
commit    b462d22bf3a675dffbfe1cd7ad90eab633d822b7 (patch)
tree      e5a5e271c1020d6b07c29bdd35475c4d901c39fb
parent    b9118b7221ebb12156d2b08d4d5647bc6076d6bb (diff)
parent    abd49676c70793ee0a251bc3d8fe1604f9303210 (diff)
Merge branch 'qed-qedr-infrastructure'
Yuval Mintz says:

====================
qed*: Add qedr infrastructure support

In the last couple of weeks we've been sending RFCs for the qedr driver - the RoCE driver for the QLogic FastLinQ 4xxxx line of adapters. The latest RFC can be found at [1].

At Doug's advice [2], we've decided to split the series into two:

- The first part contains the qed backbone that's necessary for all the configuration relating to the qedr driver, as well as the qede infrastructure that is used for communication between qedr and qede.
- The second part consists of the actual qedr driver and introduces almost no changes to qed/qede.

This is the first of said two parts; the second half would be sent later this week.

The only 'oddity' in the division is the Kconfig options - as this series introduces both LL2 and QEDR-based logic in qed/qede, I wanted to add the CONFIG_INFINIBAND_QEDR option here [with default n]. Otherwise, a lot of the code introduced would be dead code [it won't even be compiled] until qedr is accepted. As a result, I've placed the config option in an odd place - under qlogic's Kconfig. The second series would then remove that option and add it in its correct place under the infiniband Kconfig. [I'm fine with pushing it there to begin with, but I didn't want to 'contaminate' non-qlogic configuration files with half-baked options.]

Dave - I don't think you were E-mailed with Doug's suggestion. I think the notion was to have the two halves accepted side by side, but the first actually has no dependency issues, so it's also possible to simply take this first into net-next and push qedr into rdma once it's merged. But it's basically up to you and Doug; we'd align with whatever suits you best.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig            |   14
-rw-r--r-- drivers/net/ethernet/qlogic/qed/Makefile       |    2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h          |   43
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c      |    8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h      |    7
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c      |  274
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h  |   20
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h      |    3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.c      | 1792
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.h      |  316
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c     |   67
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h |   30
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.c     | 2954
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_roce.h     |  216
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h       |    5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c      |    8
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c    |    4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c       |    2
-rw-r--r-- drivers/net/ethernet/qlogic/qede/Makefile      |    1
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h        |    9
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c   |   35
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_roce.c   |  314
-rw-r--r-- include/linux/qed/common_hsi.h                 |    1
-rw-r--r-- include/linux/qed/qed_if.h                     |    9
-rw-r--r-- include/linux/qed/qed_ll2_if.h                 |  139
-rw-r--r-- include/linux/qed/qed_roce_if.h                |  604
-rw-r--r-- include/linux/qed/qede_roce.h                  |   88
-rw-r--r-- include/linux/qed/rdma_common.h                |    1
28 files changed, 6944 insertions, 22 deletions
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6ba48406899e..0df1391f9663 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -88,6 +88,9 @@ config QED
88 ---help--- 88 ---help---
89 This enables the support for ... 89 This enables the support for ...
90 90
91config QED_LL2
92 bool
93
91config QED_SRIOV 94config QED_SRIOV
92 bool "QLogic QED 25/40/100Gb SR-IOV support" 95 bool "QLogic QED 25/40/100Gb SR-IOV support"
93 depends on QED && PCI_IOV 96 depends on QED && PCI_IOV
@@ -104,4 +107,15 @@ config QEDE
104 ---help--- 107 ---help---
105 This enables the support for ... 108 This enables the support for ...
106 109
110config INFINIBAND_QEDR
111 tristate "QLogic qede RoCE sources [debug]"
112 depends on QEDE && 64BIT
113 select QED_LL2
114 default n
115 ---help---
116 This provides a temporary node that allows the compilation
117 and logical testing of the InfiniBand over Ethernet support
118 for QLogic QED. This would be replaced by the 'real' option
119 once the QEDR driver is added [+relocated].
120
107endif # NET_VENDOR_QLOGIC 121endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 86a5b4f5f870..cda0af7fbc20 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -4,3 +4,5 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
4 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ 4 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
5 qed_selftest.o qed_dcbx.o qed_debug.o 5 qed_selftest.o qed_dcbx.o qed_debug.o
6qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o 6qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
7qed-$(CONFIG_QED_LL2) += qed_ll2.o
8qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 0929582fc82b..653bb5735f0c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -35,6 +35,9 @@ extern const struct qed_common_ops qed_common_ops_pass;
35 35
36#define QED_WFQ_UNIT 100 36#define QED_WFQ_UNIT 100
37 37
38#define QED_WID_SIZE (1024)
39#define QED_PF_DEMS_SIZE (4)
40
38/* cau states */ 41/* cau states */
39enum qed_coalescing_mode { 42enum qed_coalescing_mode {
40 QED_COAL_MODE_DISABLE, 43 QED_COAL_MODE_DISABLE,
@@ -50,6 +53,14 @@ enum qed_mcp_protocol_type;
50static inline u32 qed_db_addr(u32 cid, u32 DEMS) 53static inline u32 qed_db_addr(u32 cid, u32 DEMS)
51{ 54{
52 u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | 55 u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
56 (cid * QED_PF_DEMS_SIZE);
57
58 return db_addr;
59}
60
61static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
62{
63 u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
53 FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); 64 FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
54 65
55 return db_addr; 66 return db_addr;
@@ -72,6 +83,7 @@ struct qed_sb_info;
72struct qed_sb_attn_info; 83struct qed_sb_attn_info;
73struct qed_cxt_mngr; 84struct qed_cxt_mngr;
74struct qed_sb_sp_info; 85struct qed_sb_sp_info;
86struct qed_ll2_info;
75struct qed_mcp_info; 87struct qed_mcp_info;
76 88
77struct qed_rt_data { 89struct qed_rt_data {
@@ -151,13 +163,17 @@ enum QED_RESOURCES {
151 QED_RL, 163 QED_RL,
152 QED_MAC, 164 QED_MAC,
153 QED_VLAN, 165 QED_VLAN,
166 QED_RDMA_CNQ_RAM,
154 QED_ILT, 167 QED_ILT,
168 QED_LL2_QUEUE,
169 QED_RDMA_STATS_QUEUE,
155 QED_MAX_RESC, 170 QED_MAX_RESC,
156}; 171};
157 172
158enum QED_FEATURE { 173enum QED_FEATURE {
159 QED_PF_L2_QUE, 174 QED_PF_L2_QUE,
160 QED_VF, 175 QED_VF,
176 QED_RDMA_CNQ,
161 QED_MAX_FEATURES, 177 QED_MAX_FEATURES,
162}; 178};
163 179
@@ -360,6 +376,9 @@ struct qed_hwfn {
360 struct qed_sb_attn_info *p_sb_attn; 376 struct qed_sb_attn_info *p_sb_attn;
361 377
362 /* Protocol related */ 378 /* Protocol related */
379 bool using_ll2;
380 struct qed_ll2_info *p_ll2_info;
381 struct qed_rdma_info *p_rdma_info;
363 struct qed_pf_params pf_params; 382 struct qed_pf_params pf_params;
364 383
365 bool b_rdma_enabled_in_prs; 384 bool b_rdma_enabled_in_prs;
@@ -398,6 +417,17 @@ struct qed_hwfn {
398 417
399 struct dbg_tools_data dbg_info; 418 struct dbg_tools_data dbg_info;
400 419
420 /* PWM region specific data */
421 u32 dpi_size;
422 u32 dpi_count;
423
424 /* This is used to calculate the doorbell address */
425 u32 dpi_start_offset;
426
427 /* If one of the following is set then EDPM shouldn't be used */
428 u8 dcbx_no_edpm;
429 u8 db_bar_no_edpm;
430
401 struct qed_simd_fp_handler simd_proto_handler[64]; 431 struct qed_simd_fp_handler simd_proto_handler[64];
402 432
403#ifdef CONFIG_QED_SRIOV 433#ifdef CONFIG_QED_SRIOV
@@ -407,6 +437,7 @@ struct qed_hwfn {
407#endif 437#endif
408 438
409 struct z_stream_s *stream; 439 struct z_stream_s *stream;
440 struct qed_roce_ll2_info *ll2;
410}; 441};
411 442
412struct pci_params { 443struct pci_params {
@@ -431,6 +462,8 @@ struct qed_int_params {
431 bool fp_initialized; 462 bool fp_initialized;
432 u8 fp_msix_base; 463 u8 fp_msix_base;
433 u8 fp_msix_cnt; 464 u8 fp_msix_cnt;
465 u8 rdma_msix_base;
466 u8 rdma_msix_cnt;
434}; 467};
435 468
436struct qed_dbg_feature { 469struct qed_dbg_feature {
@@ -537,7 +570,6 @@ struct qed_dev {
537 570
538 bool b_is_vf; 571 bool b_is_vf;
539 u32 drv_type; 572 u32 drv_type;
540
541 struct qed_eth_stats *reset_stats; 573 struct qed_eth_stats *reset_stats;
542 struct qed_fw_data *fw_data; 574 struct qed_fw_data *fw_data;
543 575
@@ -564,7 +596,16 @@ struct qed_dev {
564 596
565 struct qed_dbg_params dbg_params; 597 struct qed_dbg_params dbg_params;
566 598
599#ifdef CONFIG_QED_LL2
600 struct qed_cb_ll2_info *ll2;
601 u8 ll2_mac_address[ETH_ALEN];
602#endif
603
567 const struct firmware *firmware; 604 const struct firmware *firmware;
605
606 u32 rdma_max_sge;
607 u32 rdma_max_inline;
608 u32 rdma_max_srq_sge;
568}; 609};
569 610
570#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB 611#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index dd579b2ef224..82370a1a59ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -48,7 +48,13 @@
48#define TM_ELEM_SIZE 4 48#define TM_ELEM_SIZE 4
49 49
50/* ILT constants */ 50/* ILT constants */
51#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
52/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
53#define ILT_DEFAULT_HW_P_SIZE 4
54#else
51#define ILT_DEFAULT_HW_P_SIZE 3 55#define ILT_DEFAULT_HW_P_SIZE 3
56#endif
57
52#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) 58#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
53#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET 59#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
54 60
@@ -1839,6 +1845,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
1839 /* Set the number of required CORE connections */ 1845 /* Set the number of required CORE connections */
1840 u32 core_cids = 1; /* SPQ */ 1846 u32 core_cids = 1; /* SPQ */
1841 1847
1848 if (p_hwfn->using_ll2)
1849 core_cids += 4;
1842 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); 1850 qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
1843 1851
1844 switch (p_hwfn->hw_info.personality) { 1852 switch (p_hwfn->hw_info.personality) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c6f6f2e8192d..2b8bdaa77800 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -170,6 +170,13 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
170 */ 170 */
171void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, 171void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
172 u32 cid); 172 u32 cid);
173int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
174 enum qed_cxt_elem_type elem_type, u32 iid);
175u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
176 enum protocol_type type);
177u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
178 enum protocol_type type);
179int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
173 180
174#define QED_CTX_WORKING_MEM 0 181#define QED_CTX_WORKING_MEM 0
175#define QED_CTX_FL_MEM 1 182#define QED_CTX_FL_MEM 1
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 13d8b4075b01..754f6a908858 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -29,14 +29,19 @@
29#include "qed_hw.h" 29#include "qed_hw.h"
30#include "qed_init_ops.h" 30#include "qed_init_ops.h"
31#include "qed_int.h" 31#include "qed_int.h"
32#include "qed_ll2.h"
32#include "qed_mcp.h" 33#include "qed_mcp.h"
33#include "qed_reg_addr.h" 34#include "qed_reg_addr.h"
34#include "qed_sp.h" 35#include "qed_sp.h"
35#include "qed_sriov.h" 36#include "qed_sriov.h"
36#include "qed_vf.h" 37#include "qed_vf.h"
38#include "qed_roce.h"
37 39
38static DEFINE_SPINLOCK(qm_lock); 40static DEFINE_SPINLOCK(qm_lock);
39 41
42#define QED_MIN_DPIS (4)
43#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
44
40/* API common to all protocols */ 45/* API common to all protocols */
41enum BAR_ID { 46enum BAR_ID {
42 BAR_ID_0, /* used for GRC */ 47 BAR_ID_0, /* used for GRC */
@@ -147,6 +152,9 @@ void qed_resc_free(struct qed_dev *cdev)
147 qed_eq_free(p_hwfn, p_hwfn->p_eq); 152 qed_eq_free(p_hwfn, p_hwfn->p_eq);
148 qed_consq_free(p_hwfn, p_hwfn->p_consq); 153 qed_consq_free(p_hwfn, p_hwfn->p_consq);
149 qed_int_free(p_hwfn); 154 qed_int_free(p_hwfn);
155#ifdef CONFIG_QED_LL2
156 qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
157#endif
150 qed_iov_free(p_hwfn); 158 qed_iov_free(p_hwfn);
151 qed_dmae_info_free(p_hwfn); 159 qed_dmae_info_free(p_hwfn);
152 qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info); 160 qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -403,6 +411,9 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
403 411
404int qed_resc_alloc(struct qed_dev *cdev) 412int qed_resc_alloc(struct qed_dev *cdev)
405{ 413{
414#ifdef CONFIG_QED_LL2
415 struct qed_ll2_info *p_ll2_info;
416#endif
406 struct qed_consq *p_consq; 417 struct qed_consq *p_consq;
407 struct qed_eq *p_eq; 418 struct qed_eq *p_eq;
408 int i, rc = 0; 419 int i, rc = 0;
@@ -513,6 +524,15 @@ int qed_resc_alloc(struct qed_dev *cdev)
513 goto alloc_no_mem; 524 goto alloc_no_mem;
514 p_hwfn->p_consq = p_consq; 525 p_hwfn->p_consq = p_consq;
515 526
527#ifdef CONFIG_QED_LL2
528 if (p_hwfn->using_ll2) {
529 p_ll2_info = qed_ll2_alloc(p_hwfn);
530 if (!p_ll2_info)
531 goto alloc_no_mem;
532 p_hwfn->p_ll2_info = p_ll2_info;
533 }
534#endif
535
516 /* DMA info initialization */ 536 /* DMA info initialization */
517 rc = qed_dmae_info_alloc(p_hwfn); 537 rc = qed_dmae_info_alloc(p_hwfn);
518 if (rc) 538 if (rc)
@@ -561,6 +581,10 @@ void qed_resc_setup(struct qed_dev *cdev)
561 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); 581 qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
562 582
563 qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); 583 qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
584#ifdef CONFIG_QED_LL2
585 if (p_hwfn->using_ll2)
586 qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
587#endif
564 } 588 }
565} 589}
566 590
@@ -767,6 +791,136 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
767 return rc; 791 return rc;
768} 792}
769 793
794static int
795qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
796 struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
797{
798 u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
799 u32 dpi_bit_shift, dpi_count;
800 u32 min_dpis;
801
802 /* Calculate DPI size */
803 dpi_page_size_1 = QED_WID_SIZE * n_cpus;
804 dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
805 dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
806 dpi_page_size = roundup_pow_of_two(dpi_page_size);
807 dpi_bit_shift = ilog2(dpi_page_size / 4096);
808
809 dpi_count = pwm_region_size / dpi_page_size;
810
811 min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
812 min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
813
814 p_hwfn->dpi_size = dpi_page_size;
815 p_hwfn->dpi_count = dpi_count;
816
817 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
818
819 if (dpi_count < min_dpis)
820 return -EINVAL;
821
822 return 0;
823}
824
825enum QED_ROCE_EDPM_MODE {
826 QED_ROCE_EDPM_MODE_ENABLE = 0,
827 QED_ROCE_EDPM_MODE_FORCE_ON = 1,
828 QED_ROCE_EDPM_MODE_DISABLE = 2,
829};
830
831static int
832qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
833{
834 u32 pwm_regsize, norm_regsize;
835 u32 non_pwm_conn, min_addr_reg1;
836 u32 db_bar_size, n_cpus;
837 u32 roce_edpm_mode;
838 u32 pf_dems_shift;
839 int rc = 0;
840 u8 cond;
841
842 db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
843 if (p_hwfn->cdev->num_hwfns > 1)
844 db_bar_size /= 2;
845
846 /* Calculate doorbell regions */
847 non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
848 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
849 NULL) +
850 qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
851 NULL);
852 norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
853 min_addr_reg1 = norm_regsize / 4096;
854 pwm_regsize = db_bar_size - norm_regsize;
855
856 /* Check that the normal and PWM sizes are valid */
857 if (db_bar_size < norm_regsize) {
858 DP_ERR(p_hwfn->cdev,
859 "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
860 db_bar_size, norm_regsize);
861 return -EINVAL;
862 }
863
864 if (pwm_regsize < QED_MIN_PWM_REGION) {
865 DP_ERR(p_hwfn->cdev,
866 "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
867 pwm_regsize,
868 QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
869 return -EINVAL;
870 }
871
872 /* Calculate number of DPIs */
873 roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
874 if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
875 ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) {
876 /* Either EDPM is mandatory, or we are attempting to allocate a
877 * WID per CPU.
878 */
879 n_cpus = num_active_cpus();
880 rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
881 }
882
883 cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
884 (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
885 if (cond || p_hwfn->dcbx_no_edpm) {
886 /* Either EDPM is disabled from user configuration, or it is
887 * disabled via DCBx, or it is not mandatory and we failed to
888 * allocated a WID per CPU.
889 */
890 n_cpus = 1;
891 rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
892
893 if (cond)
894 qed_rdma_dpm_bar(p_hwfn, p_ptt);
895 }
896
897 DP_INFO(p_hwfn,
898 "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
899 norm_regsize,
900 pwm_regsize,
901 p_hwfn->dpi_size,
902 p_hwfn->dpi_count,
903 ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
904 "disabled" : "enabled");
905
906 if (rc) {
907 DP_ERR(p_hwfn,
908 "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
909 p_hwfn->dpi_count,
910 p_hwfn->pf_params.rdma_pf_params.min_dpis);
911 return -EINVAL;
912 }
913
914 p_hwfn->dpi_start_offset = norm_regsize;
915
916 /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
917 pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
918 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
919 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
920
921 return 0;
922}
923
770static int qed_hw_init_port(struct qed_hwfn *p_hwfn, 924static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
771 struct qed_ptt *p_ptt, int hw_mode) 925 struct qed_ptt *p_ptt, int hw_mode)
772{ 926{
@@ -840,6 +994,10 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
840 /* Pure runtime initializations - directly to the HW */ 994 /* Pure runtime initializations - directly to the HW */
841 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); 995 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
842 996
997 rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
998 if (rc)
999 return rc;
1000
843 if (b_hw_start) { 1001 if (b_hw_start) {
844 /* enable interrupts */ 1002 /* enable interrupts */
845 qed_int_igu_enable(p_hwfn, p_ptt, int_mode); 1003 qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -1264,6 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
1264 u32 *feat_num = p_hwfn->hw_info.feat_num; 1422 u32 *feat_num = p_hwfn->hw_info.feat_num;
1265 int num_features = 1; 1423 int num_features = 1;
1266 1424
1425#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
1426 /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the
1427 * status blocks equally between L2 / RoCE but with consideration as
1428 * to how many l2 queues / cnqs we have
1429 */
1430 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
1431 num_features++;
1432
1433 feat_num[QED_RDMA_CNQ] =
1434 min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
1435 RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
1436 }
1437#endif
1267 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 1438 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1268 num_features, 1439 num_features,
1269 RESC_NUM(p_hwfn, QED_L2_QUEUE)); 1440 RESC_NUM(p_hwfn, QED_L2_QUEUE));
@@ -1304,6 +1475,10 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1304 resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) / 1475 resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
1305 num_funcs; 1476 num_funcs;
1306 resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs; 1477 resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
1478 resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
1479 resc_num[QED_RDMA_CNQ_RAM] = NUM_OF_CMDQS_CQS / num_funcs;
1480 resc_num[QED_RDMA_STATS_QUEUE] = RDMA_NUM_STATISTIC_COUNTERS_BB /
1481 num_funcs;
1307 1482
1308 for (i = 0; i < QED_MAX_RESC; i++) 1483 for (i = 0; i < QED_MAX_RESC; i++)
1309 resc_start[i] = resc_num[i] * enabled_func_idx; 1484 resc_start[i] = resc_num[i] * enabled_func_idx;
@@ -1327,7 +1502,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1327 "RL = %d start = %d\n" 1502 "RL = %d start = %d\n"
1328 "MAC = %d start = %d\n" 1503 "MAC = %d start = %d\n"
1329 "VLAN = %d start = %d\n" 1504 "VLAN = %d start = %d\n"
1330 "ILT = %d start = %d\n", 1505 "ILT = %d start = %d\n"
1506 "LL2_QUEUE = %d start = %d\n",
1331 p_hwfn->hw_info.resc_num[QED_SB], 1507 p_hwfn->hw_info.resc_num[QED_SB],
1332 p_hwfn->hw_info.resc_start[QED_SB], 1508 p_hwfn->hw_info.resc_start[QED_SB],
1333 p_hwfn->hw_info.resc_num[QED_L2_QUEUE], 1509 p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
@@ -1343,7 +1519,9 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1343 p_hwfn->hw_info.resc_num[QED_VLAN], 1519 p_hwfn->hw_info.resc_num[QED_VLAN],
1344 p_hwfn->hw_info.resc_start[QED_VLAN], 1520 p_hwfn->hw_info.resc_start[QED_VLAN],
1345 p_hwfn->hw_info.resc_num[QED_ILT], 1521 p_hwfn->hw_info.resc_num[QED_ILT],
1346 p_hwfn->hw_info.resc_start[QED_ILT]); 1522 p_hwfn->hw_info.resc_start[QED_ILT],
1523 RESC_NUM(p_hwfn, QED_LL2_QUEUE),
1524 RESC_START(p_hwfn, QED_LL2_QUEUE));
1347 1525
1348 return 0; 1526 return 0;
1349} 1527}
@@ -2133,6 +2311,98 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
2133 return 0; 2311 return 0;
2134} 2312}
2135 2313
2314static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
2315 u8 *p_filter)
2316{
2317 *p_high = p_filter[1] | (p_filter[0] << 8);
2318 *p_low = p_filter[5] | (p_filter[4] << 8) |
2319 (p_filter[3] << 16) | (p_filter[2] << 24);
2320}
2321
2322int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
2323 struct qed_ptt *p_ptt, u8 *p_filter)
2324{
2325 u32 high = 0, low = 0, en;
2326 int i;
2327
2328 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2329 return 0;
2330
2331 qed_llh_mac_to_filter(&high, &low, p_filter);
2332
2333 /* Find a free entry and utilize it */
2334 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2335 en = qed_rd(p_hwfn, p_ptt,
2336 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
2337 if (en)
2338 continue;
2339 qed_wr(p_hwfn, p_ptt,
2340 NIG_REG_LLH_FUNC_FILTER_VALUE +
2341 2 * i * sizeof(u32), low);
2342 qed_wr(p_hwfn, p_ptt,
2343 NIG_REG_LLH_FUNC_FILTER_VALUE +
2344 (2 * i + 1) * sizeof(u32), high);
2345 qed_wr(p_hwfn, p_ptt,
2346 NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
2347 qed_wr(p_hwfn, p_ptt,
2348 NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2349 i * sizeof(u32), 0);
2350 qed_wr(p_hwfn, p_ptt,
2351 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
2352 break;
2353 }
2354 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
2355 DP_NOTICE(p_hwfn,
2356 "Failed to find an empty LLH filter to utilize\n");
2357 return -EINVAL;
2358 }
2359
2360 DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2361 "mac: %pM is added at %d\n",
2362 p_filter, i);
2363
2364 return 0;
2365}
2366
2367void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
2368 struct qed_ptt *p_ptt, u8 *p_filter)
2369{
2370 u32 high = 0, low = 0;
2371 int i;
2372
2373 if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2374 return;
2375
2376 qed_llh_mac_to_filter(&high, &low, p_filter);
2377
2378 /* Find the entry and clean it */
2379 for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2380 if (qed_rd(p_hwfn, p_ptt,
2381 NIG_REG_LLH_FUNC_FILTER_VALUE +
2382 2 * i * sizeof(u32)) != low)
2383 continue;
2384 if (qed_rd(p_hwfn, p_ptt,
2385 NIG_REG_LLH_FUNC_FILTER_VALUE +
2386 (2 * i + 1) * sizeof(u32)) != high)
2387 continue;
2388
2389 qed_wr(p_hwfn, p_ptt,
2390 NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
2391 qed_wr(p_hwfn, p_ptt,
2392 NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
2393 qed_wr(p_hwfn, p_ptt,
2394 NIG_REG_LLH_FUNC_FILTER_VALUE +
2395 (2 * i + 1) * sizeof(u32), 0);
2396
2397 DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2398 "mac: %pM is removed from %d\n",
2399 p_filter, i);
2400 break;
2401 }
2402 if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
2403 DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
2404}
2405
2136static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, 2406static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2137 u32 hw_addr, void *p_eth_qzone, 2407 u32 hw_addr, void *p_eth_qzone,
2138 size_t eth_qzone_size, u8 timeset) 2408 size_t eth_qzone_size, u8 timeset)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 343bb0344f62..b6711c106597 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -310,6 +310,26 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
310 u8 *dst_id); 310 u8 *dst_id);
311 311
312/** 312/**
313 * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
314 *
315 * @param p_hwfn
316 * @param p_ptt
317 * @param p_filter - MAC to add
318 */
319int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
320 struct qed_ptt *p_ptt, u8 *p_filter);
321
322/**
323 * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
324 *
325 * @param p_hwfn
326 * @param p_ptt
327 * @param p_filter - MAC to remove
328 */
329void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
330 struct qed_ptt *p_ptt, u8 *p_filter);
331
332/**
313 * *@brief Cleanup of previous driver remains prior to load 333 * *@brief Cleanup of previous driver remains prior to load
314 * 334 *
315 * @param p_hwfn 335 * @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 2777d5bb4380..72eee29c677f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -727,6 +727,9 @@ struct core_tx_bd_flags {
727#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 727#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
728#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 728#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
729#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 729#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
730#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1
731#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12
732
730}; 733};
731 734
732struct core_tx_bd { 735struct core_tx_bd {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
new file mode 100644
index 000000000000..a6db10717d5c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -0,0 +1,1792 @@
1/* QLogic qed NIC Driver
2 *
3 * Copyright (c) 2015 QLogic Corporation
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#include <linux/types.h>
11#include <asm/byteorder.h>
12#include <linux/dma-mapping.h>
13#include <linux/if_vlan.h>
14#include <linux/kernel.h>
15#include <linux/pci.h>
16#include <linux/slab.h>
17#include <linux/stddef.h>
18#include <linux/version.h>
19#include <linux/workqueue.h>
20#include <net/ipv6.h>
21#include <linux/bitops.h>
22#include <linux/delay.h>
23#include <linux/errno.h>
24#include <linux/etherdevice.h>
25#include <linux/io.h>
26#include <linux/list.h>
27#include <linux/mutex.h>
28#include <linux/spinlock.h>
29#include <linux/string.h>
30#include <linux/qed/qed_ll2_if.h>
31#include "qed.h"
32#include "qed_cxt.h"
33#include "qed_dev_api.h"
34#include "qed_hsi.h"
35#include "qed_hw.h"
36#include "qed_int.h"
37#include "qed_ll2.h"
38#include "qed_mcp.h"
39#include "qed_reg_addr.h"
40#include "qed_sp.h"
41
42#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
43#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
44
45#define QED_LL2_TX_SIZE (256)
46#define QED_LL2_RX_SIZE (4096)
47
48struct qed_cb_ll2_info {
49 int rx_cnt;
50 u32 rx_size;
51 u8 handle;
52 bool frags_mapped;
53
54 /* Lock protecting LL2 buffer lists in sleepless context */
55 spinlock_t lock;
56 struct list_head list;
57
58 const struct qed_ll2_cb_ops *cbs;
59 void *cb_cookie;
60};
61
62struct qed_ll2_buffer {
63 struct list_head list;
64 void *data;
65 dma_addr_t phys_addr;
66};
67
68static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn,
69 u8 connection_handle,
70 void *cookie,
71 dma_addr_t first_frag_addr,
72 bool b_last_fragment,
73 bool b_last_packet)
74{
75 struct qed_dev *cdev = p_hwfn->cdev;
76 struct sk_buff *skb = cookie;
77
78 /* All we need to do is release the mapping */
79 dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
80 skb_headlen(skb), DMA_TO_DEVICE);
81
82 if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
83 cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
84 b_last_fragment);
85
86 if (cdev->ll2->frags_mapped)
87 /* Case where mapped frags were received, need to
88 * free skb with nr_frags marked as 0
89 */
90 skb_shinfo(skb)->nr_frags = 0;
91
92 dev_kfree_skb_any(skb);
93}
94
95static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
96 u8 **data, dma_addr_t *phys_addr)
97{
98 *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
99 if (!(*data)) {
100 DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
101 return -ENOMEM;
102 }
103
104 *phys_addr = dma_map_single(&cdev->pdev->dev,
105 ((*data) + NET_SKB_PAD),
106 cdev->ll2->rx_size, DMA_FROM_DEVICE);
107 if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
108 DP_INFO(cdev, "Failed to map LL2 buffer data\n");
109 kfree((*data));
110 return -ENOMEM;
111 }
112
113 return 0;
114}
115
116static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
117 struct qed_ll2_buffer *buffer)
118{
119 spin_lock_bh(&cdev->ll2->lock);
120
121 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
122 cdev->ll2->rx_size, DMA_FROM_DEVICE);
123 kfree(buffer->data);
124 list_del(&buffer->list);
125
126 cdev->ll2->rx_cnt--;
127 if (!cdev->ll2->rx_cnt)
128 DP_INFO(cdev, "All LL2 entries were removed\n");
129
130 spin_unlock_bh(&cdev->ll2->lock);
131
132 return 0;
133}
134
135static void qed_ll2_kill_buffers(struct qed_dev *cdev)
136{
137 struct qed_ll2_buffer *buffer, *tmp_buffer;
138
139 list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
140 qed_ll2_dealloc_buffer(cdev, buffer);
141}
142
143void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
144 u8 connection_handle,
145 struct qed_ll2_rx_packet *p_pkt,
146 struct core_rx_fast_path_cqe *p_cqe,
147 bool b_last_packet)
148{
149 u16 packet_length = le16_to_cpu(p_cqe->packet_length);
150 struct qed_ll2_buffer *buffer = p_pkt->cookie;
151 struct qed_dev *cdev = p_hwfn->cdev;
152 u16 vlan = le16_to_cpu(p_cqe->vlan);
153 u32 opaque_data_0, opaque_data_1;
154 u8 pad = p_cqe->placement_offset;
155 dma_addr_t new_phys_addr;
156 struct sk_buff *skb;
157 bool reuse = false;
158 int rc = -EINVAL;
159 u8 *new_data;
160
161 opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]);
162 opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]);
163
164 DP_VERBOSE(p_hwfn,
165 (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
166 "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
167 (u64)p_pkt->rx_buf_addr, pad, packet_length,
168 le16_to_cpu(p_cqe->parse_flags.flags), vlan,
169 opaque_data_0, opaque_data_1);
170
171 if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
172 print_hex_dump(KERN_INFO, "",
173 DUMP_PREFIX_OFFSET, 16, 1,
174 buffer->data, packet_length, false);
175 }
176
177 /* Determine if data is valid */
178 if (packet_length < ETH_HLEN)
179 reuse = true;
180
181 /* Allocate a replacement for buffer; Reuse upon failure */
182 if (!reuse)
183 rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
184 &new_phys_addr);
185
186 /* If need to reuse or there's no replacement buffer, repost this */
187 if (rc)
188 goto out_post;
189
190 skb = build_skb(buffer->data, 0);
191 if (!skb) {
192 rc = -ENOMEM;
193 goto out_post;
194 }
195
196 pad += NET_SKB_PAD;
197 skb_reserve(skb, pad);
198 skb_put(skb, packet_length);
199 skb_checksum_none_assert(skb);
200
201 /* Get parital ethernet information instead of eth_type_trans(),
202 * Since we don't have an associated net_device.
203 */
204 skb_reset_mac_header(skb);
205 skb->protocol = eth_hdr(skb)->h_proto;
206
207 /* Pass SKB onward */
208 if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
209 if (vlan)
210 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
211 cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
212 opaque_data_0, opaque_data_1);
213 }
214
215 /* Update Buffer information and update FW producer */
216 buffer->data = new_data;
217 buffer->phys_addr = new_phys_addr;
218
219out_post:
220 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle,
221 buffer->phys_addr, 0, buffer, 1);
222
223 if (rc)
224 qed_ll2_dealloc_buffer(cdev, buffer);
225}
226
227static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
228 u8 connection_handle,
229 bool b_lock,
230 bool b_only_active)
231{
232 struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
233
234 if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
235 return NULL;
236
237 if (!p_hwfn->p_ll2_info)
238 return NULL;
239
240 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
241
242 if (b_only_active) {
243 if (b_lock)
244 mutex_lock(&p_ll2_conn->mutex);
245 if (p_ll2_conn->b_active)
246 p_ret = p_ll2_conn;
247 if (b_lock)
248 mutex_unlock(&p_ll2_conn->mutex);
249 } else {
250 p_ret = p_ll2_conn;
251 }
252
253 return p_ret;
254}
255
256static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
257 u8 connection_handle)
258{
259 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
260}
261
262static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
263 u8 connection_handle)
264{
265 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
266}
267
268static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
269 *p_hwfn,
270 u8 connection_handle)
271{
272 return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
273}
274
275static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
276{
277 bool b_last_packet = false, b_last_frag = false;
278 struct qed_ll2_tx_packet *p_pkt = NULL;
279 struct qed_ll2_info *p_ll2_conn;
280 struct qed_ll2_tx_queue *p_tx;
281 dma_addr_t tx_frag;
282
283 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
284 if (!p_ll2_conn)
285 return;
286
287 p_tx = &p_ll2_conn->tx_queue;
288
289 while (!list_empty(&p_tx->active_descq)) {
290 p_pkt = list_first_entry(&p_tx->active_descq,
291 struct qed_ll2_tx_packet, list_entry);
292 if (!p_pkt)
293 break;
294
295 list_del(&p_pkt->list_entry);
296 b_last_packet = list_empty(&p_tx->active_descq);
297 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
298 p_tx->cur_completing_packet = *p_pkt;
299 p_tx->cur_completing_bd_idx = 1;
300 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
301 tx_frag = p_pkt->bds_set[0].tx_frag;
302 if (p_ll2_conn->gsi_enable)
303 qed_ll2b_release_tx_gsi_packet(p_hwfn,
304 p_ll2_conn->my_id,
305 p_pkt->cookie,
306 tx_frag,
307 b_last_frag,
308 b_last_packet);
309 else
310 qed_ll2b_complete_tx_packet(p_hwfn,
311 p_ll2_conn->my_id,
312 p_pkt->cookie,
313 tx_frag,
314 b_last_frag,
315 b_last_packet);
316
317 }
318}
319
320static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
321{
322 struct qed_ll2_info *p_ll2_conn = p_cookie;
323 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
324 u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
325 struct qed_ll2_tx_packet *p_pkt;
326 bool b_last_frag = false;
327 unsigned long flags;
328 dma_addr_t tx_frag;
329 int rc = -EINVAL;
330
331 spin_lock_irqsave(&p_tx->lock, flags);
332 if (p_tx->b_completing_packet) {
333 rc = -EBUSY;
334 goto out;
335 }
336
337 new_idx = le16_to_cpu(*p_tx->p_fw_cons);
338 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
339 while (num_bds) {
340 if (list_empty(&p_tx->active_descq))
341 goto out;
342
343 p_pkt = list_first_entry(&p_tx->active_descq,
344 struct qed_ll2_tx_packet, list_entry);
345 if (!p_pkt)
346 goto out;
347
348 p_tx->b_completing_packet = true;
349 p_tx->cur_completing_packet = *p_pkt;
350 num_bds_in_packet = p_pkt->bd_used;
351 list_del(&p_pkt->list_entry);
352
353 if (num_bds < num_bds_in_packet) {
354 DP_NOTICE(p_hwfn,
355 "Rest of BDs does not cover whole packet\n");
356 goto out;
357 }
358
359 num_bds -= num_bds_in_packet;
360 p_tx->bds_idx += num_bds_in_packet;
361 while (num_bds_in_packet--)
362 qed_chain_consume(&p_tx->txq_chain);
363
364 p_tx->cur_completing_bd_idx = 1;
365 b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
366 list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
367
368 spin_unlock_irqrestore(&p_tx->lock, flags);
369 tx_frag = p_pkt->bds_set[0].tx_frag;
370 if (p_ll2_conn->gsi_enable)
371 qed_ll2b_complete_tx_gsi_packet(p_hwfn,
372 p_ll2_conn->my_id,
373 p_pkt->cookie,
374 tx_frag,
375 b_last_frag, !num_bds);
376 else
377 qed_ll2b_complete_tx_packet(p_hwfn,
378 p_ll2_conn->my_id,
379 p_pkt->cookie,
380 tx_frag,
381 b_last_frag, !num_bds);
382 spin_lock_irqsave(&p_tx->lock, flags);
383 }
384
385 p_tx->b_completing_packet = false;
386 rc = 0;
387out:
388 spin_unlock_irqrestore(&p_tx->lock, flags);
389 return rc;
390}
391
392static int
393qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
394 struct qed_ll2_info *p_ll2_info,
395 union core_rx_cqe_union *p_cqe,
396 unsigned long lock_flags, bool b_last_cqe)
397{
398 struct qed_ll2_rx_queue *p_rx = &p_ll2_info->rx_queue;
399 struct qed_ll2_rx_packet *p_pkt = NULL;
400 u16 packet_length, parse_flags, vlan;
401 u32 src_mac_addrhi;
402 u16 src_mac_addrlo;
403
404 if (!list_empty(&p_rx->active_descq))
405 p_pkt = list_first_entry(&p_rx->active_descq,
406 struct qed_ll2_rx_packet, list_entry);
407 if (!p_pkt) {
408 DP_NOTICE(p_hwfn,
409 "GSI Rx completion but active_descq is empty\n");
410 return -EIO;
411 }
412
413 list_del(&p_pkt->list_entry);
414 parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
415 packet_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
416 vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
417 src_mac_addrhi = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
418 src_mac_addrlo = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
419 if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
420 DP_NOTICE(p_hwfn,
421 "Mismatch between active_descq and the LL2 Rx chain\n");
422 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
423
424 spin_unlock_irqrestore(&p_rx->lock, lock_flags);
425 qed_ll2b_complete_rx_gsi_packet(p_hwfn,
426 p_ll2_info->my_id,
427 p_pkt->cookie,
428 p_pkt->rx_buf_addr,
429 packet_length,
430 p_cqe->rx_cqe_gsi.data_length_error,
431 parse_flags,
432 vlan,
433 src_mac_addrhi,
434 src_mac_addrlo, b_last_cqe);
435 spin_lock_irqsave(&p_rx->lock, lock_flags);
436
437 return 0;
438}
439
440static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
441 struct qed_ll2_info *p_ll2_conn,
442 union core_rx_cqe_union *p_cqe,
443 unsigned long lock_flags,
444 bool b_last_cqe)
445{
446 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
447 struct qed_ll2_rx_packet *p_pkt = NULL;
448
449 if (!list_empty(&p_rx->active_descq))
450 p_pkt = list_first_entry(&p_rx->active_descq,
451 struct qed_ll2_rx_packet, list_entry);
452 if (!p_pkt) {
453 DP_NOTICE(p_hwfn,
454 "LL2 Rx completion but active_descq is empty\n");
455 return -EIO;
456 }
457 list_del(&p_pkt->list_entry);
458
459 if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
460 DP_NOTICE(p_hwfn,
461 "Mismatch between active_descq and the LL2 Rx chain\n");
462 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
463
464 spin_unlock_irqrestore(&p_rx->lock, lock_flags);
465 qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
466 p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
467 spin_lock_irqsave(&p_rx->lock, lock_flags);
468
469 return 0;
470}
471
472static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
473{
474 struct qed_ll2_info *p_ll2_conn = cookie;
475 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
476 union core_rx_cqe_union *cqe = NULL;
477 u16 cq_new_idx = 0, cq_old_idx = 0;
478 unsigned long flags = 0;
479 int rc = 0;
480
481 spin_lock_irqsave(&p_rx->lock, flags);
482 cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
483 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
484
485 while (cq_new_idx != cq_old_idx) {
486 bool b_last_cqe = (cq_new_idx == cq_old_idx);
487
488 cqe = qed_chain_consume(&p_rx->rcq_chain);
489 cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
490
491 DP_VERBOSE(p_hwfn,
492 QED_MSG_LL2,
493 "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
494 cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
495
496 switch (cqe->rx_cqe_sp.type) {
497 case CORE_RX_CQE_TYPE_SLOW_PATH:
498 DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
499 rc = -EINVAL;
500 break;
501 case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
502 rc = qed_ll2_rxq_completion_gsi(p_hwfn, p_ll2_conn,
503 cqe, flags, b_last_cqe);
504 break;
505 case CORE_RX_CQE_TYPE_REGULAR:
506 rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
507 cqe, flags, b_last_cqe);
508 break;
509 default:
510 rc = -EIO;
511 }
512 }
513
514 spin_unlock_irqrestore(&p_rx->lock, flags);
515 return rc;
516}
517
518void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
519{
520 struct qed_ll2_info *p_ll2_conn = NULL;
521 struct qed_ll2_rx_packet *p_pkt = NULL;
522 struct qed_ll2_rx_queue *p_rx;
523
524 p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
525 if (!p_ll2_conn)
526 return;
527
528 p_rx = &p_ll2_conn->rx_queue;
529
530 while (!list_empty(&p_rx->active_descq)) {
531 dma_addr_t rx_buf_addr;
532 void *cookie;
533 bool b_last;
534
535 p_pkt = list_first_entry(&p_rx->active_descq,
536 struct qed_ll2_rx_packet, list_entry);
537 if (!p_pkt)
538 break;
539
540 list_del(&p_pkt->list_entry);
541 list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
542
543 rx_buf_addr = p_pkt->rx_buf_addr;
544 cookie = p_pkt->cookie;
545
546 b_last = list_empty(&p_rx->active_descq);
547 }
548}
549
550static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
551 struct qed_ll2_info *p_ll2_conn,
552 u8 action_on_error)
553{
554 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
555 struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
556 struct core_rx_start_ramrod_data *p_ramrod = NULL;
557 struct qed_spq_entry *p_ent = NULL;
558 struct qed_sp_init_data init_data;
559 u16 cqe_pbl_size;
560 int rc = 0;
561
562 /* Get SPQ entry */
563 memset(&init_data, 0, sizeof(init_data));
564 init_data.cid = p_ll2_conn->cid;
565 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
566 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
567
568 rc = qed_sp_init_request(p_hwfn, &p_ent,
569 CORE_RAMROD_RX_QUEUE_START,
570 PROTOCOLID_CORE, &init_data);
571 if (rc)
572 return rc;
573
574 p_ramrod = &p_ent->ramrod.core_rx_queue_start;
575
576 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
577 p_ramrod->sb_index = p_rx->rx_sb_index;
578 p_ramrod->complete_event_flg = 1;
579
580 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
581 DMA_REGPAIR_LE(p_ramrod->bd_base,
582 p_rx->rxq_chain.p_phys_addr);
583 cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
584 p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
585 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
586 qed_chain_get_pbl_phys(&p_rx->rcq_chain));
587
588 p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
589 p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
590 p_ramrod->queue_id = p_ll2_conn->queue_id;
591 p_ramrod->main_func_queue = 1;
592
593 if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
594 p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) {
595 p_ramrod->mf_si_bcast_accept_all = 1;
596 p_ramrod->mf_si_mcast_accept_all = 1;
597 } else {
598 p_ramrod->mf_si_bcast_accept_all = 0;
599 p_ramrod->mf_si_mcast_accept_all = 0;
600 }
601
602 p_ramrod->action_on_error.error_type = action_on_error;
603 p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
604 return qed_spq_post(p_hwfn, p_ent, NULL);
605}
606
607static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
608 struct qed_ll2_info *p_ll2_conn)
609{
610 enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
611 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
612 struct core_tx_start_ramrod_data *p_ramrod = NULL;
613 struct qed_spq_entry *p_ent = NULL;
614 struct qed_sp_init_data init_data;
615 union qed_qm_pq_params pq_params;
616 u16 pq_id = 0, pbl_size;
617 int rc = -EINVAL;
618
619 if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
620 return 0;
621
622 /* Get SPQ entry */
623 memset(&init_data, 0, sizeof(init_data));
624 init_data.cid = p_ll2_conn->cid;
625 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
626 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
627
628 rc = qed_sp_init_request(p_hwfn, &p_ent,
629 CORE_RAMROD_TX_QUEUE_START,
630 PROTOCOLID_CORE, &init_data);
631 if (rc)
632 return rc;
633
634 p_ramrod = &p_ent->ramrod.core_tx_queue_start;
635
636 p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
637 p_ramrod->sb_index = p_tx->tx_sb_index;
638 p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
639 p_ll2_conn->tx_stats_en = 1;
640 p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
641 p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
642
643 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
644 qed_chain_get_pbl_phys(&p_tx->txq_chain));
645 pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
646 p_ramrod->pbl_size = cpu_to_le16(pbl_size);
647
648 memset(&pq_params, 0, sizeof(pq_params));
649 pq_params.core.tc = p_ll2_conn->tx_tc;
650 pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
651 p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
652
653 switch (conn_type) {
654 case QED_LL2_TYPE_ISCSI:
655 case QED_LL2_TYPE_ISCSI_OOO:
656 p_ramrod->conn_type = PROTOCOLID_ISCSI;
657 break;
658 case QED_LL2_TYPE_ROCE:
659 p_ramrod->conn_type = PROTOCOLID_ROCE;
660 break;
661 default:
662 p_ramrod->conn_type = PROTOCOLID_ETH;
663 DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
664 }
665
666 p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
667 return qed_spq_post(p_hwfn, p_ent, NULL);
668}
669
670static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
671 struct qed_ll2_info *p_ll2_conn)
672{
673 struct core_rx_stop_ramrod_data *p_ramrod = NULL;
674 struct qed_spq_entry *p_ent = NULL;
675 struct qed_sp_init_data init_data;
676 int rc = -EINVAL;
677
678 /* Get SPQ entry */
679 memset(&init_data, 0, sizeof(init_data));
680 init_data.cid = p_ll2_conn->cid;
681 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
682 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
683
684 rc = qed_sp_init_request(p_hwfn, &p_ent,
685 CORE_RAMROD_RX_QUEUE_STOP,
686 PROTOCOLID_CORE, &init_data);
687 if (rc)
688 return rc;
689
690 p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
691
692 p_ramrod->complete_event_flg = 1;
693 p_ramrod->queue_id = p_ll2_conn->queue_id;
694
695 return qed_spq_post(p_hwfn, p_ent, NULL);
696}
697
698static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
699 struct qed_ll2_info *p_ll2_conn)
700{
701 struct qed_spq_entry *p_ent = NULL;
702 struct qed_sp_init_data init_data;
703 int rc = -EINVAL;
704
705 /* Get SPQ entry */
706 memset(&init_data, 0, sizeof(init_data));
707 init_data.cid = p_ll2_conn->cid;
708 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
709 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
710
711 rc = qed_sp_init_request(p_hwfn, &p_ent,
712 CORE_RAMROD_TX_QUEUE_STOP,
713 PROTOCOLID_CORE, &init_data);
714 if (rc)
715 return rc;
716
717 return qed_spq_post(p_hwfn, p_ent, NULL);
718}
719
720static int
721qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
722 struct qed_ll2_info *p_ll2_info, u16 rx_num_desc)
723{
724 struct qed_ll2_rx_packet *p_descq;
725 u32 capacity;
726 int rc = 0;
727
728 if (!rx_num_desc)
729 goto out;
730
731 rc = qed_chain_alloc(p_hwfn->cdev,
732 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
733 QED_CHAIN_MODE_NEXT_PTR,
734 QED_CHAIN_CNT_TYPE_U16,
735 rx_num_desc,
736 sizeof(struct core_rx_bd),
737 &p_ll2_info->rx_queue.rxq_chain);
738 if (rc) {
739 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
740 goto out;
741 }
742
743 capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
744 p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
745 GFP_KERNEL);
746 if (!p_descq) {
747 rc = -ENOMEM;
748 DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
749 goto out;
750 }
751 p_ll2_info->rx_queue.descq_array = p_descq;
752
753 rc = qed_chain_alloc(p_hwfn->cdev,
754 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
755 QED_CHAIN_MODE_PBL,
756 QED_CHAIN_CNT_TYPE_U16,
757 rx_num_desc,
758 sizeof(struct core_rx_fast_path_cqe),
759 &p_ll2_info->rx_queue.rcq_chain);
760 if (rc) {
761 DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
762 goto out;
763 }
764
765 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
766 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
767 p_ll2_info->conn_type, rx_num_desc);
768
769out:
770 return rc;
771}
772
773static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
774 struct qed_ll2_info *p_ll2_info,
775 u16 tx_num_desc)
776{
777 struct qed_ll2_tx_packet *p_descq;
778 u32 capacity;
779 int rc = 0;
780
781 if (!tx_num_desc)
782 goto out;
783
784 rc = qed_chain_alloc(p_hwfn->cdev,
785 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
786 QED_CHAIN_MODE_PBL,
787 QED_CHAIN_CNT_TYPE_U16,
788 tx_num_desc,
789 sizeof(struct core_tx_bd),
790 &p_ll2_info->tx_queue.txq_chain);
791 if (rc)
792 goto out;
793
794 capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
795 p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
796 GFP_KERNEL);
797 if (!p_descq) {
798 rc = -ENOMEM;
799 goto out;
800 }
801 p_ll2_info->tx_queue.descq_array = p_descq;
802
803 DP_VERBOSE(p_hwfn, QED_MSG_LL2,
804 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
805 p_ll2_info->conn_type, tx_num_desc);
806
807out:
808 if (rc)
809 DP_NOTICE(p_hwfn,
810 "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
811 tx_num_desc);
812 return rc;
813}
814
815int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
816 struct qed_ll2_info *p_params,
817 u16 rx_num_desc,
818 u16 tx_num_desc,
819 u8 *p_connection_handle)
820{
821 qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
822 struct qed_ll2_info *p_ll2_info = NULL;
823 int rc;
824 u8 i;
825
826 if (!p_connection_handle || !p_hwfn->p_ll2_info)
827 return -EINVAL;
828
829 /* Find a free connection to be used */
830 for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
831 mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
832 if (p_hwfn->p_ll2_info[i].b_active) {
833 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
834 continue;
835 }
836
837 p_hwfn->p_ll2_info[i].b_active = true;
838 p_ll2_info = &p_hwfn->p_ll2_info[i];
839 mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
840 break;
841 }
842 if (!p_ll2_info)
843 return -EBUSY;
844
845 p_ll2_info->conn_type = p_params->conn_type;
846 p_ll2_info->mtu = p_params->mtu;
847 p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
848 p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
849 p_ll2_info->tx_tc = p_params->tx_tc;
850 p_ll2_info->tx_dest = p_params->tx_dest;
851 p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
852 p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
853 p_ll2_info->gsi_enable = p_params->gsi_enable;
854
855 rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
856 if (rc)
857 goto q_allocate_fail;
858
859 rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc);
860 if (rc)
861 goto q_allocate_fail;
862
863 /* Register callbacks for the Rx/Tx queues */
864 comp_rx_cb = qed_ll2_rxq_completion;
865 comp_tx_cb = qed_ll2_txq_completion;
866
867 if (rx_num_desc) {
868 qed_int_register_cb(p_hwfn, comp_rx_cb,
869 &p_hwfn->p_ll2_info[i],
870 &p_ll2_info->rx_queue.rx_sb_index,
871 &p_ll2_info->rx_queue.p_fw_cons);
872 p_ll2_info->rx_queue.b_cb_registred = true;
873 }
874
875 if (tx_num_desc) {
876 qed_int_register_cb(p_hwfn,
877 comp_tx_cb,
878 &p_hwfn->p_ll2_info[i],
879 &p_ll2_info->tx_queue.tx_sb_index,
880 &p_ll2_info->tx_queue.p_fw_cons);
881 p_ll2_info->tx_queue.b_cb_registred = true;
882 }
883
884 *p_connection_handle = i;
885 return rc;
886
887q_allocate_fail:
888 qed_ll2_release_connection(p_hwfn, i);
889 return -ENOMEM;
890}
891
892static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
893 struct qed_ll2_info *p_ll2_conn)
894{
895 u8 action_on_error = 0;
896
897 if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
898 return 0;
899
900 DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
901
902 SET_FIELD(action_on_error,
903 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
904 p_ll2_conn->ai_err_packet_too_big);
905 SET_FIELD(action_on_error,
906 CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
907
908 return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
909}
910
911int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
912{
913 struct qed_ll2_info *p_ll2_conn;
914 struct qed_ll2_rx_queue *p_rx;
915 struct qed_ll2_tx_queue *p_tx;
916 int rc = -EINVAL;
917 u32 i, capacity;
918 u8 qid;
919
920 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
921 if (!p_ll2_conn)
922 return -EINVAL;
923 p_rx = &p_ll2_conn->rx_queue;
924 p_tx = &p_ll2_conn->tx_queue;
925
926 qed_chain_reset(&p_rx->rxq_chain);
927 qed_chain_reset(&p_rx->rcq_chain);
928 INIT_LIST_HEAD(&p_rx->active_descq);
929 INIT_LIST_HEAD(&p_rx->free_descq);
930 INIT_LIST_HEAD(&p_rx->posting_descq);
931 spin_lock_init(&p_rx->lock);
932 capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
933 for (i = 0; i < capacity; i++)
934 list_add_tail(&p_rx->descq_array[i].list_entry,
935 &p_rx->free_descq);
936 *p_rx->p_fw_cons = 0;
937
938 qed_chain_reset(&p_tx->txq_chain);
939 INIT_LIST_HEAD(&p_tx->active_descq);
940 INIT_LIST_HEAD(&p_tx->free_descq);
941 INIT_LIST_HEAD(&p_tx->sending_descq);
942 spin_lock_init(&p_tx->lock);
943 capacity = qed_chain_get_capacity(&p_tx->txq_chain);
944 for (i = 0; i < capacity; i++)
945 list_add_tail(&p_tx->descq_array[i].list_entry,
946 &p_tx->free_descq);
947 p_tx->cur_completing_bd_idx = 0;
948 p_tx->bds_idx = 0;
949 p_tx->b_completing_packet = false;
950 p_tx->cur_send_packet = NULL;
951 p_tx->cur_send_frag_num = 0;
952 p_tx->cur_completing_frag_num = 0;
953 *p_tx->p_fw_cons = 0;
954
955 qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
956
957 qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
958 p_ll2_conn->queue_id = qid;
959 p_ll2_conn->tx_stats_id = qid;
960 p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
961 GTT_BAR0_MAP_REG_TSDM_RAM +
962 TSTORM_LL2_RX_PRODS_OFFSET(qid);
963 p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
964 qed_db_addr(p_ll2_conn->cid,
965 DQ_DEMS_LEGACY);
966
967 rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
968 if (rc)
969 return rc;
970
971 rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
972 if (rc)
973 return rc;
974
975 if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
976 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1);
977
978 return rc;
979}
980
981static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
982 struct qed_ll2_rx_queue *p_rx,
983 struct qed_ll2_rx_packet *p_curp)
984{
985 struct qed_ll2_rx_packet *p_posting_packet = NULL;
986 struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
987 bool b_notify_fw = false;
988 u16 bd_prod, cq_prod;
989
990 /* This handles the flushing of already posted buffers */
991 while (!list_empty(&p_rx->posting_descq)) {
992 p_posting_packet = list_first_entry(&p_rx->posting_descq,
993 struct qed_ll2_rx_packet,
994 list_entry);
995 list_del(&p_posting_packet->list_entry);
996 list_add_tail(&p_posting_packet->list_entry,
997 &p_rx->active_descq);
998 b_notify_fw = true;
999 }
1000
1001 /* This handles the supplied packet [if there is one] */
1002 if (p_curp) {
1003 list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
1004 b_notify_fw = true;
1005 }
1006
1007 if (!b_notify_fw)
1008 return;
1009
1010 bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
1011 cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
1012 rx_prod.bd_prod = cpu_to_le16(bd_prod);
1013 rx_prod.cqe_prod = cpu_to_le16(cq_prod);
1014 DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
1015}
1016
1017int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
1018 u8 connection_handle,
1019 dma_addr_t addr,
1020 u16 buf_len, void *cookie, u8 notify_fw)
1021{
1022 struct core_rx_bd_with_buff_len *p_curb = NULL;
1023 struct qed_ll2_rx_packet *p_curp = NULL;
1024 struct qed_ll2_info *p_ll2_conn;
1025 struct qed_ll2_rx_queue *p_rx;
1026 unsigned long flags;
1027 void *p_data;
1028 int rc = 0;
1029
1030 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1031 if (!p_ll2_conn)
1032 return -EINVAL;
1033 p_rx = &p_ll2_conn->rx_queue;
1034
1035 spin_lock_irqsave(&p_rx->lock, flags);
1036 if (!list_empty(&p_rx->free_descq))
1037 p_curp = list_first_entry(&p_rx->free_descq,
1038 struct qed_ll2_rx_packet, list_entry);
1039 if (p_curp) {
1040 if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
1041 qed_chain_get_elem_left(&p_rx->rcq_chain)) {
1042 p_data = qed_chain_produce(&p_rx->rxq_chain);
1043 p_curb = (struct core_rx_bd_with_buff_len *)p_data;
1044 qed_chain_produce(&p_rx->rcq_chain);
1045 }
1046 }
1047
1048	/* If we're lacking entries, let's try to flush buffers to FW */
1049 if (!p_curp || !p_curb) {
1050 rc = -EBUSY;
1051 p_curp = NULL;
1052 goto out_notify;
1053 }
1054
1055 /* We have an Rx packet we can fill */
1056 DMA_REGPAIR_LE(p_curb->addr, addr);
1057 p_curb->buff_length = cpu_to_le16(buf_len);
1058 p_curp->rx_buf_addr = addr;
1059 p_curp->cookie = cookie;
1060 p_curp->rxq_bd = p_curb;
1061 p_curp->buf_length = buf_len;
1062 list_del(&p_curp->list_entry);
1063
1064 /* Check if we only want to enqueue this packet without informing FW */
1065 if (!notify_fw) {
1066 list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
1067 goto out;
1068 }
1069
1070out_notify:
1071 qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
1072out:
1073 spin_unlock_irqrestore(&p_rx->lock, flags);
1074 return rc;
1075}
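
The notify_fw argument lets a caller enqueue several Rx buffers and update the firmware producer only once, when the last buffer of a batch is posted. A minimal sketch of that batched-posting pattern, assuming the caller has already DMA-mapped its buffers (the helper name, arrays and batch size are illustrative and not part of this patch):

/* Illustrative sketch only - not part of the qed driver */
static int example_post_rx_batch(struct qed_hwfn *p_hwfn, u8 handle,
				 dma_addr_t *addrs, void **cookies,
				 u16 buf_len, int count)
{
	int i, rc = 0;

	for (i = 0; i < count; i++) {
		/* Ring the firmware only for the last buffer of the batch */
		rc = qed_ll2_post_rx_buffer(p_hwfn, handle, addrs[i], buf_len,
					    cookies[i], i == count - 1);
		if (rc)
			break;
	}

	return rc;
}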
1076
1077static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
1078 struct qed_ll2_tx_queue *p_tx,
1079 struct qed_ll2_tx_packet *p_curp,
1080 u8 num_of_bds,
1081 dma_addr_t first_frag,
1082 u16 first_frag_len, void *p_cookie,
1083 u8 notify_fw)
1084{
1085 list_del(&p_curp->list_entry);
1086 p_curp->cookie = p_cookie;
1087 p_curp->bd_used = num_of_bds;
1088 p_curp->notify_fw = notify_fw;
1089 p_tx->cur_send_packet = p_curp;
1090 p_tx->cur_send_frag_num = 0;
1091
1092 p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag;
1093 p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len;
1094 p_tx->cur_send_frag_num++;
1095}
1096
1097static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
1098 struct qed_ll2_info *p_ll2,
1099 struct qed_ll2_tx_packet *p_curp,
1100 u8 num_of_bds,
1101 enum core_tx_dest tx_dest,
1102 u16 vlan,
1103 u8 bd_flags,
1104 u16 l4_hdr_offset_w,
1105 enum core_roce_flavor_type type,
1106 dma_addr_t first_frag,
1107 u16 first_frag_len)
1108{
1109 struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
1110 u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
1111 struct core_tx_bd *start_bd = NULL;
1112 u16 frag_idx;
1113
1114 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1115 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
1116 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
1117 cpu_to_le16(l4_hdr_offset_w));
1118 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
1119 start_bd->bd_flags.as_bitfield = bd_flags;
1120 start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
1121 CORE_TX_BD_FLAGS_START_BD_SHIFT;
1122 SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
1123 DMA_REGPAIR_LE(start_bd->addr, first_frag);
1124 start_bd->nbytes = cpu_to_le16(first_frag_len);
1125
1126 SET_FIELD(start_bd->bd_flags.as_bitfield, CORE_TX_BD_FLAGS_ROCE_FLAV,
1127 type);
1128
1129 DP_VERBOSE(p_hwfn,
1130 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1131 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
1132 p_ll2->queue_id,
1133 p_ll2->cid,
1134 p_ll2->conn_type,
1135 prod_idx,
1136 first_frag_len,
1137 num_of_bds,
1138 le32_to_cpu(start_bd->addr.hi),
1139 le32_to_cpu(start_bd->addr.lo));
1140
1141 if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds)
1142 return;
1143
1144 /* Need to provide the packet with additional BDs for frags */
1145 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
1146 frag_idx < num_of_bds; frag_idx++) {
1147 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
1148
1149 *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
1150 (*p_bd)->bd_flags.as_bitfield = 0;
1151 (*p_bd)->bitfield1 = 0;
1152 (*p_bd)->bitfield0 = 0;
1153 p_curp->bds_set[frag_idx].tx_frag = 0;
1154 p_curp->bds_set[frag_idx].frag_len = 0;
1155 }
1156}
1157
1158/* This should be called while the Txq spinlock is being held */
1159static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
1160 struct qed_ll2_info *p_ll2_conn)
1161{
1162 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
1163 struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
1164 struct qed_ll2_tx_packet *p_pkt = NULL;
1165 struct core_db_data db_msg = { 0, 0, 0 };
1166 u16 bd_prod;
1167
1168 /* If there are missing BDs, don't do anything now */
1169 if (p_ll2_conn->tx_queue.cur_send_frag_num !=
1170 p_ll2_conn->tx_queue.cur_send_packet->bd_used)
1171 return;
1172
1173 /* Push the current packet to the list and clean after it */
1174 list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
1175 &p_ll2_conn->tx_queue.sending_descq);
1176 p_ll2_conn->tx_queue.cur_send_packet = NULL;
1177 p_ll2_conn->tx_queue.cur_send_frag_num = 0;
1178
1179 /* Notify FW of packet only if requested to */
1180 if (!b_notify)
1181 return;
1182
1183 bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
1184
1185 while (!list_empty(&p_tx->sending_descq)) {
1186 p_pkt = list_first_entry(&p_tx->sending_descq,
1187 struct qed_ll2_tx_packet, list_entry);
1188 if (!p_pkt)
1189 break;
1190
1191 list_del(&p_pkt->list_entry);
1192 list_add_tail(&p_pkt->list_entry, &p_tx->active_descq);
1193 }
1194
1195 SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
1196 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1197 SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
1198 DQ_XCM_CORE_TX_BD_PROD_CMD);
1199 db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
1200 db_msg.spq_prod = cpu_to_le16(bd_prod);
1201
1202 /* Make sure the BDs data is updated before ringing the doorbell */
1203 wmb();
1204
1205 DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
1206
1207 DP_VERBOSE(p_hwfn,
1208 (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
1209 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
1210 p_ll2_conn->queue_id,
1211 p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
1212}
1213
1214int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
1215 u8 connection_handle,
1216 u8 num_of_bds,
1217 u16 vlan,
1218 u8 bd_flags,
1219 u16 l4_hdr_offset_w,
1220 enum qed_ll2_roce_flavor_type qed_roce_flavor,
1221 dma_addr_t first_frag,
1222 u16 first_frag_len, void *cookie, u8 notify_fw)
1223{
1224 struct qed_ll2_tx_packet *p_curp = NULL;
1225 struct qed_ll2_info *p_ll2_conn = NULL;
1226 enum core_roce_flavor_type roce_flavor;
1227 struct qed_ll2_tx_queue *p_tx;
1228 struct qed_chain *p_tx_chain;
1229 unsigned long flags;
1230 int rc = 0;
1231
1232 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1233 if (!p_ll2_conn)
1234 return -EINVAL;
1235 p_tx = &p_ll2_conn->tx_queue;
1236 p_tx_chain = &p_tx->txq_chain;
1237
1238 if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
1239 return -EIO;
1240
1241 spin_lock_irqsave(&p_tx->lock, flags);
1242 if (p_tx->cur_send_packet) {
1243 rc = -EEXIST;
1244 goto out;
1245 }
1246
1247 /* Get entry, but only if we have tx elements for it */
1248 if (!list_empty(&p_tx->free_descq))
1249 p_curp = list_first_entry(&p_tx->free_descq,
1250 struct qed_ll2_tx_packet, list_entry);
1251 if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds)
1252 p_curp = NULL;
1253
1254 if (!p_curp) {
1255 rc = -EBUSY;
1256 goto out;
1257 }
1258
1259 if (qed_roce_flavor == QED_LL2_ROCE) {
1260 roce_flavor = CORE_ROCE;
1261 } else if (qed_roce_flavor == QED_LL2_RROCE) {
1262 roce_flavor = CORE_RROCE;
1263 } else {
1264 rc = -EINVAL;
1265 goto out;
1266 }
1267
1268 /* Prepare packet and BD, and perhaps send a doorbell to FW */
1269 qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp,
1270 num_of_bds, first_frag,
1271 first_frag_len, cookie, notify_fw);
1272 qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp,
1273 num_of_bds, CORE_TX_DEST_NW,
1274 vlan, bd_flags, l4_hdr_offset_w,
1275 roce_flavor,
1276 first_frag, first_frag_len);
1277
1278 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1279
1280out:
1281 spin_unlock_irqrestore(&p_tx->lock, flags);
1282 return rc;
1283}
1284
1285int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
1286 u8 connection_handle,
1287 dma_addr_t addr, u16 nbytes)
1288{
1289 struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
1290 struct qed_ll2_info *p_ll2_conn = NULL;
1291 u16 cur_send_frag_num = 0;
1292 struct core_tx_bd *p_bd;
1293 unsigned long flags;
1294
1295 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1296 if (!p_ll2_conn)
1297 return -EINVAL;
1298
1299 if (!p_ll2_conn->tx_queue.cur_send_packet)
1300 return -EINVAL;
1301
1302 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
1303 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
1304
1305 if (cur_send_frag_num >= p_cur_send_packet->bd_used)
1306 return -EINVAL;
1307
1308 /* Fill the BD information, and possibly notify FW */
1309 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
1310 DMA_REGPAIR_LE(p_bd->addr, addr);
1311 p_bd->nbytes = cpu_to_le16(nbytes);
1312 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
1313 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
1314
1315 p_ll2_conn->tx_queue.cur_send_frag_num++;
1316
1317 spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
1318 qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
1319 spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
1320
1321 return 0;
1322}
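
Together, qed_ll2_prepare_tx_packet() and qed_ll2_set_fragment_of_tx_packet() form a two-step Tx path: the first call consumes the start BD and declares how many BDs the packet will use, and each subsequent call fills one more fragment; the doorbell is rung only once all declared BDs have been supplied. A minimal two-fragment sketch, assuming both fragments are already DMA-mapped (the helper name and the zeroed vlan/flag arguments are illustrative):

/* Illustrative sketch only - not part of the qed driver */
static int example_send_two_frag_packet(struct qed_hwfn *p_hwfn, u8 handle,
					dma_addr_t hdr, u16 hdr_len,
					dma_addr_t payload, u16 payload_len,
					void *cookie)
{
	int rc;

	/* Start BD: declares two BDs and carries the first fragment */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, 2, 0 /* vlan */,
				       0 /* bd_flags */,
				       0 /* l4_hdr_offset_w */,
				       QED_LL2_ROCE, hdr, hdr_len, cookie, 1);
	if (rc)
		return rc;

	/* Second BD: once all declared fragments are set, the doorbell
	 * is rung via qed_ll2_tx_packet_notify().
	 */
	return qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
						 payload, payload_len);
}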
1323
1324int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1325{
1326 struct qed_ll2_info *p_ll2_conn = NULL;
1327 int rc = -EINVAL;
1328
1329 p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
1330 if (!p_ll2_conn)
1331 return -EINVAL;
1332
1333 /* Stop Tx & Rx of connection, if needed */
1334 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1335 rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
1336 if (rc)
1337 return rc;
1338 qed_ll2_txq_flush(p_hwfn, connection_handle);
1339 }
1340
1341 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1342 rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
1343 if (rc)
1344 return rc;
1345 qed_ll2_rxq_flush(p_hwfn, connection_handle);
1346 }
1347
1348 return rc;
1349}
1350
1351void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
1352{
1353 struct qed_ll2_info *p_ll2_conn = NULL;
1354
1355 p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
1356 if (!p_ll2_conn)
1357 return;
1358
1359 if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
1360 p_ll2_conn->rx_queue.b_cb_registred = false;
1361 qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
1362 }
1363
1364 if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
1365 p_ll2_conn->tx_queue.b_cb_registred = false;
1366 qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
1367 }
1368
1369 kfree(p_ll2_conn->tx_queue.descq_array);
1370 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
1371
1372 kfree(p_ll2_conn->rx_queue.descq_array);
1373 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
1374 qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
1375
1376 qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
1377
1378 mutex_lock(&p_ll2_conn->mutex);
1379 p_ll2_conn->b_active = false;
1380 mutex_unlock(&p_ll2_conn->mutex);
1381}
1382
1383struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn)
1384{
1385 struct qed_ll2_info *p_ll2_connections;
1386 u8 i;
1387
1388 /* Allocate LL2's set struct */
1389 p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
1390 sizeof(struct qed_ll2_info), GFP_KERNEL);
1391 if (!p_ll2_connections) {
1392 DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
1393 return NULL;
1394 }
1395
1396 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1397 p_ll2_connections[i].my_id = i;
1398
1399 return p_ll2_connections;
1400}
1401
1402void qed_ll2_setup(struct qed_hwfn *p_hwfn,
1403 struct qed_ll2_info *p_ll2_connections)
1404{
1405 int i;
1406
1407 for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
1408 mutex_init(&p_ll2_connections[i].mutex);
1409}
1410
1411void qed_ll2_free(struct qed_hwfn *p_hwfn,
1412 struct qed_ll2_info *p_ll2_connections)
1413{
1414 kfree(p_ll2_connections);
1415}
1416
1417static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
1418 struct qed_ptt *p_ptt,
1419 struct qed_ll2_info *p_ll2_conn,
1420 struct qed_ll2_stats *p_stats)
1421{
1422 struct core_ll2_tstorm_per_queue_stat tstats;
1423 u8 qid = p_ll2_conn->queue_id;
1424 u32 tstats_addr;
1425
1426 memset(&tstats, 0, sizeof(tstats));
1427 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1428 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
1429 qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
1430
1431 p_stats->packet_too_big_discard =
1432 HILO_64_REGPAIR(tstats.packet_too_big_discard);
1433 p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard);
1434}
1435
1436static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
1437 struct qed_ptt *p_ptt,
1438 struct qed_ll2_info *p_ll2_conn,
1439 struct qed_ll2_stats *p_stats)
1440{
1441 struct core_ll2_ustorm_per_queue_stat ustats;
1442 u8 qid = p_ll2_conn->queue_id;
1443 u32 ustats_addr;
1444
1445 memset(&ustats, 0, sizeof(ustats));
1446 ustats_addr = BAR0_MAP_REG_USDM_RAM +
1447 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
1448 qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
1449
1450 p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1451 p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1452 p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1453 p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1454 p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1455 p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1456}
1457
1458static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
1459 struct qed_ptt *p_ptt,
1460 struct qed_ll2_info *p_ll2_conn,
1461 struct qed_ll2_stats *p_stats)
1462{
1463 struct core_ll2_pstorm_per_queue_stat pstats;
1464 u8 stats_id = p_ll2_conn->tx_stats_id;
1465 u32 pstats_addr;
1466
1467 memset(&pstats, 0, sizeof(pstats));
1468 pstats_addr = BAR0_MAP_REG_PSDM_RAM +
1469 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
1470 qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
1471
1472 p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1473 p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1474 p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1475 p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1476 p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1477 p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1478}
1479
1480int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
1481 u8 connection_handle, struct qed_ll2_stats *p_stats)
1482{
1483 struct qed_ll2_info *p_ll2_conn = NULL;
1484 struct qed_ptt *p_ptt;
1485
1486 memset(p_stats, 0, sizeof(*p_stats));
1487
1488 if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
1489 !p_hwfn->p_ll2_info)
1490 return -EINVAL;
1491
1492 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
1493
1494 p_ptt = qed_ptt_acquire(p_hwfn);
1495 if (!p_ptt) {
1496 DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1497 return -EINVAL;
1498 }
1499
1500 _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1501 _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1502 if (p_ll2_conn->tx_stats_en)
1503 _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
1504
1505 qed_ptt_release(p_hwfn, p_ptt);
1506 return 0;
1507}
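
A small sketch of how a caller might poll these counters; the helper name and the choice of fields to print are illustrative only:

/* Illustrative sketch only - not part of the qed driver */
static void example_dump_ll2_stats(struct qed_hwfn *p_hwfn, u8 handle)
{
	struct qed_ll2_stats stats;

	if (qed_ll2_get_stats(p_hwfn, handle, &stats))
		return;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "rcv_ucast_pkts %llu no_buff_discard %llu\n",
		   stats.rcv_ucast_pkts, stats.no_buff_discard);
}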
1508
1509static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
1510 const struct qed_ll2_cb_ops *ops,
1511 void *cookie)
1512{
1513 cdev->ll2->cbs = ops;
1514 cdev->ll2->cb_cookie = cookie;
1515}
1516
1517static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
1518{
1519 struct qed_ll2_info ll2_info;
1520 struct qed_ll2_buffer *buffer;
1521 enum qed_ll2_conn_type conn_type;
1522 struct qed_ptt *p_ptt;
1523 int rc, i;
1524
1525 /* Initialize LL2 locks & lists */
1526 INIT_LIST_HEAD(&cdev->ll2->list);
1527 spin_lock_init(&cdev->ll2->lock);
1528 cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
1529 L1_CACHE_BYTES + params->mtu;
1530 cdev->ll2->frags_mapped = params->frags_mapped;
1531
1532	/* Allocate memory for LL2 */
1533 DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n",
1534 cdev->ll2->rx_size);
1535 for (i = 0; i < QED_LL2_RX_SIZE; i++) {
1536 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
1537 if (!buffer) {
1538 DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
1539 goto fail;
1540 }
1541
1542 rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
1543 &buffer->phys_addr);
1544 if (rc) {
1545 kfree(buffer);
1546 goto fail;
1547 }
1548
1549 list_add_tail(&buffer->list, &cdev->ll2->list);
1550 }
1551
1552 switch (QED_LEADING_HWFN(cdev)->hw_info.personality) {
1553 case QED_PCI_ISCSI:
1554 conn_type = QED_LL2_TYPE_ISCSI;
1555 break;
1556 case QED_PCI_ETH_ROCE:
1557 conn_type = QED_LL2_TYPE_ROCE;
1558 break;
1559 default:
1560 conn_type = QED_LL2_TYPE_TEST;
1561 }
1562
1563 /* Prepare the temporary ll2 information */
1564 memset(&ll2_info, 0, sizeof(ll2_info));
1565 ll2_info.conn_type = conn_type;
1566 ll2_info.mtu = params->mtu;
1567 ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
1568 ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
1569 ll2_info.tx_tc = 0;
1570 ll2_info.tx_dest = CORE_TX_DEST_NW;
1571 ll2_info.gsi_enable = 1;
1572
1573 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info,
1574 QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
1575 &cdev->ll2->handle);
1576 if (rc) {
1577 DP_INFO(cdev, "Failed to acquire LL2 connection\n");
1578 goto fail;
1579 }
1580
1581 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
1582 cdev->ll2->handle);
1583 if (rc) {
1584 DP_INFO(cdev, "Failed to establish LL2 connection\n");
1585 goto release_fail;
1586 }
1587
1588 /* Post all Rx buffers to FW */
1589 spin_lock_bh(&cdev->ll2->lock);
1590 list_for_each_entry(buffer, &cdev->ll2->list, list) {
1591 rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
1592 cdev->ll2->handle,
1593 buffer->phys_addr, 0, buffer, 1);
1594 if (rc) {
1595 DP_INFO(cdev,
1596 "Failed to post an Rx buffer; Deleting it\n");
1597 dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
1598 cdev->ll2->rx_size, DMA_FROM_DEVICE);
1599 kfree(buffer->data);
1600 list_del(&buffer->list);
1601 kfree(buffer);
1602 } else {
1603 cdev->ll2->rx_cnt++;
1604 }
1605 }
1606 spin_unlock_bh(&cdev->ll2->lock);
1607
1608 if (!cdev->ll2->rx_cnt) {
1609 DP_INFO(cdev, "Failed passing even a single Rx buffer\n");
1610 goto release_terminate;
1611 }
1612
1613 if (!is_valid_ether_addr(params->ll2_mac_address)) {
1614 DP_INFO(cdev, "Invalid Ethernet address\n");
1615 goto release_terminate;
1616 }
1617
1618 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1619 if (!p_ptt) {
1620 DP_INFO(cdev, "Failed to acquire PTT\n");
1621 goto release_terminate;
1622 }
1623
1624 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
1625 params->ll2_mac_address);
1626 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
1627 if (rc) {
1628 DP_ERR(cdev, "Failed to allocate LLH filter\n");
1629 goto release_terminate_all;
1630 }
1631
1632 ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
1633
1634 return 0;
1635
1636release_terminate_all:
1637
1638release_terminate:
1639 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1640release_fail:
1641 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1642fail:
1643 qed_ll2_kill_buffers(cdev);
1644 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
1645 return -EINVAL;
1646}
1647
1648static int qed_ll2_stop(struct qed_dev *cdev)
1649{
1650 struct qed_ptt *p_ptt;
1651 int rc;
1652
1653 if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
1654 return 0;
1655
1656 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
1657 if (!p_ptt) {
1658 DP_INFO(cdev, "Failed to acquire PTT\n");
1659 goto fail;
1660 }
1661
1662 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
1663 cdev->ll2_mac_address);
1664 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
1665 eth_zero_addr(cdev->ll2_mac_address);
1666
1667 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
1668 cdev->ll2->handle);
1669 if (rc)
1670 DP_INFO(cdev, "Failed to terminate LL2 connection\n");
1671
1672 qed_ll2_kill_buffers(cdev);
1673
1674 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle);
1675 cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
1676
1677 return rc;
1678fail:
1679 return -EINVAL;
1680}
1681
1682static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
1683{
1684 const skb_frag_t *frag;
1685 int rc = -EINVAL, i;
1686 dma_addr_t mapping;
1687 u16 vlan = 0;
1688 u8 flags = 0;
1689
1690 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
1691		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
1692 return -EINVAL;
1693 }
1694
1695 if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
1696 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1697 1 + skb_shinfo(skb)->nr_frags);
1698 return -EINVAL;
1699 }
1700
1701 mapping = dma_map_single(&cdev->pdev->dev, skb->data,
1702 skb->len, DMA_TO_DEVICE);
1703 if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
1704 DP_NOTICE(cdev, "SKB mapping failed\n");
1705 return -EINVAL;
1706 }
1707
1708 /* Request HW to calculate IP csum */
1709 if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
1710 ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1711 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
1712
1713 if (skb_vlan_tag_present(skb)) {
1714 vlan = skb_vlan_tag_get(skb);
1715 flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
1716 }
1717
1718 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
1719 cdev->ll2->handle,
1720 1 + skb_shinfo(skb)->nr_frags,
1721 vlan, flags, 0, 0 /* RoCE FLAVOR */,
1722 mapping, skb->len, skb, 1);
1723 if (rc)
1724 goto err;
1725
1726 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1727 frag = &skb_shinfo(skb)->frags[i];
1728 if (!cdev->ll2->frags_mapped) {
1729 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
1730 skb_frag_size(frag),
1731 DMA_TO_DEVICE);
1732
1733 if (unlikely(dma_mapping_error(&cdev->pdev->dev,
1734 mapping))) {
1735 DP_NOTICE(cdev,
1736 "Unable to map frag - dropping packet\n");
1737 goto err;
1738 }
1739 } else {
1740 mapping = page_to_phys(skb_frag_page(frag)) |
1741 frag->page_offset;
1742 }
1743
1744 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
1745 cdev->ll2->handle,
1746 mapping,
1747 skb_frag_size(frag));
1748
1749		/* If this fails there's not much we can do: a partial packet has been
1750		 * posted and its memory can't be freed until the completion arrives.
1751		 */
1752 if (rc)
1753 goto err2;
1754 }
1755
1756 return 0;
1757
1758err:
1759 dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
1760
1761err2:
1762 return rc;
1763}
1764
1765static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
1766{
1767 if (!cdev->ll2)
1768 return -EINVAL;
1769
1770 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
1771 cdev->ll2->handle, stats);
1772}
1773
1774const struct qed_ll2_ops qed_ll2_ops_pass = {
1775 .start = &qed_ll2_start,
1776 .stop = &qed_ll2_stop,
1777 .start_xmit = &qed_ll2_start_xmit,
1778 .register_cb_ops = &qed_ll2_register_cb_ops,
1779 .get_stats = &qed_ll2_stats,
1780};
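
Upper-layer clients are expected to consume this ops table rather than the hwfn-level functions directly. A minimal sketch of a client bringing the interface up, assuming a qed_ll2_cb_ops instance defined per qed_ll2_if.h; the helper name and the MTU/flag values are illustrative:

/* Illustrative sketch only - not part of the qed driver */
static int example_ll2_client_start(struct qed_dev *cdev,
				    const struct qed_ll2_ops *ops,
				    const struct qed_ll2_cb_ops *cbs,
				    void *ctx, const u8 *mac)
{
	struct qed_ll2_params params;

	memset(&params, 0, sizeof(params));
	params.mtu = 1500;
	params.drop_ttl0_packets = 0;
	params.rx_vlan_stripping = 1;
	ether_addr_copy(params.ll2_mac_address, mac);

	/* Callbacks must be registered before Rx completions can be
	 * delivered to the client.
	 */
	ops->register_cb_ops(cdev, cbs, ctx);

	return ops->start(cdev, &params);
}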
1781
1782int qed_ll2_alloc_if(struct qed_dev *cdev)
1783{
1784 cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
1785 return cdev->ll2 ? 0 : -ENOMEM;
1786}
1787
1788void qed_ll2_dealloc_if(struct qed_dev *cdev)
1789{
1790 kfree(cdev->ll2);
1791 cdev->ll2 = NULL;
1792}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
new file mode 100644
index 000000000000..80a5dc2d652d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -0,0 +1,316 @@
1/* QLogic qed NIC Driver
2 *
3 * Copyright (c) 2015 QLogic Corporation
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QED_LL2_H
11#define _QED_LL2_H
12
13#include <linux/types.h>
14#include <linux/kernel.h>
15#include <linux/list.h>
16#include <linux/mutex.h>
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19#include <linux/qed/qed_chain.h>
20#include <linux/qed/qed_ll2_if.h>
21#include "qed.h"
22#include "qed_hsi.h"
23#include "qed_sp.h"
24
25#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
26
27enum qed_ll2_roce_flavor_type {
28 QED_LL2_ROCE,
29 QED_LL2_RROCE,
30 MAX_QED_LL2_ROCE_FLAVOR_TYPE
31};
32
33enum qed_ll2_conn_type {
34 QED_LL2_TYPE_RESERVED,
35 QED_LL2_TYPE_ISCSI,
36 QED_LL2_TYPE_TEST,
37 QED_LL2_TYPE_ISCSI_OOO,
38 QED_LL2_TYPE_RESERVED2,
39 QED_LL2_TYPE_ROCE,
40 QED_LL2_TYPE_RESERVED3,
41 MAX_QED_LL2_RX_CONN_TYPE
42};
43
44struct qed_ll2_rx_packet {
45 struct list_head list_entry;
46 struct core_rx_bd_with_buff_len *rxq_bd;
47 dma_addr_t rx_buf_addr;
48 u16 buf_length;
49 void *cookie;
50 u8 placement_offset;
51 u16 parse_flags;
52 u16 packet_length;
53 u16 vlan;
54 u32 opaque_data[2];
55};
56
57struct qed_ll2_tx_packet {
58 struct list_head list_entry;
59 u16 bd_used;
60 u16 vlan;
61 u16 l4_hdr_offset_w;
62 u8 bd_flags;
63 bool notify_fw;
64 void *cookie;
65
66 struct {
67 struct core_tx_bd *txq_bd;
68 dma_addr_t tx_frag;
69 u16 frag_len;
70 } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
71};
72
73struct qed_ll2_rx_queue {
74 /* Lock protecting the Rx queue manipulation */
75 spinlock_t lock;
76 struct qed_chain rxq_chain;
77 struct qed_chain rcq_chain;
78 u8 rx_sb_index;
79 bool b_cb_registred;
80 __le16 *p_fw_cons;
81 struct list_head active_descq;
82 struct list_head free_descq;
83 struct list_head posting_descq;
84 struct qed_ll2_rx_packet *descq_array;
85 void __iomem *set_prod_addr;
86};
87
88struct qed_ll2_tx_queue {
89 /* Lock protecting the Tx queue manipulation */
90 spinlock_t lock;
91 struct qed_chain txq_chain;
92 u8 tx_sb_index;
93 bool b_cb_registred;
94 __le16 *p_fw_cons;
95 struct list_head active_descq;
96 struct list_head free_descq;
97 struct list_head sending_descq;
98 struct qed_ll2_tx_packet *descq_array;
99 struct qed_ll2_tx_packet *cur_send_packet;
100 struct qed_ll2_tx_packet cur_completing_packet;
101 u16 cur_completing_bd_idx;
102 void __iomem *doorbell_addr;
103 u16 bds_idx;
104 u16 cur_send_frag_num;
105 u16 cur_completing_frag_num;
106 bool b_completing_packet;
107};
108
109struct qed_ll2_info {
110 /* Lock protecting the state of LL2 */
111 struct mutex mutex;
112 enum qed_ll2_conn_type conn_type;
113 u32 cid;
114 u8 my_id;
115 u8 queue_id;
116 u8 tx_stats_id;
117 bool b_active;
118 u16 mtu;
119 u8 rx_drop_ttl0_flg;
120 u8 rx_vlan_removal_en;
121 u8 tx_tc;
122 enum core_tx_dest tx_dest;
123 enum core_error_handle ai_err_packet_too_big;
124 enum core_error_handle ai_err_no_buf;
125 u8 tx_stats_en;
126 struct qed_ll2_rx_queue rx_queue;
127 struct qed_ll2_tx_queue tx_queue;
128 u8 gsi_enable;
129};
130
131/**
132 * @brief qed_ll2_acquire_connection - allocates resources and
133 *        starts the Rx & Tx (if relevant) queue pair. Provides the
134 *        connection handle as an output parameter.
135 *
136 * @param p_hwfn
137 * @param p_params Contain various configuration properties
138 * @param rx_num_desc
139 * @param tx_num_desc
140 *
141 * @param p_connection_handle Output container for LL2 connection's handle
142 *
143 * @return 0 on success, failure otherwise
144 */
145int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
146 struct qed_ll2_info *p_params,
147 u16 rx_num_desc,
148 u16 tx_num_desc,
149 u8 *p_connection_handle);
150
151/**
152 * @brief qed_ll2_establish_connection - starts a previously
153 *        allocated LL2 queue pair
154 *
155 * @param p_hwfn
156 * @param p_ptt
157 * @param connection_handle LL2 connection's handle obtained from
158 *        qed_ll2_acquire_connection
159 *
160 * @return 0 on success, failure otherwise
161 */
162int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
163
164/**
165 * @brief qed_ll2_post_rx_buffer - submits a buffer to the LL2 Rx queue.
166 *
167 * @param p_hwfn
168 * @param connection_handle LL2 connection's handle obtained from
169 *        qed_ll2_acquire_connection
170 * @param addr physical address of the Rx buffer to submit
171 * @param cookie
172 * @param notify_fw produce corresponding Rx BD immediately
173 *
174 * @return 0 on success, failure otherwise
175 */
176int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn,
177 u8 connection_handle,
178 dma_addr_t addr,
179 u16 buf_len, void *cookie, u8 notify_fw);
180
181/**
182 * @brief qed_ll2_prepare_tx_packet - requests the start Tx BD
183 *        needed to prepare a Tx packet for submission to FW.
184 *
185 * @param p_hwfn
186 * @param connection_handle LL2 connection's handle obtained from
187 *        qed_ll2_acquire_connection
188 * @param num_of_bds number of requested BDs; equals the number of
189 *        fragments in the Tx packet
190 * @param vlan VLAN to insert to packet (if insertion set)
191 * @param bd_flags
192 * @param l4_hdr_offset_w L4 Header Offset from start of packet
193 * (in words). This is needed if both l4_csum
194 * and ipv6_ext are set
195 * @param first_frag
196 * @param first_frag_len
197 * @param cookie
198 *
199 * @param notify_fw
200 *
201 * @return 0 on success, failure otherwise
202 */
203int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
204 u8 connection_handle,
205 u8 num_of_bds,
206 u16 vlan,
207 u8 bd_flags,
208 u16 l4_hdr_offset_w,
209 enum qed_ll2_roce_flavor_type qed_roce_flavor,
210 dma_addr_t first_frag,
211 u16 first_frag_len, void *cookie, u8 notify_fw);
212
213/**
214 * @brief qed_ll2_release_connection - releases resources
215 * allocated for LL2 connection
216 *
217 * @param p_hwfn
218 * @param connection_handle LL2 connection's handle obtained from
219 *        qed_ll2_acquire_connection
220 */
221void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
222
223/**
224 * @brief qed_ll2_set_fragment_of_tx_packet - provides a fragment to fill
225 *        one of the Tx BDs requested by
226 *        qed_ll2_prepare_tx_packet
227 *
228 * @param p_hwfn
229 * @param connection_handle LL2 connection's handle
230 * obtained from
231 *        qed_ll2_acquire_connection
232 * @param addr
233 * @param nbytes
234 *
235 * @return 0 on success, failure otherwise
236 */
237int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn,
238 u8 connection_handle,
239 dma_addr_t addr, u16 nbytes);
240
241/**
242 * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
243 *
244 *
245 * @param p_hwfn
246 * @param connection_handle LL2 connection's handle
247 * obtained from
248 *        qed_ll2_acquire_connection
249 *
250 * @return 0 on success, failure otherwise
251 */
252int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle);
253
254/**
255 * @brief qed_ll2_get_stats - get LL2 queue's statistics
256 *
257 *
258 * @param p_hwfn
259 * @param connection_handle LL2 connection's handle obtained from
260 *        qed_ll2_acquire_connection
261 * @param p_stats
262 *
263 * @return 0 on success, failure otherwise
264 */
265int qed_ll2_get_stats(struct qed_hwfn *p_hwfn,
266 u8 connection_handle, struct qed_ll2_stats *p_stats);
267
268/**
269 * @brief qed_ll2_alloc - Allocates LL2 connections set
270 *
271 * @param p_hwfn
272 *
273 * @return pointer to allocated qed_ll2_info or NULL
274 */
275struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn);
276
277/**
278 * @brief qed_ll2_setup - Inits LL2 connections set
279 *
280 * @param p_hwfn
281 * @param p_ll2_connections
282 *
283 */
284void qed_ll2_setup(struct qed_hwfn *p_hwfn,
285 struct qed_ll2_info *p_ll2_connections);
286
287/**
288 * @brief qed_ll2_free - Releases LL2 connections set
289 *
290 * @param p_hwfn
291 * @param p_ll2_connections
292 *
293 */
294void qed_ll2_free(struct qed_hwfn *p_hwfn,
295 struct qed_ll2_info *p_ll2_connections);
296void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
297 u8 connection_handle,
298 void *cookie,
299 dma_addr_t rx_buf_addr,
300 u16 data_length,
301 u8 data_length_error,
302 u16 parse_flags,
303 u16 vlan,
304 u32 src_mac_addr_hi,
305 u16 src_mac_addr_lo, bool b_last_packet);
306void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
307 u8 connection_handle,
308 void *cookie,
309 dma_addr_t first_frag_addr,
310 bool b_last_fragment, bool b_last_packet);
311void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
312 u8 connection_handle,
313 void *cookie,
314 dma_addr_t first_frag_addr,
315 bool b_last_fragment, bool b_last_packet);
316#endif
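
For reference, a minimal sketch of the connection lifecycle described by the header above: acquire, establish, then terminate and release on teardown. The descriptor counts and configuration values are illustrative only:

/* Illustrative sketch only - not part of the qed driver */
static int example_ll2_lifecycle(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info ll2_info;
	u8 handle;
	int rc;

	memset(&ll2_info, 0, sizeof(ll2_info));
	ll2_info.conn_type = QED_LL2_TYPE_TEST;
	ll2_info.mtu = 1500;
	ll2_info.tx_dest = CORE_TX_DEST_NW;

	rc = qed_ll2_acquire_connection(p_hwfn, &ll2_info, 32, 32, &handle);
	if (rc)
		return rc;

	rc = qed_ll2_establish_connection(p_hwfn, handle);
	if (rc)
		goto release;

	/* ... post Rx buffers, transmit, read stats ... */

	qed_ll2_terminate_connection(p_hwfn, handle);
release:
	qed_ll2_release_connection(p_hwfn, handle);
	return rc;
}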
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index b730a632c383..4ee3151e80c2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -22,15 +22,22 @@
22#include <linux/etherdevice.h> 22#include <linux/etherdevice.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/qed/qed_if.h> 24#include <linux/qed/qed_if.h>
25#include <linux/qed/qed_ll2_if.h>
25 26
26#include "qed.h" 27#include "qed.h"
27#include "qed_sriov.h" 28#include "qed_sriov.h"
28#include "qed_sp.h" 29#include "qed_sp.h"
29#include "qed_dev_api.h" 30#include "qed_dev_api.h"
31#include "qed_ll2.h"
30#include "qed_mcp.h" 32#include "qed_mcp.h"
31#include "qed_hw.h" 33#include "qed_hw.h"
32#include "qed_selftest.h" 34#include "qed_selftest.h"
33 35
36#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
37#define QED_ROCE_QPS (8192)
38#define QED_ROCE_DPIS (8)
39#endif
40
34static char version[] = 41static char version[] =
35 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 42 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
36 43
@@ -204,8 +211,8 @@ int qed_fill_dev_info(struct qed_dev *cdev,
204 dev_info->pci_mem_start = cdev->pci_params.mem_start; 211 dev_info->pci_mem_start = cdev->pci_params.mem_start;
205 dev_info->pci_mem_end = cdev->pci_params.mem_end; 212 dev_info->pci_mem_end = cdev->pci_params.mem_end;
206 dev_info->pci_irq = cdev->pci_params.irq; 213 dev_info->pci_irq = cdev->pci_params.irq;
207 dev_info->rdma_supported = 214 dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
208 (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE); 215 QED_PCI_ETH_ROCE);
209 dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); 216 dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
210 ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); 217 ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
211 218
@@ -608,7 +615,16 @@ static int qed_nic_reset(struct qed_dev *cdev)
608 615
609static int qed_nic_setup(struct qed_dev *cdev) 616static int qed_nic_setup(struct qed_dev *cdev)
610{ 617{
611 int rc; 618 int rc, i;
619
620 /* Determine if interface is going to require LL2 */
621 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
622 for (i = 0; i < cdev->num_hwfns; i++) {
623 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
624
625 p_hwfn->using_ll2 = true;
626 }
627 }
612 628
613 rc = qed_resc_alloc(cdev); 629 rc = qed_resc_alloc(cdev);
614 if (rc) 630 if (rc)
@@ -666,6 +682,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
666 enum qed_int_mode int_mode) 682 enum qed_int_mode int_mode)
667{ 683{
668 struct qed_sb_cnt_info sb_cnt_info; 684 struct qed_sb_cnt_info sb_cnt_info;
685#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
686 int num_l2_queues;
687#endif
669 int rc; 688 int rc;
670 int i; 689 int i;
671 690
@@ -696,6 +715,31 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
696 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 715 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
697 cdev->num_hwfns; 716 cdev->num_hwfns;
698 717
718#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
719 num_l2_queues = 0;
720 for_each_hwfn(cdev, i)
721 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
722
723 DP_VERBOSE(cdev, QED_MSG_RDMA,
724 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
725 cdev->int_params.fp_msix_cnt, num_l2_queues);
726
727 if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
728 cdev->int_params.rdma_msix_cnt =
729 (cdev->int_params.fp_msix_cnt - num_l2_queues)
730 / cdev->num_hwfns;
731 cdev->int_params.rdma_msix_base =
732 cdev->int_params.fp_msix_base + num_l2_queues;
733 cdev->int_params.fp_msix_cnt = num_l2_queues;
734 } else {
735 cdev->int_params.rdma_msix_cnt = 0;
736 }
737
738 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
739 cdev->int_params.rdma_msix_cnt,
740 cdev->int_params.rdma_msix_base);
741#endif
742
699 return 0; 743 return 0;
700} 744}
701 745
@@ -799,6 +843,13 @@ static void qed_update_pf_params(struct qed_dev *cdev,
799{ 843{
800 int i; 844 int i;
801 845
846#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
847 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
848 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
849 /* divide by 3 the MRs to avoid MF ILT overflow */
850 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
851 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
852#endif
802 for (i = 0; i < cdev->num_hwfns; i++) { 853 for (i = 0; i < cdev->num_hwfns; i++) {
803 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 854 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
804 855
@@ -873,6 +924,12 @@ static int qed_slowpath_start(struct qed_dev *cdev,
873 DP_INFO(cdev, 924 DP_INFO(cdev,
874 "HW initialization and function start completed successfully\n"); 925 "HW initialization and function start completed successfully\n");
875 926
927 /* Allocate LL2 interface if needed */
928 if (QED_LEADING_HWFN(cdev)->using_ll2) {
929 rc = qed_ll2_alloc_if(cdev);
930 if (rc)
931 goto err3;
932 }
876 if (IS_PF(cdev)) { 933 if (IS_PF(cdev)) {
877 hwfn = QED_LEADING_HWFN(cdev); 934 hwfn = QED_LEADING_HWFN(cdev);
878 drv_version.version = (params->drv_major << 24) | 935 drv_version.version = (params->drv_major << 24) |
@@ -893,6 +950,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
893 950
894 return 0; 951 return 0;
895 952
953err3:
954 qed_hw_stop(cdev);
896err2: 955err2:
897 qed_hw_timers_stop_all(cdev); 956 qed_hw_timers_stop_all(cdev);
898 if (IS_PF(cdev)) 957 if (IS_PF(cdev))
@@ -915,6 +974,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
915 if (!cdev) 974 if (!cdev)
916 return -ENODEV; 975 return -ENODEV;
917 976
977 qed_ll2_dealloc_if(cdev);
978
918 if (IS_PF(cdev)) { 979 if (IS_PF(cdev)) {
919 qed_free_stream_mem(cdev); 980 qed_free_stream_mem(cdev);
920 if (IS_QED_ETH_IF(cdev)) 981 if (IS_QED_ETH_IF(cdev))
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 759cb04e02b0..b414a0542177 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -208,6 +208,26 @@
208 0x50196cUL 208 0x50196cUL
209#define NIG_REG_LLH_CLS_TYPE_DUALMODE \ 209#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
210 0x501964UL 210 0x501964UL
211#define NIG_REG_LLH_FUNC_FILTER_VALUE \
212 0x501a00UL
213#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
214 32
215#define NIG_REG_LLH_FUNC_FILTER_EN \
216 0x501a80UL
217#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE \
218 16
219#define NIG_REG_LLH_FUNC_FILTER_MODE \
220 0x501ac0UL
221#define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \
222 16
223#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \
224 0x501b00UL
225#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \
226 16
227#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL \
228 0x501b40UL
229#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \
230 16
211#define NCSI_REG_CONFIG \ 231#define NCSI_REG_CONFIG \
212 0x040200UL 232 0x040200UL
213#define PBF_REG_INIT \ 233#define PBF_REG_INIT \
@@ -264,6 +284,8 @@
264 0x1f0a1cUL 284 0x1f0a1cUL
265#define PRS_REG_ROCE_DEST_QP_MAX_PF \ 285#define PRS_REG_ROCE_DEST_QP_MAX_PF \
266 0x1f0430UL 286 0x1f0430UL
287#define PRS_REG_USE_LIGHT_L2 \
288 0x1f096cUL
267#define PSDM_REG_ENABLE_IN1 \ 289#define PSDM_REG_ENABLE_IN1 \
268 0xfa0004UL 290 0xfa0004UL
269#define PSEM_REG_ENABLE_IN \ 291#define PSEM_REG_ENABLE_IN \
@@ -1426,5 +1448,11 @@
1426 0x620000UL 1448 0x620000UL
1427#define PHY_PCIE_REG_PHY1 \ 1449#define PHY_PCIE_REG_PHY1 \
1428 0x624000UL 1450 0x624000UL
1429 1451#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
1452#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
1453#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
1454#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
1455#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
1456#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
1457#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
1430#endif 1458#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
new file mode 100644
index 000000000000..23430059471c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -0,0 +1,2954 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/types.h>
33#include <asm/byteorder.h>
34#include <linux/bitops.h>
35#include <linux/delay.h>
36#include <linux/dma-mapping.h>
37#include <linux/errno.h>
38#include <linux/etherdevice.h>
39#include <linux/if_ether.h>
40#include <linux/if_vlan.h>
41#include <linux/io.h>
42#include <linux/ip.h>
43#include <linux/ipv6.h>
44#include <linux/kernel.h>
45#include <linux/list.h>
46#include <linux/module.h>
47#include <linux/mutex.h>
48#include <linux/pci.h>
49#include <linux/slab.h>
50#include <linux/spinlock.h>
51#include <linux/string.h>
52#include <linux/tcp.h>
53#include <linux/bitops.h>
54#include <linux/qed/qed_roce_if.h>
55#include <linux/qed/qed_roce_if.h>
56#include "qed.h"
57#include "qed_cxt.h"
58#include "qed_hsi.h"
59#include "qed_hw.h"
60#include "qed_init_ops.h"
61#include "qed_int.h"
62#include "qed_ll2.h"
63#include "qed_mcp.h"
64#include "qed_reg_addr.h"
65#include "qed_sp.h"
66#include "qed_roce.h"
67#include "qed_ll2.h"
68
69void qed_async_roce_event(struct qed_hwfn *p_hwfn,
70 struct event_ring_entry *p_eqe)
71{
72 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
73
74 p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
75 p_eqe->opcode, &p_eqe->data);
76}
77
78static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
79 struct qed_bmap *bmap, u32 max_count)
80{
81 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
82
83 bmap->max_count = max_count;
84
85 bmap->bitmap = kzalloc(BITS_TO_LONGS(max_count) * sizeof(long),
86 GFP_KERNEL);
87 if (!bmap->bitmap) {
88 DP_NOTICE(p_hwfn,
89 "qed bmap alloc failed: cannot allocate memory (bitmap)\n");
90 return -ENOMEM;
91 }
92
93 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocated bitmap %p\n",
94 bmap->bitmap);
95 return 0;
96}
97
98static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
99 struct qed_bmap *bmap, u32 *id_num)
100{
101 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "bmap = %p\n", bmap);
102
103 *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
104
105 if (*id_num >= bmap->max_count) {
106 DP_NOTICE(p_hwfn, "no id available max_count=%d\n",
107 bmap->max_count);
108 return -EINVAL;
109 }
110
111 __set_bit(*id_num, bmap->bitmap);
112
113 return 0;
114}
115
116static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
117 struct qed_bmap *bmap, u32 id_num)
118{
119 bool b_acquired;
120
121 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "id_num = %08x", id_num);
122 if (id_num >= bmap->max_count)
123 return;
124
125 b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
126 if (!b_acquired) {
127 DP_NOTICE(p_hwfn, "ID %d already released\n", id_num);
128 return;
129 }
130}
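
These three helpers form a small ID allocator: qed_rdma_bmap_alloc() sizes the map, qed_rdma_bmap_alloc_id() hands out the first clear bit, and qed_bmap_release_id() returns it. A minimal sketch of allocating a PD id from pd_map; the helper name is illustrative and any locking the real allocators apply around the bitmap is omitted:

/* Illustrative sketch only - not part of the qed driver */
static int example_alloc_pd_id(struct qed_hwfn *p_hwfn, u16 *pd)
{
	u32 id;
	int rc;

	rc = qed_rdma_bmap_alloc_id(p_hwfn,
				    &p_hwfn->p_rdma_info->pd_map, &id);
	if (rc)
		return rc;

	*pd = (u16)id;
	return 0;
}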
131
132u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
133{
134 /* First sb id for RoCE is after all the l2 sb */
135 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
136}
137
138u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
139{
140 return QED_CAU_DEF_RX_TIMER_RES;
141}
142
143static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
144 struct qed_ptt *p_ptt,
145 struct qed_rdma_start_in_params *params)
146{
147 struct qed_rdma_info *p_rdma_info;
148 u32 num_cons, num_tasks;
149 int rc = -ENOMEM;
150
151 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
152
153 /* Allocate a struct with current pf rdma info */
154 p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
155 if (!p_rdma_info) {
156 DP_NOTICE(p_hwfn,
157 "qed rdma alloc failed: cannot allocate memory (rdma info). rc = %d\n",
158 rc);
159 return rc;
160 }
161
162 p_hwfn->p_rdma_info = p_rdma_info;
163 p_rdma_info->proto = PROTOCOLID_ROCE;
164
165 num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, 0);
166
167 p_rdma_info->num_qps = num_cons / 2;
168
169 num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
170
171 /* Each MR uses a single task */
172 p_rdma_info->num_mrs = num_tasks;
173
174 /* Queue zone lines are shared between RoCE and L2 in such a way that
175 * they can be used by each without obstructing the other.
176 */
177 p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
178
179 /* Allocate a struct with device params and fill it */
180 p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
181 if (!p_rdma_info->dev) {
182 DP_NOTICE(p_hwfn,
183 "qed rdma alloc failed: cannot allocate memory (rdma info dev). rc = %d\n",
184 rc);
185 goto free_rdma_info;
186 }
187
188 /* Allocate a struct with port params and fill it */
189 p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
190 if (!p_rdma_info->port) {
191 DP_NOTICE(p_hwfn,
192 "qed rdma alloc failed: cannot allocate memory (rdma info port). rc = %d\n",
193 rc);
194 goto free_rdma_dev;
195 }
196
197 /* Allocate bit map for pd's */
198 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS);
199 if (rc) {
200 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
201 "Failed to allocate pd_map, rc = %d\n",
202 rc);
203 goto free_rdma_port;
204 }
205
206 /* Allocate DPI bitmap */
207 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
208 p_hwfn->dpi_count);
209 if (rc) {
210 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
211 "Failed to allocate DPI bitmap, rc = %d\n", rc);
212 goto free_pd_map;
213 }
214
215 /* Allocate bitmap for cq's. The maximum number of CQs is bounded to
216 * twice the number of QPs.
217 */
218 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map,
219 p_rdma_info->num_qps * 2);
220 if (rc) {
221 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
222 "Failed to allocate cq bitmap, rc = %d\n", rc);
223 goto free_dpi_map;
224 }
225
226 /* Allocate bitmap for toggle bit for cq icids
227 * We toggle the bit every time we create or resize cq for a given icid.
228 * The maximum number of CQs is bounded to twice the number of QPs.
229 */
230 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
231 p_rdma_info->num_qps * 2);
232 if (rc) {
233 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
234			   "Failed to allocate toggle bits, rc = %d\n", rc);
235 goto free_cq_map;
236 }
237
238 /* Allocate bitmap for itids */
239 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
240 p_rdma_info->num_mrs);
241 if (rc) {
242 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
243 "Failed to allocate itids bitmaps, rc = %d\n", rc);
244 goto free_toggle_map;
245 }
246
247 /* Allocate bitmap for cids used for qps. */
248 rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons);
249 if (rc) {
250 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
251 "Failed to allocate cid bitmap, rc = %d\n", rc);
252 goto free_tid_map;
253 }
254
255 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
256 return 0;
257
258free_tid_map:
259 kfree(p_rdma_info->tid_map.bitmap);
260free_toggle_map:
261 kfree(p_rdma_info->toggle_bits.bitmap);
262free_cq_map:
263 kfree(p_rdma_info->cq_map.bitmap);
264free_dpi_map:
265 kfree(p_rdma_info->dpi_map.bitmap);
266free_pd_map:
267 kfree(p_rdma_info->pd_map.bitmap);
268free_rdma_port:
269 kfree(p_rdma_info->port);
270free_rdma_dev:
271 kfree(p_rdma_info->dev);
272free_rdma_info:
273 kfree(p_rdma_info);
274
275 return rc;
276}
277
278void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
279{
280 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
281
282 kfree(p_rdma_info->cid_map.bitmap);
283 kfree(p_rdma_info->tid_map.bitmap);
284 kfree(p_rdma_info->toggle_bits.bitmap);
285 kfree(p_rdma_info->cq_map.bitmap);
286 kfree(p_rdma_info->dpi_map.bitmap);
287 kfree(p_rdma_info->pd_map.bitmap);
288
289 kfree(p_rdma_info->port);
290 kfree(p_rdma_info->dev);
291
292 kfree(p_rdma_info);
293}
294
295static void qed_rdma_free(struct qed_hwfn *p_hwfn)
296{
297 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
298
299 qed_rdma_resc_free(p_hwfn);
300}
301
302static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
303{
304 guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
305 guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
306 guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
307 guid[3] = 0xff;
308 guid[4] = 0xfe;
309 guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
310 guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
311 guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
312}
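
The GUID is derived from the port MAC in the usual EUI-64 fashion: the universal/local bit of the first octet is flipped (the ^ 2) and ff:fe is inserted between the OUI and the device-specific half. For example (the MAC value is hypothetical), a port MAC of 00:0e:1e:12:34:56 yields the GUID 02:0e:1e:ff:fe:12:34:56.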
313
314static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
315 struct qed_rdma_start_in_params *params)
316{
317 struct qed_rdma_events *events;
318
319 events = &p_hwfn->p_rdma_info->events;
320
321 events->unaffiliated_event = params->events->unaffiliated_event;
322 events->affiliated_event = params->events->affiliated_event;
323 events->context = params->events->context;
324}
325
326static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
327 struct qed_rdma_start_in_params *params)
328{
329 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
330 struct qed_dev *cdev = p_hwfn->cdev;
331 u32 pci_status_control;
332 u32 num_qps;
333
334 /* Vendor specific information */
335 dev->vendor_id = cdev->vendor_id;
336 dev->vendor_part_id = cdev->device_id;
337 dev->hw_ver = 0;
338 dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
339 (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
340
341 qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
342 dev->node_guid = dev->sys_image_guid;
343
344 dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
345 RDMA_MAX_SGE_PER_RQ_WQE);
346
347 if (cdev->rdma_max_sge)
348 dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
349
350 dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
351
352 dev->max_inline = (cdev->rdma_max_inline) ?
353 min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
354 dev->max_inline;
355
356 dev->max_wqe = QED_RDMA_MAX_WQE;
357 dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
358
359 /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
360 * it is up-aligned to 16 and then to ILT page size within qed cxt.
361 * This is OK in terms of ILT but we don't want to configure the FW
362 * above its abilities
363 */
364 num_qps = ROCE_MAX_QPS;
365 num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
366 dev->max_qp = num_qps;
367
368	/* CQs use the same icids that QPs use, hence they are limited by the
369 * number of icids. There are two icids per QP.
370 */
371 dev->max_cq = num_qps * 2;
372
373 /* The number of mrs is smaller by 1 since the first is reserved */
374 dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
375 dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
376
377	/* The maximum CQE capacity per CQ supported:
378	 * the max number of CQEs fits in a two-layer PBL, where
379	 * 8 is the pointer size in bytes and
380	 * 32 is the size of a CQ element in bytes.
381 */
382 if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
383 dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
384 else
385 dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
386
387 dev->max_mw = 0;
388 dev->max_fmr = QED_RDMA_MAX_FMR;
389 dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
390 dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
391 dev->max_pkey = QED_RDMA_MAX_P_KEY;
392
393 dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
394 (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
395 dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
396 RDMA_REQ_RD_ATOMIC_ELM_SIZE;
397 dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
398 p_hwfn->p_rdma_info->num_qps;
399 dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
400 dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
401 dev->max_pd = RDMA_MAX_PDS;
402 dev->max_ah = p_hwfn->p_rdma_info->num_qps;
403 dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
404
405	/* Set capabilities */
406 dev->dev_caps = 0;
407 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
408 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
409 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
410 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
411 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
412 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
413 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
414 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
415
416 /* Check atomic operations support in PCI configuration space. */
417 pci_read_config_dword(cdev->pdev,
418 cdev->pdev->pcie_cap + PCI_EXP_DEVCTL2,
419 &pci_status_control);
420
421 if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
422 SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
423}
424
425static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
426{
427 struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
428 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
429
430 port->port_state = p_hwfn->mcp_info->link_output.link_up ?
431 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
432
433 port->max_msg_size = min_t(u64,
434 (dev->max_mr_mw_fmr_size *
435 p_hwfn->cdev->rdma_max_sge),
436 BIT(31));
437
438 port->pkey_bad_counter = 0;
439}
440
441static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
442{
443 u32 ll2_ethertype_en;
444
445 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
446 p_hwfn->b_rdma_enabled_in_prs = false;
447
448 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
449
450 p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
451
452 	/* Writing to the RoCE PRS search register is deferred until the
453 	 * first cid is allocated; see qed_cxt_dynamic_ilt_alloc() for details.
454 */
455 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
456 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
457 (ll2_ethertype_en | 0x01));
458
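	/* QP icids are handed out in adjacent pairs (responder first,
	 * requester second), so the protocol's first cid must be even.
	 */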
459 if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
460 DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
461 return -EINVAL;
462 }
463
464 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
465 return 0;
466}
467
468static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
469 struct qed_rdma_start_in_params *params,
470 struct qed_ptt *p_ptt)
471{
472 struct rdma_init_func_ramrod_data *p_ramrod;
473 struct qed_rdma_cnq_params *p_cnq_pbl_list;
474 struct rdma_init_func_hdr *p_params_header;
475 struct rdma_cnq_params *p_cnq_params;
476 struct qed_sp_init_data init_data;
477 struct qed_spq_entry *p_ent;
478 u32 cnq_id, sb_id;
479 int rc;
480
481 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
482
483 /* Save the number of cnqs for the function close ramrod */
484 p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
485
486 /* Get SPQ entry */
487 memset(&init_data, 0, sizeof(init_data));
488 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
489 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
490
491 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
492 p_hwfn->p_rdma_info->proto, &init_data);
493 if (rc)
494 return rc;
495
496 p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
497
498 p_params_header = &p_ramrod->params_header;
499 p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
500 QED_RDMA_CNQ_RAM);
501 p_params_header->num_cnqs = params->desired_cnq;
502
503 if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
504 p_params_header->cq_ring_mode = 1;
505 else
506 p_params_header->cq_ring_mode = 0;
507
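	/* Fill one entry per requested CNQ: its status block, PBL base
	 * address and the queue zone used for producer updates.
	 */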
508 for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
509 sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
510 p_cnq_params = &p_ramrod->cnq_params[cnq_id];
511 p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
512 p_cnq_params->sb_num =
513 cpu_to_le16(p_hwfn->sbs_info[sb_id]->igu_sb_id);
514
515 p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
516 p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
517
518 DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
519 p_cnq_pbl_list->pbl_ptr);
520
521 /* we assume here that cnq_id and qz_offset are the same */
522 p_cnq_params->queue_zone_num =
523 cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
524 cnq_id);
525 }
526
527 return qed_spq_post(p_hwfn, p_ent, NULL);
528}
529
530static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
531{
532 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
533
534 /* The first DPI is reserved for the Kernel */
535 __set_bit(0, p_hwfn->p_rdma_info->dpi_map.bitmap);
536
537 /* Tid 0 will be used as the key for "reserved MR".
538 * The driver should allocate memory for it so it can be loaded but no
539 * ramrod should be passed on it.
540 */
541 qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
542 if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
543 DP_NOTICE(p_hwfn,
544 "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
545 return -EINVAL;
546 }
547
548 return 0;
549}
550
551static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
552 struct qed_ptt *p_ptt,
553 struct qed_rdma_start_in_params *params)
554{
555 int rc;
556
557 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
558
559 spin_lock_init(&p_hwfn->p_rdma_info->lock);
560
561 qed_rdma_init_devinfo(p_hwfn, params);
562 qed_rdma_init_port(p_hwfn);
563 qed_rdma_init_events(p_hwfn, params);
564
565 rc = qed_rdma_reserve_lkey(p_hwfn);
566 if (rc)
567 return rc;
568
569 rc = qed_rdma_init_hw(p_hwfn, p_ptt);
570 if (rc)
571 return rc;
572
573 return qed_rdma_start_fw(p_hwfn, params, p_ptt);
574}
575
576int qed_rdma_stop(void *rdma_cxt)
577{
578 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
579 struct rdma_close_func_ramrod_data *p_ramrod;
580 struct qed_sp_init_data init_data;
581 struct qed_spq_entry *p_ent;
582 struct qed_ptt *p_ptt;
583 u32 ll2_ethertype_en;
584 int rc = -EBUSY;
585
586 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
587
588 p_ptt = qed_ptt_acquire(p_hwfn);
589 if (!p_ptt) {
590 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
591 return rc;
592 }
593
594 /* Disable RoCE search */
595 qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
596 p_hwfn->b_rdma_enabled_in_prs = false;
597
598 qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
599
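	/* Clear bit 0 of the light-L2 ethertype enable register, undoing
	 * the enablement done in qed_rdma_init_hw().
	 */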
600 ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
601
602 qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
603 (ll2_ethertype_en & 0xFFFE));
604
605 qed_ptt_release(p_hwfn, p_ptt);
606
607 /* Get SPQ entry */
608 memset(&init_data, 0, sizeof(init_data));
609 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
610 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
611
612 /* Stop RoCE */
613 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
614 p_hwfn->p_rdma_info->proto, &init_data);
615 if (rc)
616 goto out;
617
618 p_ramrod = &p_ent->ramrod.rdma_close_func;
619
620 p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
621 p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
622
623 rc = qed_spq_post(p_hwfn, p_ent, NULL);
624
625out:
626 qed_rdma_free(p_hwfn);
627
628 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
629 return rc;
630}
631
632int qed_rdma_add_user(void *rdma_cxt,
633 struct qed_rdma_add_user_out_params *out_params)
634{
635 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
636 u32 dpi_start_offset;
637 u32 returned_id = 0;
638 int rc;
639
640 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
641
642 /* Allocate DPI */
643 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
644 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
645 &returned_id);
646 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);

	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate a DPI\n");
		return rc;
	}

	out_params->dpi = (u16)returned_id;
649
650 /* Calculate the corresponding DPI address */
651 dpi_start_offset = p_hwfn->dpi_start_offset;
652
653 out_params->dpi_addr = (u64)((u8 __iomem *)p_hwfn->doorbells +
654 dpi_start_offset +
655 ((out_params->dpi) * p_hwfn->dpi_size));
656
657 out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
658 dpi_start_offset +
659 ((out_params->dpi) * p_hwfn->dpi_size);
660
661 out_params->dpi_size = p_hwfn->dpi_size;
662
663 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
664 return rc;
665}
666
667struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
668{
669 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
670 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
671
672 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
673
674 /* Link may have changed */
675 p_port->port_state = p_hwfn->mcp_info->link_output.link_up ?
676 QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
677
678 p_port->link_speed = p_hwfn->mcp_info->link_output.speed;
679
680 return p_port;
681}
682
683struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
684{
685 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
686
687 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
688
689 /* Return struct with device parameters */
690 return p_hwfn->p_rdma_info->dev;
691}
692
693void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
694{
695 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
696
697 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
698
699 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
700 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
701 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
702}
703
704int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
705{
706 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
707 int rc;
708
709 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
710
711 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
712 rc = qed_rdma_bmap_alloc_id(p_hwfn,
713 &p_hwfn->p_rdma_info->tid_map, itid);
714 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
715 if (rc)
716 goto out;
717
718 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
719out:
720 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
721 return rc;
722}
723
724void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
725{
726 struct qed_hwfn *p_hwfn;
727 u16 qz_num;
728 u32 addr;
729
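	/* Translate the CNQ offset to its queue zone and reflect the new
	 * producer value to USTORM internal RAM via the BAR0 GTT window.
	 */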
730 p_hwfn = (struct qed_hwfn *)rdma_cxt;
731 qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
732 addr = GTT_BAR0_MAP_REG_USDM_RAM +
733 USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
734
735 REG_WR16(p_hwfn, addr, prod);
736
737 /* keep prod updates ordered */
738 wmb();
739}
740
741static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
742 struct qed_dev_rdma_info *info)
743{
744 memset(info, 0, sizeof(*info));
745
746 info->rdma_type = QED_RDMA_TYPE_ROCE;
747
748 qed_fill_dev_info(cdev, &info->common);
749
750 return 0;
751}
752
753static int qed_rdma_get_sb_start(struct qed_dev *cdev)
754{
755 int feat_num;
756
757 if (cdev->num_hwfns > 1)
758 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE);
759 else
760 feat_num = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_PF_L2_QUE) *
761 cdev->num_hwfns;
762
763 return feat_num;
764}
765
766static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
767{
768 int n_cnq = FEAT_NUM(QED_LEADING_HWFN(cdev), QED_RDMA_CNQ);
769 int n_msix = cdev->int_params.rdma_msix_cnt;
770
771 return min_t(int, n_cnq, n_msix);
772}
773
774static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
775{
776 int limit = 0;
777
778 /* Mark the fastpath as free/used */
779 cdev->int_params.fp_initialized = cnt ? true : false;
780
781 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
782 DP_ERR(cdev,
783 "qed roce supports only MSI-X interrupts (detected %d).\n",
784 cdev->int_params.out.int_mode);
785 return -EINVAL;
786 } else if (cdev->int_params.fp_msix_cnt) {
787 limit = cdev->int_params.rdma_msix_cnt;
788 }
789
790 if (!limit)
791 return -ENOMEM;
792
793 return min_t(int, cnt, limit);
794}
795
796static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
797{
798 memset(info, 0, sizeof(*info));
799
800 if (!cdev->int_params.fp_initialized) {
801 DP_INFO(cdev,
802 "Protocol driver requested interrupt information, but its support is not yet configured\n");
803 return -EINVAL;
804 }
805
806 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
807 int msix_base = cdev->int_params.rdma_msix_base;
808
809 info->msix_cnt = cdev->int_params.rdma_msix_cnt;
810 info->msix = &cdev->int_params.msix_table[msix_base];
811
812 DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
813 info->msix_cnt, msix_base);
814 }
815
816 return 0;
817}
818
819int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
820{
821 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
822 u32 returned_id;
823 int rc;
824
825 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
826
827 /* Allocates an unused protection domain */
828 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
829 rc = qed_rdma_bmap_alloc_id(p_hwfn,
830 &p_hwfn->p_rdma_info->pd_map, &returned_id);
831 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
832
833 *pd = (u16)returned_id;
834
835 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
836 return rc;
837}
838
839void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
840{
841 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
842
843 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
844
845 /* Returns a previously allocated protection domain for reuse */
846 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
847 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
848 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
849}
850
851static enum qed_rdma_toggle_bit
852qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
853{
854 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
855 enum qed_rdma_toggle_bit toggle_bit;
856 u32 bmap_id;
857
858 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
859
860 	/* The function toggles the bit related to the given icid and
861 	 * returns the new value of the bit.
862 */
863 bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
864
865 spin_lock_bh(&p_info->lock);
866 toggle_bit = !test_and_change_bit(bmap_id,
867 p_info->toggle_bits.bitmap);
868 spin_unlock_bh(&p_info->lock);
869
870 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
871 toggle_bit);
872
873 return toggle_bit;
874}
875
876int qed_rdma_create_cq(void *rdma_cxt,
877 struct qed_rdma_create_cq_in_params *params, u16 *icid)
878{
879 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
880 struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
881 struct rdma_create_cq_ramrod_data *p_ramrod;
882 enum qed_rdma_toggle_bit toggle_bit;
883 struct qed_sp_init_data init_data;
884 struct qed_spq_entry *p_ent;
885 u32 returned_id, start_cid;
886 int rc;
887
888 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
889 params->cq_handle_hi, params->cq_handle_lo);
890
891 /* Allocate icid */
892 spin_lock_bh(&p_info->lock);
893 rc = qed_rdma_bmap_alloc_id(p_hwfn,
894 &p_info->cq_map, &returned_id);
895 spin_unlock_bh(&p_info->lock);
896
897 if (rc) {
898 DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
899 return rc;
900 }
901
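	/* CQs share the RoCE protocol's icid space; convert the bitmap index
	 * to an absolute icid by adding the protocol's first cid.
	 */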
902 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
903 p_info->proto);
904 *icid = returned_id + start_cid;
905
906 /* Check if icid requires a page allocation */
907 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
908 if (rc)
909 goto err;
910
911 /* Get SPQ entry */
912 memset(&init_data, 0, sizeof(init_data));
913 init_data.cid = *icid;
914 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
915 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
916
917 /* Send create CQ ramrod */
918 rc = qed_sp_init_request(p_hwfn, &p_ent,
919 RDMA_RAMROD_CREATE_CQ,
920 p_info->proto, &init_data);
921 if (rc)
922 goto err;
923
924 p_ramrod = &p_ent->ramrod.rdma_create_cq;
925
926 p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
927 p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
928 p_ramrod->dpi = cpu_to_le16(params->dpi);
929 p_ramrod->is_two_level_pbl = params->pbl_two_level;
930 p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
931 DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
932 p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
933 p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
934 params->cnq_id;
935 p_ramrod->int_timeout = params->int_timeout;
936
937 /* toggle the bit for every resize or create cq for a given icid */
938 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
939
940 p_ramrod->toggle_bit = toggle_bit;
941
942 rc = qed_spq_post(p_hwfn, p_ent, NULL);
943 if (rc) {
944 /* restore toggle bit */
945 qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
946 goto err;
947 }
948
949 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
950 return rc;
951
952err:
953 /* release allocated icid */
954 qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
955 DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
956
957 return rc;
958}
959
960int qed_rdma_resize_cq(void *rdma_cxt,
961 struct qed_rdma_resize_cq_in_params *in_params,
962 struct qed_rdma_resize_cq_out_params *out_params)
963{
964 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
965 struct rdma_resize_cq_output_params *p_ramrod_res;
966 struct rdma_resize_cq_ramrod_data *p_ramrod;
967 enum qed_rdma_toggle_bit toggle_bit;
968 struct qed_sp_init_data init_data;
969 struct qed_spq_entry *p_ent;
970 dma_addr_t ramrod_res_phys;
971 u8 fw_return_code;
972 int rc = -ENOMEM;
973
974 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
975
976 p_ramrod_res =
977 (struct rdma_resize_cq_output_params *)
978 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
979 sizeof(struct rdma_resize_cq_output_params),
980 &ramrod_res_phys, GFP_KERNEL);
981 if (!p_ramrod_res) {
982 DP_NOTICE(p_hwfn,
983 "qed resize cq failed: cannot allocate memory (ramrod)\n");
984 return rc;
985 }
986
987 /* Get SPQ entry */
988 memset(&init_data, 0, sizeof(init_data));
989 init_data.cid = in_params->icid;
990 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
991 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
992
993 rc = qed_sp_init_request(p_hwfn, &p_ent,
994 RDMA_RAMROD_RESIZE_CQ,
995 p_hwfn->p_rdma_info->proto, &init_data);
996 if (rc)
997 goto err;
998
999 p_ramrod = &p_ent->ramrod.rdma_resize_cq;
1000
1001 p_ramrod->flags = 0;
1002
1003 /* toggle the bit for every resize or create cq for a given icid */
1004 toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn,
1005 in_params->icid);
1006
1007 SET_FIELD(p_ramrod->flags,
1008 RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT, toggle_bit);
1009
1010 SET_FIELD(p_ramrod->flags,
1011 RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL,
1012 in_params->pbl_two_level);
1013
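	/* The -12 converts the PBL page size from log2(bytes) to the
	 * 4KB-based log value the FW expects.
	 */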
1014 p_ramrod->pbl_log_page_size = in_params->pbl_page_size_log - 12;
1015 p_ramrod->pbl_num_pages = cpu_to_le16(in_params->pbl_num_pages);
1016 p_ramrod->max_cqes = cpu_to_le32(in_params->cq_size);
1017 DMA_REGPAIR_LE(p_ramrod->pbl_addr, in_params->pbl_ptr);
1018 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1019
1020 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
1021 if (rc)
1022 goto err;
1023
1024 if (fw_return_code != RDMA_RETURN_OK) {
1025 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
1026 rc = -EINVAL;
1027 goto err;
1028 }
1029
1030 out_params->prod = le32_to_cpu(p_ramrod_res->old_cq_prod);
1031 out_params->cons = le32_to_cpu(p_ramrod_res->old_cq_cons);
1032
1033 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1034 sizeof(struct rdma_resize_cq_output_params),
1035 p_ramrod_res, ramrod_res_phys);
1036
1037 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Resized CQ, rc = %d\n", rc);
1038
1039 return rc;
1040
1041 err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_resize_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);
1044 DP_NOTICE(p_hwfn, "Resized CQ, Failed - rc = %d\n", rc);
1045
1046 return rc;
1047}
1048
1049int qed_rdma_destroy_cq(void *rdma_cxt,
1050 struct qed_rdma_destroy_cq_in_params *in_params,
1051 struct qed_rdma_destroy_cq_out_params *out_params)
1052{
1053 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1054 struct rdma_destroy_cq_output_params *p_ramrod_res;
1055 struct rdma_destroy_cq_ramrod_data *p_ramrod;
1056 struct qed_sp_init_data init_data;
1057 struct qed_spq_entry *p_ent;
1058 dma_addr_t ramrod_res_phys;
1059 int rc = -ENOMEM;
1060
1061 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
1062
1063 p_ramrod_res =
1064 (struct rdma_destroy_cq_output_params *)
1065 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1066 sizeof(struct rdma_destroy_cq_output_params),
1067 &ramrod_res_phys, GFP_KERNEL);
1068 if (!p_ramrod_res) {
1069 DP_NOTICE(p_hwfn,
1070 "qed destroy cq failed: cannot allocate memory (ramrod)\n");
1071 return rc;
1072 }
1073
1074 /* Get SPQ entry */
1075 memset(&init_data, 0, sizeof(init_data));
1076 init_data.cid = in_params->icid;
1077 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1078 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1079
1080 /* Send destroy CQ ramrod */
1081 rc = qed_sp_init_request(p_hwfn, &p_ent,
1082 RDMA_RAMROD_DESTROY_CQ,
1083 p_hwfn->p_rdma_info->proto, &init_data);
1084 if (rc)
1085 goto err;
1086
1087 p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
1088 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1089
1090 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1091 if (rc)
1092 goto err;
1093
1094 out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
1095
1096 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1097 sizeof(struct rdma_destroy_cq_output_params),
1098 p_ramrod_res, ramrod_res_phys);
1099
1100 /* Free icid */
1101 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1102
1103 qed_bmap_release_id(p_hwfn,
1104 &p_hwfn->p_rdma_info->cq_map,
1105 (in_params->icid -
1106 qed_cxt_get_proto_cid_start(p_hwfn,
1107 p_hwfn->
1108 p_rdma_info->proto)));
1109
1110 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1111
1112 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
1113 return rc;
1114
1115 err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct rdma_destroy_cq_output_params),
			  p_ramrod_res, ramrod_res_phys);
1118
1119 return rc;
1120}
1121
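/* Pack a 6-byte MAC address into the three 16-bit words the FW expects */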
1122static void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
1123{
1124 p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
1125 p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
1126 p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
1127}
1128
1129static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
1130 __le32 *dst_gid)
1131{
1132 u32 i;
1133
1134 if (qp->roce_mode == ROCE_V2_IPV4) {
1135 /* The IPv4 addresses shall be aligned to the highest word.
1136 * The lower words must be zero.
1137 */
1138 memset(src_gid, 0, sizeof(union qed_gid));
1139 memset(dst_gid, 0, sizeof(union qed_gid));
1140 src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
1141 dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
1142 } else {
1143 /* GIDs and IPv6 addresses coincide in location and size */
1144 for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
1145 src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
1146 dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
1147 }
1148 }
1149}
1150
1151static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
1152{
1153 enum roce_flavor flavor;
1154
1155 switch (roce_mode) {
1156 case ROCE_V1:
1157 flavor = PLAIN_ROCE;
1158 break;
1159 case ROCE_V2_IPV4:
1160 flavor = RROCE_IPV4;
1161 break;
1162 case ROCE_V2_IPV6:
1163 		flavor = RROCE_IPV6;
1164 break;
1165 default:
1166 		flavor = MAX_ROCE_FLAVOR;
1167 break;
1168 }
1169 return flavor;
1170}
1171
1172int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
1173{
1174 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
1175 u32 responder_icid;
1176 u32 requester_icid;
1177 int rc;
1178
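	/* Each QP consumes a pair of adjacent icids - the first for the
	 * responder and the second for the requester.
	 */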
1179 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1180 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1181 &responder_icid);
1182 if (rc) {
1183 spin_unlock_bh(&p_rdma_info->lock);
1184 return rc;
1185 }
1186
1187 rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
1188 &requester_icid);
1189
1190 spin_unlock_bh(&p_rdma_info->lock);
1191 if (rc)
1192 goto err;
1193
1194 	/* The two icids should be adjacent */
1195 	if ((requester_icid - responder_icid) != 1) {
1196 		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent icids\n");
1197 rc = -EINVAL;
1198 goto err;
1199 }
1200
1201 responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1202 p_rdma_info->proto);
1203 requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
1204 p_rdma_info->proto);
1205
1206 	/* If these icids require a new ILT line, allocate DMA-able context
1207 	 * for an ILT page.
1208 */
1209 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
1210 if (rc)
1211 goto err;
1212
1213 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
1214 if (rc)
1215 goto err;
1216
1217 *cid = (u16)responder_icid;
1218 return rc;
1219
1220err:
1221 spin_lock_bh(&p_rdma_info->lock);
1222 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
1223 qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
1224
1225 spin_unlock_bh(&p_rdma_info->lock);
1226 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
1227 "Allocate CID - failed, rc = %d\n", rc);
1228 return rc;
1229}
1230
1231static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
1232 struct qed_rdma_qp *qp)
1233{
1234 struct roce_create_qp_resp_ramrod_data *p_ramrod;
1235 struct qed_sp_init_data init_data;
1236 union qed_qm_pq_params qm_params;
1237 enum roce_flavor roce_flavor;
1238 struct qed_spq_entry *p_ent;
1239 u16 physical_queue0 = 0;
1240 int rc;
1241
1242 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1243
1244 /* Allocate DMA-able memory for IRQ */
1245 qp->irq_num_pages = 1;
1246 qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1247 RDMA_RING_PAGE_SIZE,
1248 &qp->irq_phys_addr, GFP_KERNEL);
1249 if (!qp->irq) {
1250 rc = -ENOMEM;
1251 DP_NOTICE(p_hwfn,
1252 "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
1253 rc);
1254 return rc;
1255 }
1256
1257 /* Get SPQ entry */
1258 memset(&init_data, 0, sizeof(init_data));
1259 init_data.cid = qp->icid;
1260 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1261 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1262
1263 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
1264 PROTOCOLID_ROCE, &init_data);
1265 if (rc)
1266 goto err;
1267
1268 p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
1269
1270 p_ramrod->flags = 0;
1271
1272 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1273 SET_FIELD(p_ramrod->flags,
1274 ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1275
1276 SET_FIELD(p_ramrod->flags,
1277 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1278 qp->incoming_rdma_read_en);
1279
1280 SET_FIELD(p_ramrod->flags,
1281 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1282 qp->incoming_rdma_write_en);
1283
1284 SET_FIELD(p_ramrod->flags,
1285 ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1286 qp->incoming_atomic_en);
1287
1288 SET_FIELD(p_ramrod->flags,
1289 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1290 qp->e2e_flow_control_en);
1291
1292 SET_FIELD(p_ramrod->flags,
1293 ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
1294
1295 SET_FIELD(p_ramrod->flags,
1296 ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
1297 qp->fmr_and_reserved_lkey);
1298
1299 SET_FIELD(p_ramrod->flags,
1300 ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1301 qp->min_rnr_nak_timer);
1302
1303 p_ramrod->max_ird = qp->max_rd_atomic_resp;
1304 p_ramrod->traffic_class = qp->traffic_class_tos;
1305 p_ramrod->hop_limit = qp->hop_limit_ttl;
1306 p_ramrod->irq_num_pages = qp->irq_num_pages;
1307 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1308 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1309 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1310 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1311 p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
1312 p_ramrod->pd = cpu_to_le16(qp->pd);
1313 p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
1314 DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
1315 DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
1316 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1317 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1318 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1319 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1320 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1321 p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
1322 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1323 qp->rq_cq_id);
1324
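	/* Resolve the QM physical queue for this QP; the QP index is the
	 * icid-pair number (icid >> 1).
	 */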
1325 memset(&qm_params, 0, sizeof(qm_params));
1326 qm_params.roce.qpid = qp->icid >> 1;
1327 physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
1328
1329 p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
1330 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1331
1332 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1333 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1334
1335 p_ramrod->udp_src_port = qp->udp_src_port;
1336 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1337 p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
1338 p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
1339
1340 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1341 qp->stats_queue;
1342
1343 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1344
1345 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
1346 rc, physical_queue0);
1347
1348 if (rc)
1349 goto err;
1350
1351 qp->resp_offloaded = true;
1352
1353 return rc;
1354
1355err:
1356 DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
1357 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1358 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1359 qp->irq, qp->irq_phys_addr);
1360
1361 return rc;
1362}
1363
1364static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
1365 struct qed_rdma_qp *qp)
1366{
1367 struct roce_create_qp_req_ramrod_data *p_ramrod;
1368 struct qed_sp_init_data init_data;
1369 union qed_qm_pq_params qm_params;
1370 enum roce_flavor roce_flavor;
1371 struct qed_spq_entry *p_ent;
1372 u16 physical_queue0 = 0;
1373 int rc;
1374
1375 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1376
1377 /* Allocate DMA-able memory for ORQ */
1378 qp->orq_num_pages = 1;
1379 qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1380 RDMA_RING_PAGE_SIZE,
1381 &qp->orq_phys_addr, GFP_KERNEL);
1382 if (!qp->orq) {
1383 rc = -ENOMEM;
1384 DP_NOTICE(p_hwfn,
1385 "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
1386 rc);
1387 return rc;
1388 }
1389
1390 /* Get SPQ entry */
1391 memset(&init_data, 0, sizeof(init_data));
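	/* The requester uses the second icid of the QP's pair (icid + 1) */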
1392 init_data.cid = qp->icid + 1;
1393 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1394 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1395
1396 rc = qed_sp_init_request(p_hwfn, &p_ent,
1397 ROCE_RAMROD_CREATE_QP,
1398 PROTOCOLID_ROCE, &init_data);
1399 if (rc)
1400 goto err;
1401
1402 p_ramrod = &p_ent->ramrod.roce_create_qp_req;
1403
1404 p_ramrod->flags = 0;
1405
1406 roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
1407 SET_FIELD(p_ramrod->flags,
1408 ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
1409
1410 SET_FIELD(p_ramrod->flags,
1411 ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
1412 qp->fmr_and_reserved_lkey);
1413
1414 SET_FIELD(p_ramrod->flags,
1415 ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
1416
1417 SET_FIELD(p_ramrod->flags,
1418 ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1419
1420 SET_FIELD(p_ramrod->flags,
1421 ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1422 qp->rnr_retry_cnt);
1423
1424 p_ramrod->max_ord = qp->max_rd_atomic_req;
1425 p_ramrod->traffic_class = qp->traffic_class_tos;
1426 p_ramrod->hop_limit = qp->hop_limit_ttl;
1427 p_ramrod->orq_num_pages = qp->orq_num_pages;
1428 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1429 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1430 p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
1431 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1432 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1433 p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
1434 p_ramrod->pd = cpu_to_le16(qp->pd);
1435 p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
1436 DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
1437 DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
1438 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1439 p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
1440 p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
1441 p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
1442 p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
1443 p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
1444 p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
1445 qp->sq_cq_id);
1446
1447 memset(&qm_params, 0, sizeof(qm_params));
1448 qm_params.roce.qpid = qp->icid >> 1;
1449 physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
1450
1451 p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
1452 p_ramrod->dpi = cpu_to_le16(qp->dpi);
1453
1454 qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
1455 qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
1456
1457 p_ramrod->udp_src_port = qp->udp_src_port;
1458 p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
1459 p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
1460 qp->stats_queue;
1461
1462 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1463
1464 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
1465
1466 if (rc)
1467 goto err;
1468
1469 qp->req_offloaded = true;
1470
1471 return rc;
1472
1473err:
1474 	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
1475 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1476 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1477 qp->orq, qp->orq_phys_addr);
1478 return rc;
1479}
1480
1481static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
1482 struct qed_rdma_qp *qp,
1483 bool move_to_err, u32 modify_flags)
1484{
1485 struct roce_modify_qp_resp_ramrod_data *p_ramrod;
1486 struct qed_sp_init_data init_data;
1487 struct qed_spq_entry *p_ent;
1488 int rc;
1489
1490 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1491
1492 if (move_to_err && !qp->resp_offloaded)
1493 return 0;
1494
1495 /* Get SPQ entry */
1496 memset(&init_data, 0, sizeof(init_data));
1497 init_data.cid = qp->icid;
1498 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1499 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1500
1501 rc = qed_sp_init_request(p_hwfn, &p_ent,
1502 ROCE_EVENT_MODIFY_QP,
1503 PROTOCOLID_ROCE, &init_data);
1504 if (rc) {
1505 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1506 return rc;
1507 }
1508
1509 p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
1510
1511 p_ramrod->flags = 0;
1512
1513 SET_FIELD(p_ramrod->flags,
1514 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1515
1516 SET_FIELD(p_ramrod->flags,
1517 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
1518 qp->incoming_rdma_read_en);
1519
1520 SET_FIELD(p_ramrod->flags,
1521 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
1522 qp->incoming_rdma_write_en);
1523
1524 SET_FIELD(p_ramrod->flags,
1525 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
1526 qp->incoming_atomic_en);
1527
1528 SET_FIELD(p_ramrod->flags,
1529 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
1530 qp->e2e_flow_control_en);
1531
1532 SET_FIELD(p_ramrod->flags,
1533 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
1534 GET_FIELD(modify_flags,
1535 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
1536
1537 SET_FIELD(p_ramrod->flags,
1538 ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
1539 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1540
1541 SET_FIELD(p_ramrod->flags,
1542 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1543 GET_FIELD(modify_flags,
1544 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1545
1546 SET_FIELD(p_ramrod->flags,
1547 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
1548 GET_FIELD(modify_flags,
1549 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
1550
1551 SET_FIELD(p_ramrod->flags,
1552 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
1553 GET_FIELD(modify_flags,
1554 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
1555
1556 p_ramrod->fields = 0;
1557 SET_FIELD(p_ramrod->fields,
1558 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
1559 qp->min_rnr_nak_timer);
1560
1561 p_ramrod->max_ird = qp->max_rd_atomic_resp;
1562 p_ramrod->traffic_class = qp->traffic_class_tos;
1563 p_ramrod->hop_limit = qp->hop_limit_ttl;
1564 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1565 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1566 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1567 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1568 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1569
1570 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
1571 return rc;
1572}
1573
1574static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
1575 struct qed_rdma_qp *qp,
1576 bool move_to_sqd,
1577 bool move_to_err, u32 modify_flags)
1578{
1579 struct roce_modify_qp_req_ramrod_data *p_ramrod;
1580 struct qed_sp_init_data init_data;
1581 struct qed_spq_entry *p_ent;
1582 int rc;
1583
1584 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1585
1586 if (move_to_err && !(qp->req_offloaded))
1587 return 0;
1588
1589 /* Get SPQ entry */
1590 memset(&init_data, 0, sizeof(init_data));
1591 init_data.cid = qp->icid + 1;
1592 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1593 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1594
1595 rc = qed_sp_init_request(p_hwfn, &p_ent,
1596 ROCE_EVENT_MODIFY_QP,
1597 PROTOCOLID_ROCE, &init_data);
1598 if (rc) {
1599 DP_NOTICE(p_hwfn, "rc = %d\n", rc);
1600 return rc;
1601 }
1602
1603 p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
1604
1605 p_ramrod->flags = 0;
1606
1607 SET_FIELD(p_ramrod->flags,
1608 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
1609
1610 SET_FIELD(p_ramrod->flags,
1611 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
1612
1613 SET_FIELD(p_ramrod->flags,
1614 ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
1615 qp->sqd_async);
1616
1617 SET_FIELD(p_ramrod->flags,
1618 ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
1619 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
1620
1621 SET_FIELD(p_ramrod->flags,
1622 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
1623 GET_FIELD(modify_flags,
1624 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
1625
1626 SET_FIELD(p_ramrod->flags,
1627 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
1628 GET_FIELD(modify_flags,
1629 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
1630
1631 SET_FIELD(p_ramrod->flags,
1632 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
1633 GET_FIELD(modify_flags,
1634 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
1635
1636 SET_FIELD(p_ramrod->flags,
1637 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
1638 GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
1639
1640 SET_FIELD(p_ramrod->flags,
1641 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
1642 GET_FIELD(modify_flags,
1643 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
1644
1645 p_ramrod->fields = 0;
1646 SET_FIELD(p_ramrod->fields,
1647 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
1648
1649 SET_FIELD(p_ramrod->fields,
1650 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
1651 qp->rnr_retry_cnt);
1652
1653 p_ramrod->max_ord = qp->max_rd_atomic_req;
1654 p_ramrod->traffic_class = qp->traffic_class_tos;
1655 p_ramrod->hop_limit = qp->hop_limit_ttl;
1656 p_ramrod->p_key = cpu_to_le16(qp->pkey);
1657 p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
1658 p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
1659 p_ramrod->mtu = cpu_to_le16(qp->mtu);
1660 qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
1661 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1662
1663 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
1664 return rc;
1665}
1666
1667static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
1668 struct qed_rdma_qp *qp,
1669 u32 *num_invalidated_mw)
1670{
1671 struct roce_destroy_qp_resp_output_params *p_ramrod_res;
1672 struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
1673 struct qed_sp_init_data init_data;
1674 struct qed_spq_entry *p_ent;
1675 dma_addr_t ramrod_res_phys;
1676 int rc;
1677
1678 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1679
1680 if (!qp->resp_offloaded)
1681 return 0;
1682
1683 /* Get SPQ entry */
1684 memset(&init_data, 0, sizeof(init_data));
1685 init_data.cid = qp->icid;
1686 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1687 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1688
1689 rc = qed_sp_init_request(p_hwfn, &p_ent,
1690 ROCE_RAMROD_DESTROY_QP,
1691 PROTOCOLID_ROCE, &init_data);
1692 if (rc)
1693 return rc;
1694
1695 p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
1696
1697 p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
1698 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1699 &ramrod_res_phys, GFP_KERNEL);
1700
1701 if (!p_ramrod_res) {
1702 rc = -ENOMEM;
1703 DP_NOTICE(p_hwfn,
1704 "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
1705 rc);
1706 return rc;
1707 }
1708
1709 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1710
1711 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1712 if (rc)
1713 goto err;
1714
1715 *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
1716
1717 /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
1718 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1719 qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
1720 qp->irq, qp->irq_phys_addr);
1721
1722 qp->resp_offloaded = false;
1723
1724 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
1725
1726err:
1727 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1728 sizeof(struct roce_destroy_qp_resp_output_params),
1729 p_ramrod_res, ramrod_res_phys);
1730
1731 return rc;
1732}
1733
1734static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
1735 struct qed_rdma_qp *qp,
1736 u32 *num_bound_mw)
1737{
1738 struct roce_destroy_qp_req_output_params *p_ramrod_res;
1739 struct roce_destroy_qp_req_ramrod_data *p_ramrod;
1740 struct qed_sp_init_data init_data;
1741 struct qed_spq_entry *p_ent;
1742 dma_addr_t ramrod_res_phys;
1743 int rc = -ENOMEM;
1744
1745 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1746
1747 if (!qp->req_offloaded)
1748 return 0;
1749
1750 p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
1751 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1752 sizeof(*p_ramrod_res),
1753 &ramrod_res_phys, GFP_KERNEL);
1754 if (!p_ramrod_res) {
1755 DP_NOTICE(p_hwfn,
1756 "qed destroy requester failed: cannot allocate memory (ramrod)\n");
1757 return rc;
1758 }
1759
1760 /* Get SPQ entry */
1761 memset(&init_data, 0, sizeof(init_data));
1762 init_data.cid = qp->icid + 1;
1763 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1764 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1765
1766 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
1767 PROTOCOLID_ROCE, &init_data);
1768 if (rc)
1769 goto err;
1770
1771 p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
1772 DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
1773
1774 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1775 if (rc)
1776 goto err;
1777
1778 *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
1779
1780 /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
1781 dma_free_coherent(&p_hwfn->cdev->pdev->dev,
1782 qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
1783 qp->orq, qp->orq_phys_addr);
1784
1785 qp->req_offloaded = false;
1786
1787 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
1788
1789err:
1790 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
1791 p_ramrod_res, ramrod_res_phys);
1792
1793 return rc;
1794}
1795
1796int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
1797 struct qed_rdma_qp *qp,
1798 struct qed_rdma_query_qp_out_params *out_params)
1799{
1800 struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
1801 struct roce_query_qp_req_output_params *p_req_ramrod_res;
1802 struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
1803 struct roce_query_qp_req_ramrod_data *p_req_ramrod;
1804 struct qed_sp_init_data init_data;
1805 dma_addr_t resp_ramrod_res_phys;
1806 dma_addr_t req_ramrod_res_phys;
1807 struct qed_spq_entry *p_ent;
1808 bool rq_err_state;
1809 bool sq_err_state;
1810 bool sq_draining;
1811 int rc = -ENOMEM;
1812
1813 if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
1814 /* We can't send ramrod to the fw since this qp wasn't offloaded
1815 * to the fw yet
1816 */
1817 out_params->draining = false;
1818 out_params->rq_psn = qp->rq_psn;
1819 out_params->sq_psn = qp->sq_psn;
1820 out_params->state = qp->cur_state;
1821
1822 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
1823 return 0;
1824 }
1825
1826 if (!(qp->resp_offloaded)) {
1827 DP_NOTICE(p_hwfn,
1828 			  "The responder's qp should be offloaded before the requester's\n");
1829 return -EINVAL;
1830 }
1831
1832 /* Send a query responder ramrod to FW to get RQ-PSN and state */
1833 p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
1834 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1835 sizeof(*p_resp_ramrod_res),
1836 &resp_ramrod_res_phys, GFP_KERNEL);
1837 if (!p_resp_ramrod_res) {
1838 DP_NOTICE(p_hwfn,
1839 "qed query qp failed: cannot allocate memory (ramrod)\n");
1840 return rc;
1841 }
1842
1843 /* Get SPQ entry */
1844 memset(&init_data, 0, sizeof(init_data));
1845 init_data.cid = qp->icid;
1846 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1847 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1848 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1849 PROTOCOLID_ROCE, &init_data);
1850 if (rc)
1851 goto err_resp;
1852
1853 p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
1854 DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
1855
1856 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1857 if (rc)
1858 goto err_resp;
1859
1860 	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
1861 	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
1862 				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
1863 
1864 	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1865 			  p_resp_ramrod_res, resp_ramrod_res_phys);
1866
1867 if (!(qp->req_offloaded)) {
1868 /* Don't send query qp for the requester */
1869 out_params->sq_psn = qp->sq_psn;
1870 out_params->draining = false;
1871
1872 if (rq_err_state)
1873 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1874
1875 out_params->state = qp->cur_state;
1876
1877 return 0;
1878 }
1879
1880 /* Send a query requester ramrod to FW to get SQ-PSN and state */
1881 p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
1882 dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
1883 sizeof(*p_req_ramrod_res),
1884 &req_ramrod_res_phys,
1885 GFP_KERNEL);
1886 if (!p_req_ramrod_res) {
1887 rc = -ENOMEM;
1888 DP_NOTICE(p_hwfn,
1889 "qed query qp failed: cannot allocate memory (ramrod)\n");
1890 return rc;
1891 }
1892
1893 /* Get SPQ entry */
1894 init_data.cid = qp->icid + 1;
1895 rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
1896 PROTOCOLID_ROCE, &init_data);
1897 if (rc)
1898 goto err_req;
1899
1900 p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
1901 DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
1902
1903 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1904 if (rc)
1905 goto err_req;
1906
1907 	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
1908 	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1909 				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
1910 	sq_draining =
1911 	    GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
1912 		      ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
1913 
1914 	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1915 			  p_req_ramrod_res, req_ramrod_res_phys);
1916
1917 out_params->draining = false;
1918
1919 if (rq_err_state)
1920 qp->cur_state = QED_ROCE_QP_STATE_ERR;
1921 else if (sq_err_state)
1922 qp->cur_state = QED_ROCE_QP_STATE_SQE;
1923 else if (sq_draining)
1924 out_params->draining = true;
1925 out_params->state = qp->cur_state;
1926
1927 return 0;
1928
1929err_req:
1930 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
1931 p_req_ramrod_res, req_ramrod_res_phys);
1932 return rc;
1933err_resp:
1934 dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
1935 p_resp_ramrod_res, resp_ramrod_res_phys);
1936 return rc;
1937}
1938
1939int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1940{
1941 u32 num_invalidated_mw = 0;
1942 u32 num_bound_mw = 0;
1943 u32 start_cid;
1944 int rc;
1945
1946 /* Destroys the specified QP */
1947 if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
1948 (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
1949 (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
1950 DP_NOTICE(p_hwfn,
1951 "QP must be in error, reset or init state before destroying it\n");
1952 return -EINVAL;
1953 }
1954
1955 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, &num_invalidated_mw);
1956 if (rc)
1957 return rc;
1958
1959 /* Send destroy requester ramrod */
1960 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp, &num_bound_mw);
1961 if (rc)
1962 return rc;
1963
1964 if (num_invalidated_mw != num_bound_mw) {
1965 DP_NOTICE(p_hwfn,
1966 			  "number of invalidated memory windows differs from the number of bound ones\n");
1967 return -EINVAL;
1968 }
1969
1970 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
1971
1972 start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
1973 p_hwfn->p_rdma_info->proto);
1974
1975 /* Release responder's icid */
1976 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
1977 qp->icid - start_cid);
1978
1979 /* Release requester's icid */
1980 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map,
1981 qp->icid + 1 - start_cid);
1982
1983 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
1984
1985 return 0;
1986}
1987
1988int qed_rdma_query_qp(void *rdma_cxt,
1989 struct qed_rdma_qp *qp,
1990 struct qed_rdma_query_qp_out_params *out_params)
1991{
1992 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1993 int rc;
1994
1995 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
1996
1997 /* The following fields are filled in from qp and not FW as they can't
1998 * be modified by FW
1999 */
2000 out_params->mtu = qp->mtu;
2001 out_params->dest_qp = qp->dest_qp;
2002 out_params->incoming_atomic_en = qp->incoming_atomic_en;
2003 out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
2004 out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
2005 out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
2006 out_params->dgid = qp->dgid;
2007 out_params->flow_label = qp->flow_label;
2008 out_params->hop_limit_ttl = qp->hop_limit_ttl;
2009 out_params->traffic_class_tos = qp->traffic_class_tos;
2010 out_params->timeout = qp->ack_timeout;
2011 out_params->rnr_retry = qp->rnr_retry_cnt;
2012 out_params->retry_cnt = qp->retry_cnt;
2013 out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
2014 out_params->pkey_index = 0;
2015 out_params->max_rd_atomic = qp->max_rd_atomic_req;
2016 out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
2017 out_params->sqd_async = qp->sqd_async;
2018
2019 rc = qed_roce_query_qp(p_hwfn, qp, out_params);
2020
2021 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
2022 return rc;
2023}
2024
2025int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
2026{
2027 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2028 int rc = 0;
2029
2030 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
2031
2032 rc = qed_roce_destroy_qp(p_hwfn, qp);
2033
2034 /* free qp params struct */
2035 kfree(qp);
2036
2037 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
2038 return rc;
2039}
2040
2041struct qed_rdma_qp *
2042qed_rdma_create_qp(void *rdma_cxt,
2043 struct qed_rdma_create_qp_in_params *in_params,
2044 struct qed_rdma_create_qp_out_params *out_params)
2045{
2046 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2047 struct qed_rdma_qp *qp;
2048 u8 max_stats_queues;
2049 int rc;
2050
2051 if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
2052 DP_ERR(p_hwfn->cdev,
2053 		       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?)\n",
2054 rdma_cxt, in_params, out_params);
2055 return NULL;
2056 }
2057
2058 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2059 "qed rdma create qp called with qp_handle = %08x%08x\n",
2060 in_params->qp_handle_hi, in_params->qp_handle_lo);
2061
2062 /* Some sanity checks... */
2063 max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
2064 if (in_params->stats_queue >= max_stats_queues) {
2065 DP_ERR(p_hwfn->cdev,
2066 "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
2067 in_params->stats_queue, max_stats_queues);
2068 return NULL;
2069 }
2070
2071 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
2072 if (!qp) {
2073 DP_NOTICE(p_hwfn, "Failed to allocate qed_rdma_qp\n");
2074 return NULL;
2075 }
2076
2077 rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
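	/* The QP id reported to the upper layer is a fixed prefix combined
	 * with the allocated icid.
	 */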
2078 qp->qpid = ((0xFF << 16) | qp->icid);
2079
2080 DP_INFO(p_hwfn, "ROCE qpid=%x\n", qp->qpid);
2081
2082 if (rc) {
2083 kfree(qp);
2084 return NULL;
2085 }
2086
2087 qp->cur_state = QED_ROCE_QP_STATE_RESET;
2088 qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
2089 qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
2090 qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
2091 qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
2092 qp->use_srq = in_params->use_srq;
2093 qp->signal_all = in_params->signal_all;
2094 qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
2095 qp->pd = in_params->pd;
2096 qp->dpi = in_params->dpi;
2097 qp->sq_cq_id = in_params->sq_cq_id;
2098 qp->sq_num_pages = in_params->sq_num_pages;
2099 qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
2100 qp->rq_cq_id = in_params->rq_cq_id;
2101 qp->rq_num_pages = in_params->rq_num_pages;
2102 qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
2103 qp->srq_id = in_params->srq_id;
2104 qp->req_offloaded = false;
2105 qp->resp_offloaded = false;
2106 qp->e2e_flow_control_en = qp->use_srq ? false : true;
2107 qp->stats_queue = in_params->stats_queue;
2108
2109 out_params->icid = qp->icid;
2110 out_params->qp_id = qp->qpid;
2111
2112 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
2113 return qp;
2114}
2115
2116static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
2117 struct qed_rdma_qp *qp,
2118 enum qed_roce_qp_state prev_state,
2119 struct qed_rdma_modify_qp_in_params *params)
2120{
2121 u32 num_invalidated_mw = 0, num_bound_mw = 0;
2122 int rc = 0;
2123
2124 /* Perform additional operations according to the current state and the
2125 * next state
2126 */
2127 if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
2128 (prev_state == QED_ROCE_QP_STATE_RESET)) &&
2129 (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
2130 /* Init->RTR or Reset->RTR */
2131 rc = qed_roce_sp_create_responder(p_hwfn, qp);
2132 return rc;
2133 } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
2134 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2135 /* RTR-> RTS */
2136 rc = qed_roce_sp_create_requester(p_hwfn, qp);
2137 if (rc)
2138 return rc;
2139
2140 /* Send modify responder ramrod */
2141 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2142 params->modify_flags);
2143 return rc;
2144 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2145 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2146 /* RTS->RTS */
2147 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2148 params->modify_flags);
2149 if (rc)
2150 return rc;
2151
2152 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2153 params->modify_flags);
2154 return rc;
2155 } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
2156 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2157 /* RTS->SQD */
2158 rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
2159 params->modify_flags);
2160 return rc;
2161 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2162 (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
2163 /* SQD->SQD */
2164 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2165 params->modify_flags);
2166 if (rc)
2167 return rc;
2168
2169 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2170 params->modify_flags);
2171 return rc;
2172 } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
2173 (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
2174 /* SQD->RTS */
2175 rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
2176 params->modify_flags);
2177 if (rc)
2178 return rc;
2179
2180 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
2181 params->modify_flags);
2182
2183 return rc;
2184 } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR ||
2185 qp->cur_state == QED_ROCE_QP_STATE_SQE) {
2186 /* ->ERR */
2187 rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
2188 params->modify_flags);
2189 if (rc)
2190 return rc;
2191
2192 rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
2193 params->modify_flags);
2194 return rc;
2195 } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
2196 /* Any state -> RESET */
2197
2198 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
2199 &num_invalidated_mw);
2200 if (rc)
2201 return rc;
2202
2203 rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
2204 &num_bound_mw);
2205
2206 if (num_invalidated_mw != num_bound_mw) {
2207 DP_NOTICE(p_hwfn,
2208 				  "number of invalidated memory windows differs from the number of bound ones\n");
2209 return -EINVAL;
2210 }
2211 } else {
2212 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No additional ramrod needed for this transition\n");
2213 }
2214
2215 return rc;
2216}
2217
2218int qed_rdma_modify_qp(void *rdma_cxt,
2219 struct qed_rdma_qp *qp,
2220 struct qed_rdma_modify_qp_in_params *params)
2221{
2222 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2223 enum qed_roce_qp_state prev_state;
2224 int rc = 0;
2225
2226 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
2227 qp->icid, params->new_state);
2228
2229 if (rc) {
2230 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2231 return rc;
2232 }
2233
2234 if (GET_FIELD(params->modify_flags,
2235 QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
2236 qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
2237 qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
2238 qp->incoming_atomic_en = params->incoming_atomic_en;
2239 }
2240
2241 /* Update QP structure with the updated values */
2242 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
2243 qp->roce_mode = params->roce_mode;
2244 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
2245 qp->pkey = params->pkey;
2246 if (GET_FIELD(params->modify_flags,
2247 QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
2248 qp->e2e_flow_control_en = params->e2e_flow_control_en;
2249 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
2250 qp->dest_qp = params->dest_qp;
2251 if (GET_FIELD(params->modify_flags,
2252 QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
2253 /* Indicates that the following parameters have changed:
2254 * Traffic class, flow label, hop limit, source GID,
2255 * destination GID, loopback indicator
2256 */
2257 qp->traffic_class_tos = params->traffic_class_tos;
2258 qp->flow_label = params->flow_label;
2259 qp->hop_limit_ttl = params->hop_limit_ttl;
2260
2261 qp->sgid = params->sgid;
2262 qp->dgid = params->dgid;
2263 qp->udp_src_port = 0;
2264 qp->vlan_id = params->vlan_id;
2265 qp->mtu = params->mtu;
2266 qp->lb_indication = params->lb_indication;
2267 memcpy((u8 *)&qp->remote_mac_addr[0],
2268 (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
2269 if (params->use_local_mac) {
2270 memcpy((u8 *)&qp->local_mac_addr[0],
2271 (u8 *)&params->local_mac_addr[0], ETH_ALEN);
2272 } else {
2273 memcpy((u8 *)&qp->local_mac_addr[0],
2274 (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
2275 }
2276 }
2277 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
2278 qp->rq_psn = params->rq_psn;
2279 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
2280 qp->sq_psn = params->sq_psn;
2281 if (GET_FIELD(params->modify_flags,
2282 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
2283 qp->max_rd_atomic_req = params->max_rd_atomic_req;
2284 if (GET_FIELD(params->modify_flags,
2285 QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
2286 qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
2287 if (GET_FIELD(params->modify_flags,
2288 QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
2289 qp->ack_timeout = params->ack_timeout;
2290 if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
2291 qp->retry_cnt = params->retry_cnt;
2292 if (GET_FIELD(params->modify_flags,
2293 QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
2294 qp->rnr_retry_cnt = params->rnr_retry_cnt;
2295 if (GET_FIELD(params->modify_flags,
2296 QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
2297 qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
2298
2299 qp->sqd_async = params->sqd_async;
2300
2301 prev_state = qp->cur_state;
2302 if (GET_FIELD(params->modify_flags,
2303 QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
2304 qp->cur_state = params->new_state;
2305 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
2306 qp->cur_state);
2307 }
2308
2309 rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
2310
2311 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
2312 return rc;
2313}
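qed_rdma_modify_qp() only consumes the fields whose VALID bit is set in params->modify_flags, and the (prev_state, new_state) pair then selects which responder/requester ramrods qed_roce_modify_qp() issues. A minimal caller-side sketch of an Init->RTR transition follows; it is not part of this patch (the real callers live in the qedr driver), the helper name is hypothetical, and only the SET_FIELD macro plus the VALID_* definitions already referenced above are assumed.

static int example_qp_to_rtr(void *rdma_cxt, struct qed_rdma_qp *qp,
			     u32 rq_psn, u32 dest_qp)
{
	struct qed_rdma_modify_qp_in_params in;

	memset(&in, 0, sizeof(in));

	/* Only fields whose VALID bit is set are applied to the QP */
	SET_FIELD(in.modify_flags, QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
	in.new_state = QED_ROCE_QP_STATE_RTR;

	SET_FIELD(in.modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
	in.rq_psn = rq_psn;

	SET_FIELD(in.modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
	in.dest_qp = dest_qp;

	/* Init->RTR lands in the qed_roce_sp_create_responder() branch of
	 * qed_roce_modify_qp()
	 */
	return qed_rdma_modify_qp(rdma_cxt, qp, &in);
}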
2314
2315int qed_rdma_register_tid(void *rdma_cxt,
2316 struct qed_rdma_register_tid_in_params *params)
2317{
2318 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2319 struct rdma_register_tid_ramrod_data *p_ramrod;
2320 struct qed_sp_init_data init_data;
2321 struct qed_spq_entry *p_ent;
2322 enum rdma_tid_type tid_type;
2323 u8 fw_return_code;
2324 int rc;
2325
2326 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
2327
2328 /* Get SPQ entry */
2329 memset(&init_data, 0, sizeof(init_data));
2330 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2331 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2332
2333 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
2334 p_hwfn->p_rdma_info->proto, &init_data);
2335 if (rc) {
2336 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2337 return rc;
2338 }
2339
2340 if (p_hwfn->p_rdma_info->last_tid < params->itid)
2341 p_hwfn->p_rdma_info->last_tid = params->itid;
2342
2343 p_ramrod = &p_ent->ramrod.rdma_register_tid;
2344
2345 p_ramrod->flags = 0;
2346 SET_FIELD(p_ramrod->flags,
2347 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
2348 params->pbl_two_level);
2349
2350 SET_FIELD(p_ramrod->flags,
2351 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
2352
2353 SET_FIELD(p_ramrod->flags,
2354 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
2355
2356 /* Don't initialize D/C field, as it may override other bits. */
2357 if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
2358 SET_FIELD(p_ramrod->flags,
2359 RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
2360 params->page_size_log - 12);
2361
2362 SET_FIELD(p_ramrod->flags,
2363 RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
2364 p_hwfn->p_rdma_info->last_tid);
2365
2366 SET_FIELD(p_ramrod->flags,
2367 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
2368 params->remote_read);
2369
2370 SET_FIELD(p_ramrod->flags,
2371 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
2372 params->remote_write);
2373
2374 SET_FIELD(p_ramrod->flags,
2375 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
2376 params->remote_atomic);
2377
2378 SET_FIELD(p_ramrod->flags,
2379 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
2380 params->local_write);
2381
2382 SET_FIELD(p_ramrod->flags,
2383 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
2384
2385 SET_FIELD(p_ramrod->flags,
2386 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
2387 params->mw_bind);
2388
2389 SET_FIELD(p_ramrod->flags1,
2390 RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
2391 params->pbl_page_size_log - 12);
2392
2393 SET_FIELD(p_ramrod->flags2,
2394 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
2395
2396 switch (params->tid_type) {
2397 case QED_RDMA_TID_REGISTERED_MR:
2398 tid_type = RDMA_TID_REGISTERED_MR;
2399 break;
2400 case QED_RDMA_TID_FMR:
2401 tid_type = RDMA_TID_FMR;
2402 break;
2403 case QED_RDMA_TID_MW_TYPE1:
2404 tid_type = RDMA_TID_MW_TYPE1;
2405 break;
2406 case QED_RDMA_TID_MW_TYPE2A:
2407 tid_type = RDMA_TID_MW_TYPE2A;
2408 break;
2409 default:
2410 rc = -EINVAL;
2411 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2412 return rc;
2413 }
2414 SET_FIELD(p_ramrod->flags1,
2415 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
2416
2417 p_ramrod->itid = cpu_to_le32(params->itid);
2418 p_ramrod->key = params->key;
2419 p_ramrod->pd = cpu_to_le16(params->pd);
2420 p_ramrod->length_hi = (u8)(params->length >> 32);
2421 p_ramrod->length_lo = DMA_LO_LE(params->length);
2422 if (params->zbva) {
2423 /* Lower 32 bits of the registered MR address.
2424 * In the case of a zero-based MR, this holds the FBO (first byte offset).
2425 */
2426 p_ramrod->va.hi = 0;
2427 p_ramrod->va.lo = cpu_to_le32(params->fbo);
2428 } else {
2429 DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
2430 }
2431 DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
2432
2433 /* DIF */
2434 if (params->dif_enabled) {
2435 SET_FIELD(p_ramrod->flags2,
2436 RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
2437 DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
2438 params->dif_error_addr);
2439 DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
2440 }
2441
2442 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2443
2444 if (fw_return_code != RDMA_RETURN_OK) {
2445 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2446 return -EINVAL;
2447 }
2448
2449 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
2450 return rc;
2451}
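For reference, a sketch of how the input structure consumed above could be filled for a plain PBL-backed memory region. This is illustrative only (not part of the patch); the helper name is hypothetical, the field names follow those used by qed_rdma_register_tid(), and the TID is assumed to have been reserved beforehand with qed_rdma_alloc_tid().

static int example_register_mr(void *rdma_cxt, u32 itid, u16 pd,
			       u64 vaddr, u64 length, u64 pbl_ptr)
{
	struct qed_rdma_register_tid_in_params tid;

	memset(&tid, 0, sizeof(tid));
	tid.itid = itid;
	tid.pd = pd;
	tid.tid_type = QED_RDMA_TID_REGISTERED_MR;
	tid.vaddr = vaddr;
	tid.length = length;
	tid.pbl_ptr = pbl_ptr;
	tid.page_size_log = 12;		/* 4K data pages */
	tid.pbl_page_size_log = 12;	/* 4K PBL pages */
	tid.local_read = true;
	tid.local_write = true;

	return qed_rdma_register_tid(rdma_cxt, &tid);
}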
2452
2453int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
2454{
2455 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2456 struct rdma_deregister_tid_ramrod_data *p_ramrod;
2457 struct qed_sp_init_data init_data;
2458 struct qed_spq_entry *p_ent;
2459 struct qed_ptt *p_ptt;
2460 u8 fw_return_code;
2461 int rc;
2462
2463 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
2464
2465 /* Get SPQ entry */
2466 memset(&init_data, 0, sizeof(init_data));
2467 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2468 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2469
2470 rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
2471 p_hwfn->p_rdma_info->proto, &init_data);
2472 if (rc) {
2473 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2474 return rc;
2475 }
2476
2477 p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
2478 p_ramrod->itid = cpu_to_le32(itid);
2479
2480 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2481 if (rc) {
2482 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
2483 return rc;
2484 }
2485
2486 if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
2487 DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
2488 return -EINVAL;
2489 } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
2490 /* Bit indicating that the TID is in use and a NIG drain is
2491 * required before sending the ramrod again
2492 */
2493 p_ptt = qed_ptt_acquire(p_hwfn);
2494 if (!p_ptt) {
2495 rc = -EBUSY;
2496 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2497 "Failed to acquire PTT\n");
2498 return rc;
2499 }
2500
2501 rc = qed_mcp_drain(p_hwfn, p_ptt);
2502 if (rc) {
2503 qed_ptt_release(p_hwfn, p_ptt);
2504 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2505 "Drain failed\n");
2506 return rc;
2507 }
2508
2509 qed_ptt_release(p_hwfn, p_ptt);
2510
2511 /* Resend the ramrod */
2512 rc = qed_sp_init_request(p_hwfn, &p_ent,
2513 RDMA_RAMROD_DEREGISTER_MR,
2514 p_hwfn->p_rdma_info->proto,
2515 &init_data);
2516 if (rc) {
2517 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2518 "Failed to init sp-element\n");
2519 return rc;
2520 }
2521
2522 rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
2523 if (rc) {
2524 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2525 "Ramrod failed\n");
2526 return rc;
2527 }
2528
2529 if (fw_return_code != RDMA_RETURN_OK) {
2530 DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
2531 fw_return_code);
2532 return rc;
2533 }
2534 }
2535
2536 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
2537 return rc;
2538}
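Together with qed_rdma_alloc_tid()/qed_rdma_free_tid() declared in qed_roce.h, the expected TID lifecycle is roughly the sketch below (illustrative only, not part of the patch; the helper name is hypothetical):

static int example_tid_lifecycle(void *rdma_cxt,
				 struct qed_rdma_register_tid_in_params *tid)
{
	int rc;

	rc = qed_rdma_alloc_tid(rdma_cxt, &tid->itid);
	if (rc)
		return rc;

	rc = qed_rdma_register_tid(rdma_cxt, tid);
	if (rc)
		goto out_free;

	/* ... the MR is usable here ... */

	rc = qed_rdma_deregister_tid(rdma_cxt, tid->itid);
out_free:
	qed_rdma_free_tid(rdma_cxt, tid->itid);
	return rc;
}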
2539
2540static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
2541{
2542 return QED_LEADING_HWFN(cdev);
2543}
2544
2545static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2546{
2547 u32 val;
2548
2549 val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
2550
2551 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
2552 DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
2553 "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
2554 val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
2555}
2556
2557void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2558{
2559 p_hwfn->db_bar_no_edpm = true;
2560
2561 qed_rdma_dpm_conf(p_hwfn, p_ptt);
2562}
2563
2564int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params)
2565{
2566 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2567 struct qed_ptt *p_ptt;
2568 int rc = -EBUSY;
2569
2570 DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
2571 "desired_cnq = %08x\n", params->desired_cnq);
2572
2573 p_ptt = qed_ptt_acquire(p_hwfn);
2574 if (!p_ptt)
2575 goto err;
2576
2577 rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
2578 if (rc)
2579 goto err1;
2580
2581 rc = qed_rdma_setup(p_hwfn, p_ptt, params);
2582 if (rc)
2583 goto err2;
2584
2585 qed_ptt_release(p_hwfn, p_ptt);
2586
2587 return rc;
2588
2589err2:
2590 qed_rdma_free(p_hwfn);
2591err1:
2592 qed_ptt_release(p_hwfn, p_ptt);
2593err:
2594 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
2595 return rc;
2596}
2597
2598static int qed_rdma_init(struct qed_dev *cdev,
2599 struct qed_rdma_start_in_params *params)
2600{
2601 return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
2602}
2603
2604void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
2605{
2606 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2607
2608 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
2609
2610 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
2611 qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
2612 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
2613}
2614
2615void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
2616 u8 connection_handle,
2617 void *cookie,
2618 dma_addr_t first_frag_addr,
2619 bool b_last_fragment, bool b_last_packet)
2620{
2621 struct qed_roce_ll2_packet *packet = cookie;
2622 struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
2623
2624 roce_ll2->cbs.tx_cb(roce_ll2->cb_cookie, packet);
2625}
2626
2627void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
2628 u8 connection_handle,
2629 void *cookie,
2630 dma_addr_t first_frag_addr,
2631 bool b_last_fragment, bool b_last_packet)
2632{
2633 qed_ll2b_complete_tx_gsi_packet(p_hwfn, connection_handle,
2634 cookie, first_frag_addr,
2635 b_last_fragment, b_last_packet);
2636}
2637
2638void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
2639 u8 connection_handle,
2640 void *cookie,
2641 dma_addr_t rx_buf_addr,
2642 u16 data_length,
2643 u8 data_length_error,
2644 u16 parse_flags,
2645 u16 vlan,
2646 u32 src_mac_addr_hi,
2647 u16 src_mac_addr_lo, bool b_last_packet)
2648{
2649 struct qed_roce_ll2_info *roce_ll2 = p_hwfn->ll2;
2650 struct qed_roce_ll2_rx_params params;
2651 struct qed_dev *cdev = p_hwfn->cdev;
2652 struct qed_roce_ll2_packet pkt;
2653
2654 DP_VERBOSE(cdev,
2655 QED_MSG_LL2,
2656 "roce ll2 rx complete: bus_addr=%p, len=%d, data_len_err=%d\n",
2657 (void *)(uintptr_t)rx_buf_addr,
2658 data_length, data_length_error);
2659
2660 memset(&pkt, 0, sizeof(pkt));
2661 pkt.n_seg = 1;
2662 pkt.payload[0].baddr = rx_buf_addr;
2663 pkt.payload[0].len = data_length;
2664
2665 memset(&params, 0, sizeof(params));
2666 params.vlan_id = vlan;
2667 *((u32 *)&params.smac[0]) = ntohl(src_mac_addr_hi);
2668 *((u16 *)&params.smac[4]) = ntohs(src_mac_addr_lo);
2669
2670 if (data_length_error) {
2671 DP_ERR(cdev,
2672 "roce ll2 rx complete: data length error %d, length=%d\n",
2673 data_length_error, data_length);
2674 params.rc = -EINVAL;
2675 }
2676
2677 roce_ll2->cbs.rx_cb(roce_ll2->cb_cookie, &pkt, &params);
2678}
2679
2680static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
2681 u8 *old_mac_address,
2682 u8 *new_mac_address)
2683{
2684 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2685 struct qed_ptt *p_ptt;
2686 int rc = 0;
2687
2688 if (!hwfn->ll2 || hwfn->ll2->handle == QED_LL2_UNUSED_HANDLE) {
2689 DP_ERR(cdev,
2690 "qed roce mac filter failed - roce_info/ll2 NULL\n");
2691 return -EINVAL;
2692 }
2693
2694 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
2695 if (!p_ptt) {
2696 DP_ERR(cdev,
2697 "qed roce ll2 mac filter set: failed to acquire PTT\n");
2698 return -EINVAL;
2699 }
2700
2701 mutex_lock(&hwfn->ll2->lock);
2702 if (old_mac_address)
2703 qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2704 old_mac_address);
2705 if (new_mac_address)
2706 rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt,
2707 new_mac_address);
2708 mutex_unlock(&hwfn->ll2->lock);
2709
2710 qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
2711
2712 if (rc)
2713 DP_ERR(cdev,
2714 "qed roce ll2 mac filter set: failed to add mac filter\n");
2715
2716 return rc;
2717}
2718
2719static int qed_roce_ll2_start(struct qed_dev *cdev,
2720 struct qed_roce_ll2_params *params)
2721{
2722 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2723 struct qed_roce_ll2_info *roce_ll2;
2724 struct qed_ll2_info ll2_params;
2725 int rc;
2726
2727 if (!params) {
2728 DP_ERR(cdev, "qed roce ll2 start: failed due to NULL params\n");
2729 return -EINVAL;
2730 }
2731 if (!params->cbs.tx_cb || !params->cbs.rx_cb) {
2732 DP_ERR(cdev,
2733 "qed roce ll2 start: failed due to NULL tx/rx. tx_cb=%p, rx_cb=%p\n",
2734 params->cbs.tx_cb, params->cbs.rx_cb);
2735 return -EINVAL;
2736 }
2737 if (!is_valid_ether_addr(params->mac_address)) {
2738 DP_ERR(cdev,
2739 "qed roce ll2 start: failed due to invalid Ethernet address %pM\n",
2740 params->mac_address);
2741 return -EINVAL;
2742 }
2743
2744 /* Initialize */
2745 roce_ll2 = kzalloc(sizeof(*roce_ll2), GFP_ATOMIC);
2746 if (!roce_ll2) {
2747 DP_ERR(cdev, "qed roce ll2 start: failed memory allocation\n");
2748 return -ENOMEM;
2749 }
2750 memset(roce_ll2, 0, sizeof(*roce_ll2));
2751 roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
2752 roce_ll2->cbs = params->cbs;
2753 roce_ll2->cb_cookie = params->cb_cookie;
2754 mutex_init(&roce_ll2->lock);
2755
2756 memset(&ll2_params, 0, sizeof(ll2_params));
2757 ll2_params.conn_type = QED_LL2_TYPE_ROCE;
2758 ll2_params.mtu = params->mtu;
2759 ll2_params.rx_drop_ttl0_flg = true;
2760 ll2_params.rx_vlan_removal_en = false;
2761 ll2_params.tx_dest = CORE_TX_DEST_NW;
2762 ll2_params.ai_err_packet_too_big = LL2_DROP_PACKET;
2763 ll2_params.ai_err_no_buf = LL2_DROP_PACKET;
2764 ll2_params.gsi_enable = true;
2765
2766 rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_params,
2767 params->max_rx_buffers,
2768 params->max_tx_buffers,
2769 &roce_ll2->handle);
2770 if (rc) {
2771 DP_ERR(cdev,
2772 "qed roce ll2 start: failed to acquire LL2 connection (rc=%d)\n",
2773 rc);
2774 goto err;
2775 }
2776
2777 rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev),
2778 roce_ll2->handle);
2779 if (rc) {
2780 DP_ERR(cdev,
2781 "qed roce ll2 start: failed to establish LL2 connection (rc=%d)\n",
2782 rc);
2783 goto err1;
2784 }
2785
2786 hwfn->ll2 = roce_ll2;
2787
2788 rc = qed_roce_ll2_set_mac_filter(cdev, NULL, params->mac_address);
2789 if (rc) {
2790 hwfn->ll2 = NULL;
2791 goto err2;
2792 }
2793 ether_addr_copy(roce_ll2->mac_address, params->mac_address);
2794
2795 return 0;
2796
2797err2:
2798 qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2799err1:
2800 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2801err:
2802 kfree(roce_ll2);
2803 return rc;
2804}
2805
2806static int qed_roce_ll2_stop(struct qed_dev *cdev)
2807{
2808 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2809 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2810 int rc;
2811
2812 if (!cdev) {
2813 DP_ERR(cdev, "qed roce ll2 stop: invalid cdev\n");
2814 return -EINVAL;
2815 }
2816
2817 if (roce_ll2->handle == QED_LL2_UNUSED_HANDLE) {
2818 DP_ERR(cdev, "qed roce ll2 stop: cannot stop an unused LL2\n");
2819 return -EINVAL;
2820 }
2821
2822 /* remove LL2 MAC address filter */
2823 rc = qed_roce_ll2_set_mac_filter(cdev, roce_ll2->mac_address, NULL);
2824 eth_zero_addr(roce_ll2->mac_address);
2825
2826 rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
2827 roce_ll2->handle);
2828 if (rc)
2829 DP_ERR(cdev,
2830 "qed roce ll2 stop: failed to terminate LL2 connection (rc=%d)\n",
2831 rc);
2832
2833 qed_ll2_release_connection(QED_LEADING_HWFN(cdev), roce_ll2->handle);
2834
2835 roce_ll2->handle = QED_LL2_UNUSED_HANDLE;
2836
2837 kfree(roce_ll2);
2838
2839 return rc;
2840}
2841
2842static int qed_roce_ll2_tx(struct qed_dev *cdev,
2843 struct qed_roce_ll2_packet *pkt,
2844 struct qed_roce_ll2_tx_params *params)
2845{
2846 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2847 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2848 enum qed_ll2_roce_flavor_type qed_roce_flavor;
2849 u8 flags = 0;
2850 int rc;
2851 int i;
2852
2853 if (!cdev || !pkt || !params) {
2854 DP_ERR(cdev,
2855 "roce ll2 tx: failed tx because one of the following is NULL - drv=%p, pkt=%p, params=%p\n",
2856 cdev, pkt, params);
2857 return -EINVAL;
2858 }
2859
2860 qed_roce_flavor = (pkt->roce_mode == ROCE_V1) ? QED_LL2_ROCE
2861 : QED_LL2_RROCE;
2862
2863 if (pkt->roce_mode == ROCE_V2_IPV4)
2864 flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
2865
2866 /* Tx header */
2867 rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
2868 1 + pkt->n_seg, 0, flags, 0,
2869 qed_roce_flavor, pkt->header.baddr,
2870 pkt->header.len, pkt, 1);
2871 if (rc) {
2872 DP_ERR(cdev, "roce ll2 tx: header failed (rc=%d)\n", rc);
2873 return QED_ROCE_TX_HEAD_FAILURE;
2874 }
2875
2876 /* Tx payload */
2877 for (i = 0; i < pkt->n_seg; i++) {
2878 rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev),
2879 roce_ll2->handle,
2880 pkt->payload[i].baddr,
2881 pkt->payload[i].len);
2882 if (rc) {
2883 /* If this fails there is not much to do here: a partial packet
2884 * has already been posted and we can't free the memory, so we
2885 * must wait for the completion
2886 */
2887 DP_ERR(cdev,
2888 "roce ll2 tx: payload failed (rc=%d)\n", rc);
2889 return QED_ROCE_TX_FRAG_FAILURE;
2890 }
2891 }
2892
2893 return 0;
2894}
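A sketch of how a GSI packet would be handed to the op above: the header occupies its own BD, so n_seg counts payload fragments only and the prepare call posts 1 + n_seg BDs in total. The helper below is hypothetical (not part of the patch); the DMA addresses are assumed to be already mapped by the caller, and the field names match those used by qed_roce_ll2_tx().

static int example_gsi_send(struct qed_dev *cdev,
			    dma_addr_t hdr_addr, u16 hdr_len,
			    dma_addr_t payload_addr, u16 payload_len)
{
	struct qed_roce_ll2_tx_params tx_params;
	struct qed_roce_ll2_packet pkt;

	memset(&tx_params, 0, sizeof(tx_params));
	memset(&pkt, 0, sizeof(pkt));

	pkt.roce_mode = ROCE_V1;	/* no IP checksum offload needed */
	pkt.header.baddr = hdr_addr;
	pkt.header.len = hdr_len;
	pkt.n_seg = 1;			/* payload fragments only */
	pkt.payload[0].baddr = payload_addr;
	pkt.payload[0].len = payload_len;

	return qed_roce_ll2_tx(cdev, &pkt, &tx_params);
}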
2895
2896static int qed_roce_ll2_post_rx_buffer(struct qed_dev *cdev,
2897 struct qed_roce_ll2_buffer *buf,
2898 u64 cookie, u8 notify_fw)
2899{
2900 return qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
2901 QED_LEADING_HWFN(cdev)->ll2->handle,
2902 buf->baddr, buf->len,
2903 (void *)(uintptr_t)cookie, notify_fw);
2904}
2905
2906static int qed_roce_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
2907{
2908 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2909 struct qed_roce_ll2_info *roce_ll2 = hwfn->ll2;
2910
2911 return qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
2912 roce_ll2->handle, stats);
2913}
2914
2915static const struct qed_rdma_ops qed_rdma_ops_pass = {
2916 .common = &qed_common_ops_pass,
2917 .fill_dev_info = &qed_fill_rdma_dev_info,
2918 .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
2919 .rdma_init = &qed_rdma_init,
2920 .rdma_add_user = &qed_rdma_add_user,
2921 .rdma_remove_user = &qed_rdma_remove_user,
2922 .rdma_stop = &qed_rdma_stop,
2923 .rdma_query_port = &qed_rdma_query_port,
2924 .rdma_query_device = &qed_rdma_query_device,
2925 .rdma_get_start_sb = &qed_rdma_get_sb_start,
2926 .rdma_get_rdma_int = &qed_rdma_get_int,
2927 .rdma_set_rdma_int = &qed_rdma_set_int,
2928 .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
2929 .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
2930 .rdma_alloc_pd = &qed_rdma_alloc_pd,
2931 .rdma_dealloc_pd = &qed_rdma_free_pd,
2932 .rdma_create_cq = &qed_rdma_create_cq,
2933 .rdma_destroy_cq = &qed_rdma_destroy_cq,
2934 .rdma_create_qp = &qed_rdma_create_qp,
2935 .rdma_modify_qp = &qed_rdma_modify_qp,
2936 .rdma_query_qp = &qed_rdma_query_qp,
2937 .rdma_destroy_qp = &qed_rdma_destroy_qp,
2938 .rdma_alloc_tid = &qed_rdma_alloc_tid,
2939 .rdma_free_tid = &qed_rdma_free_tid,
2940 .rdma_register_tid = &qed_rdma_register_tid,
2941 .rdma_deregister_tid = &qed_rdma_deregister_tid,
2942 .roce_ll2_start = &qed_roce_ll2_start,
2943 .roce_ll2_stop = &qed_roce_ll2_stop,
2944 .roce_ll2_tx = &qed_roce_ll2_tx,
2945 .roce_ll2_post_rx_buffer = &qed_roce_ll2_post_rx_buffer,
2946 .roce_ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
2947 .roce_ll2_stats = &qed_roce_ll2_stats,
2948};
2949
2950const struct qed_rdma_ops *qed_get_rdma_ops(void)
2951{
2952 return &qed_rdma_ops_pass;
2953}
2954EXPORT_SYMBOL(qed_get_rdma_ops);
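The exported op table is the contract the qedr driver (the second half of this series) is expected to consume: fetch the ops, resolve the per-function context and drive everything else through it. A minimal sketch of that consumer side follows; it is not part of this patch, the example_ name is hypothetical, and most of qed_rdma_start_in_params is deliberately left zeroed.

static int example_qedr_probe(struct qed_dev *cdev, void **rdma_cxt)
{
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_rdma_start_in_params start;

	if (!ops)
		return -ENODEV;

	/* The context returned here is the 'rdma_cxt' expected by every
	 * other op in qed_rdma_ops
	 */
	*rdma_cxt = ops->rdma_get_rdma_ctx(cdev);

	memset(&start, 0, sizeof(start));
	start.desired_cnq = 1;	/* remaining fields omitted in this sketch */

	return ops->rdma_init(cdev, &start);
}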
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
new file mode 100644
index 000000000000..2f091e8a0f40
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -0,0 +1,216 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _QED_ROCE_H
33#define _QED_ROCE_H
34#include <linux/types.h>
35#include <linux/bitops.h>
36#include <linux/kernel.h>
37#include <linux/list.h>
38#include <linux/slab.h>
39#include <linux/spinlock.h>
40#include <linux/qed/qed_if.h>
41#include <linux/qed/qed_roce_if.h>
42#include "qed.h"
43#include "qed_dev_api.h"
44#include "qed_hsi.h"
45#include "qed_ll2.h"
46
47#define QED_RDMA_MAX_FMR (RDMA_MAX_TIDS)
48#define QED_RDMA_MAX_P_KEY (1)
49#define QED_RDMA_MAX_WQE (0x7FFF)
50#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
51#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
52#define QED_RDMA_ACK_DELAY (15)
53#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
54#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
55#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
56/* Add 1 for header element */
57#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
58#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
59#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
60#define QED_RDMA_MAX_SRQS (32 * 1024)
61
62#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
63#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
64
65enum qed_rdma_toggle_bit {
66 QED_RDMA_TOGGLE_BIT_CLEAR = 0,
67 QED_RDMA_TOGGLE_BIT_SET = 1
68};
69
70struct qed_bmap {
71 unsigned long *bitmap;
72 u32 max_count;
73};
74
75struct qed_rdma_info {
76 /* spin lock to protect bitmaps */
77 spinlock_t lock;
78
79 struct qed_bmap cq_map;
80 struct qed_bmap pd_map;
81 struct qed_bmap tid_map;
82 struct qed_bmap qp_map;
83 struct qed_bmap srq_map;
84 struct qed_bmap cid_map;
85 struct qed_bmap dpi_map;
86 struct qed_bmap toggle_bits;
87 struct qed_rdma_events events;
88 struct qed_rdma_device *dev;
89 struct qed_rdma_port *port;
90 u32 last_tid;
91 u8 num_cnqs;
92 u32 num_qps;
93 u32 num_mrs;
94 u16 queue_zone_base;
95 enum protocol_type proto;
96};
97
98struct qed_rdma_resize_cq_in_params {
99 u16 icid;
100 u32 cq_size;
101 bool pbl_two_level;
102 u64 pbl_ptr;
103 u16 pbl_num_pages;
104 u8 pbl_page_size_log;
105};
106
107struct qed_rdma_resize_cq_out_params {
108 u32 prod;
109 u32 cons;
110};
111
112struct qed_rdma_resize_cnq_in_params {
113 u32 cnq_id;
114 u32 pbl_page_size_log;
115 u64 pbl_ptr;
116};
117
118struct qed_rdma_qp {
119 struct regpair qp_handle;
120 struct regpair qp_handle_async;
121 u32 qpid;
122 u16 icid;
123 enum qed_roce_qp_state cur_state;
124 bool use_srq;
125 bool signal_all;
126 bool fmr_and_reserved_lkey;
127
128 bool incoming_rdma_read_en;
129 bool incoming_rdma_write_en;
130 bool incoming_atomic_en;
131 bool e2e_flow_control_en;
132
133 u16 pd;
134 u16 pkey;
135 u32 dest_qp;
136 u16 mtu;
137 u16 srq_id;
138 u8 traffic_class_tos;
139 u8 hop_limit_ttl;
140 u16 dpi;
141 u32 flow_label;
142 bool lb_indication;
143 u16 vlan_id;
144 u32 ack_timeout;
145 u8 retry_cnt;
146 u8 rnr_retry_cnt;
147 u8 min_rnr_nak_timer;
148 bool sqd_async;
149 union qed_gid sgid;
150 union qed_gid dgid;
151 enum roce_mode roce_mode;
152 u16 udp_src_port;
153 u8 stats_queue;
154
155 /* requester */
156 u8 max_rd_atomic_req;
157 u32 sq_psn;
158 u16 sq_cq_id;
159 u16 sq_num_pages;
160 dma_addr_t sq_pbl_ptr;
161 void *orq;
162 dma_addr_t orq_phys_addr;
163 u8 orq_num_pages;
164 bool req_offloaded;
165
166 /* responder */
167 u8 max_rd_atomic_resp;
168 u32 rq_psn;
169 u16 rq_cq_id;
170 u16 rq_num_pages;
171 dma_addr_t rq_pbl_ptr;
172 void *irq;
173 dma_addr_t irq_phys_addr;
174 u8 irq_num_pages;
175 bool resp_offloaded;
176
177 u8 remote_mac_addr[6];
178 u8 local_mac_addr[6];
179
180 void *shared_queue;
181 dma_addr_t shared_queue_phys_addr;
182};
183
184int
185qed_rdma_add_user(void *rdma_cxt,
186 struct qed_rdma_add_user_out_params *out_params);
187int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
188int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
189int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
190void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
191struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
192struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
193int
194qed_rdma_register_tid(void *rdma_cxt,
195 struct qed_rdma_register_tid_in_params *params);
196void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
197int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
198int qed_rdma_stop(void *rdma_cxt);
199u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
200u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
201void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
202void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
203void qed_async_roce_event(struct qed_hwfn *p_hwfn,
204 struct event_ring_entry *p_eqe);
205int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp);
206int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
207 struct qed_rdma_modify_qp_in_params *params);
208int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp,
209 struct qed_rdma_query_qp_out_params *out_params);
210
211#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
212void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
213#else
214static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
215#endif
216#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index a548504c3420..652c90819758 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -61,6 +61,10 @@ union ramrod_data {
61 struct vport_start_ramrod_data vport_start; 61 struct vport_start_ramrod_data vport_start;
62 struct vport_stop_ramrod_data vport_stop; 62 struct vport_stop_ramrod_data vport_stop;
63 struct vport_update_ramrod_data vport_update; 63 struct vport_update_ramrod_data vport_update;
64 struct core_rx_start_ramrod_data core_rx_queue_start;
65 struct core_rx_stop_ramrod_data core_rx_queue_stop;
66 struct core_tx_start_ramrod_data core_tx_queue_start;
67 struct core_tx_stop_ramrod_data core_tx_queue_stop;
64 struct vport_filter_update_ramrod_data vport_filter_update; 68 struct vport_filter_update_ramrod_data vport_filter_update;
65 69
66 struct rdma_init_func_ramrod_data rdma_init_func; 70 struct rdma_init_func_ramrod_data rdma_init_func;
@@ -81,6 +85,7 @@ union ramrod_data {
81 struct rdma_srq_create_ramrod_data rdma_create_srq; 85 struct rdma_srq_create_ramrod_data rdma_create_srq;
82 struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; 86 struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
83 struct rdma_srq_modify_ramrod_data rdma_modify_srq; 87 struct rdma_srq_modify_ramrod_data rdma_modify_srq;
88 struct roce_init_func_ramrod_data roce_init_func;
84 89
85 struct iscsi_slow_path_hdr iscsi_empty; 90 struct iscsi_slow_path_hdr iscsi_empty;
86 struct iscsi_init_ramrod_params iscsi_init; 91 struct iscsi_init_ramrod_params iscsi_init;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 349af182d085..caff41544898 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,6 +28,9 @@
28#include "qed_reg_addr.h" 28#include "qed_reg_addr.h"
29#include "qed_sp.h" 29#include "qed_sp.h"
30#include "qed_sriov.h" 30#include "qed_sriov.h"
31#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
32#include "qed_roce.h"
33#endif
31 34
32/*************************************************************************** 35/***************************************************************************
33* Structures & Definitions 36* Structures & Definitions
@@ -237,6 +240,11 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
237 struct event_ring_entry *p_eqe) 240 struct event_ring_entry *p_eqe)
238{ 241{
239 switch (p_eqe->protocol_id) { 242 switch (p_eqe->protocol_id) {
243#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
244 case PROTOCOLID_ROCE:
245 qed_async_roce_event(p_hwfn, p_eqe);
246 return 0;
247#endif
240 case PROTOCOLID_COMMON: 248 case PROTOCOLID_COMMON:
241 return qed_sriov_eqe_event(p_hwfn, 249 return qed_sriov_eqe_event(p_hwfn,
242 p_eqe->opcode, 250 p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index a4a3cead15bb..d2d6621fe0e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1851,8 +1851,8 @@ static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
1851 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { 1851 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
1852 u16 qid = mbx->req_virt->start_txq.tx_qid; 1852 u16 qid = mbx->req_virt->start_txq.tx_qid;
1853 1853
1854 p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid, 1854 p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
1855 DQ_DEMS_LEGACY); 1855 DQ_DEMS_LEGACY);
1856 } 1856 }
1857 1857
1858 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); 1858 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 85334ceaf69c..abf5bf11f865 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -544,7 +544,7 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
544 u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; 544 u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
545 u32 db_addr; 545 u32 db_addr;
546 546
547 db_addr = qed_db_addr(cid, DQ_DEMS_LEGACY); 547 db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
548 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + 548 *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
549 db_addr; 549 db_addr;
550 } 550 }
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 74a49850d74d..28dc58919c85 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
2 2
3qede-y := qede_main.o qede_ethtool.o 3qede-y := qede_main.o qede_ethtool.o
4qede-$(CONFIG_DCB) += qede_dcbnl.o 4qede-$(CONFIG_DCB) += qede_dcbnl.o
5qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index e01adce4a966..28c0e9f42c9e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -106,6 +106,13 @@ struct qede_vlan {
106 bool configured; 106 bool configured;
107}; 107};
108 108
109struct qede_rdma_dev {
110 struct qedr_dev *qedr_dev;
111 struct list_head entry;
112 struct list_head roce_event_list;
113 struct workqueue_struct *roce_wq;
114};
115
109struct qede_dev { 116struct qede_dev {
110 struct qed_dev *cdev; 117 struct qed_dev *cdev;
111 struct net_device *ndev; 118 struct net_device *ndev;
@@ -185,6 +192,8 @@ struct qede_dev {
185 unsigned long sp_flags; 192 unsigned long sp_flags;
186 u16 vxlan_dst_port; 193 u16 vxlan_dst_port;
187 u16 geneve_dst_port; 194 u16 geneve_dst_port;
195
196 struct qede_rdma_dev rdma_info;
188}; 197};
189 198
190enum QEDE_STATE { 199enum QEDE_STATE {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 0e198fe89d1a..343038ca047d 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -36,7 +36,7 @@
36#include <linux/random.h> 36#include <linux/random.h>
37#include <net/ip6_checksum.h> 37#include <net/ip6_checksum.h>
38#include <linux/bitops.h> 38#include <linux/bitops.h>
39 39#include <linux/qed/qede_roce.h>
40#include "qede.h" 40#include "qede.h"
41 41
42static char version[] = 42static char version[] =
@@ -193,8 +193,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
193 struct ethtool_drvinfo drvinfo; 193 struct ethtool_drvinfo drvinfo;
194 struct qede_dev *edev; 194 struct qede_dev *edev;
195 195
196 /* Currently only support name change */ 196 if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
197 if (event != NETDEV_CHANGENAME)
198 goto done; 197 goto done;
199 198
200 /* Check whether this is a qede device */ 199 /* Check whether this is a qede device */
@@ -207,11 +206,18 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
207 goto done; 206 goto done;
208 edev = netdev_priv(ndev); 207 edev = netdev_priv(ndev);
209 208
210 /* Notify qed of the name change */ 209 switch (event) {
211 if (!edev->ops || !edev->ops->common) 210 case NETDEV_CHANGENAME:
212 goto done; 211 /* Notify qed of the name change */
213 edev->ops->common->set_id(edev->cdev, edev->ndev->name, 212 if (!edev->ops || !edev->ops->common)
214 "qede"); 213 goto done;
214 edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
215 break;
216 case NETDEV_CHANGEADDR:
217 edev = netdev_priv(ndev);
218 qede_roce_event_changeaddr(edev);
219 break;
220 }
215 221
216done: 222done:
217 return NOTIFY_DONE; 223 return NOTIFY_DONE;
@@ -2545,10 +2551,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
2545 2551
2546 qede_init_ndev(edev); 2552 qede_init_ndev(edev);
2547 2553
2554 rc = qede_roce_dev_add(edev);
2555 if (rc)
2556 goto err3;
2557
2548 rc = register_netdev(edev->ndev); 2558 rc = register_netdev(edev->ndev);
2549 if (rc) { 2559 if (rc) {
2550 DP_NOTICE(edev, "Cannot register net-device\n"); 2560 DP_NOTICE(edev, "Cannot register net-device\n");
2551 goto err3; 2561 goto err4;
2552 } 2562 }
2553 2563
2554 edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION); 2564 edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
@@ -2568,6 +2578,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
2568 2578
2569 return 0; 2579 return 0;
2570 2580
2581err4:
2582 qede_roce_dev_remove(edev);
2571err3: 2583err3:
2572 free_netdev(edev->ndev); 2584 free_netdev(edev->ndev);
2573err2: 2585err2:
@@ -2614,8 +2626,11 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
2614 DP_INFO(edev, "Starting qede_remove\n"); 2626 DP_INFO(edev, "Starting qede_remove\n");
2615 2627
2616 cancel_delayed_work_sync(&edev->sp_task); 2628 cancel_delayed_work_sync(&edev->sp_task);
2629
2617 unregister_netdev(ndev); 2630 unregister_netdev(ndev);
2618 2631
2632 qede_roce_dev_remove(edev);
2633
2619 edev->ops->common->set_power_state(cdev, PCI_D0); 2634 edev->ops->common->set_power_state(cdev, PCI_D0);
2620 2635
2621 pci_set_drvdata(pdev, NULL); 2636 pci_set_drvdata(pdev, NULL);
@@ -3512,6 +3527,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode)
3512 3527
3513 DP_INFO(edev, "Starting qede unload\n"); 3528 DP_INFO(edev, "Starting qede unload\n");
3514 3529
3530 qede_roce_dev_event_close(edev);
3515 mutex_lock(&edev->qede_lock); 3531 mutex_lock(&edev->qede_lock);
3516 edev->state = QEDE_STATE_CLOSED; 3532 edev->state = QEDE_STATE_CLOSED;
3517 3533
@@ -3612,6 +3628,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
3612 /* Query whether link is already-up */ 3628 /* Query whether link is already-up */
3613 memset(&link_output, 0, sizeof(link_output)); 3629 memset(&link_output, 0, sizeof(link_output));
3614 edev->ops->common->get_link(edev->cdev, &link_output); 3630 edev->ops->common->get_link(edev->cdev, &link_output);
3631 qede_roce_dev_event_open(edev);
3615 qede_link_update(edev, &link_output); 3632 qede_link_update(edev, &link_output);
3616 3633
3617 DP_INFO(edev, "Ending successfully qede load\n"); 3634 DP_INFO(edev, "Ending successfully qede load\n");
diff --git a/drivers/net/ethernet/qlogic/qede/qede_roce.c b/drivers/net/ethernet/qlogic/qede/qede_roce.c
new file mode 100644
index 000000000000..9867f960b063
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_roce.c
@@ -0,0 +1,314 @@
1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/pci.h>
33#include <linux/netdevice.h>
34#include <linux/list.h>
35#include <linux/mutex.h>
36#include <linux/qed/qede_roce.h>
37#include "qede.h"
38
39static struct qedr_driver *qedr_drv;
40static LIST_HEAD(qedr_dev_list);
41static DEFINE_MUTEX(qedr_dev_list_lock);
42
43bool qede_roce_supported(struct qede_dev *dev)
44{
45 return dev->dev_info.common.rdma_supported;
46}
47
48static void _qede_roce_dev_add(struct qede_dev *edev)
49{
50 if (!qedr_drv)
51 return;
52
53 edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
54 edev->ndev);
55}
56
57static int qede_roce_create_wq(struct qede_dev *edev)
58{
59 INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
60 edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
61 if (!edev->rdma_info.roce_wq) {
62 DP_NOTICE(edev, "qedr: Could not create workqueue\n");
63 return -ENOMEM;
64 }
65
66 return 0;
67}
68
69static void qede_roce_cleanup_event(struct qede_dev *edev)
70{
71 struct list_head *head = &edev->rdma_info.roce_event_list;
72 struct qede_roce_event_work *event_node;
73
74 flush_workqueue(edev->rdma_info.roce_wq);
75 while (!list_empty(head)) {
76 event_node = list_entry(head->next, struct qede_roce_event_work,
77 list);
78 cancel_work_sync(&event_node->work);
79 list_del(&event_node->list);
80 kfree(event_node);
81 }
82}
83
84static void qede_roce_destroy_wq(struct qede_dev *edev)
85{
86 qede_roce_cleanup_event(edev);
87 destroy_workqueue(edev->rdma_info.roce_wq);
88}
89
90int qede_roce_dev_add(struct qede_dev *edev)
91{
92 int rc = 0;
93
94 if (qede_roce_supported(edev)) {
95 rc = qede_roce_create_wq(edev);
96 if (rc)
97 return rc;
98
99 INIT_LIST_HEAD(&edev->rdma_info.entry);
100 mutex_lock(&qedr_dev_list_lock);
101 list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
102 _qede_roce_dev_add(edev);
103 mutex_unlock(&qedr_dev_list_lock);
104 }
105
106 return rc;
107}
108
109static void _qede_roce_dev_remove(struct qede_dev *edev)
110{
111 if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
112 qedr_drv->remove(edev->rdma_info.qedr_dev);
113 edev->rdma_info.qedr_dev = NULL;
114}
115
116void qede_roce_dev_remove(struct qede_dev *edev)
117{
118 if (!qede_roce_supported(edev))
119 return;
120
121 qede_roce_destroy_wq(edev);
122 mutex_lock(&qedr_dev_list_lock);
123 _qede_roce_dev_remove(edev);
124 list_del(&edev->rdma_info.entry);
125 mutex_unlock(&qedr_dev_list_lock);
126}
127
128static void _qede_roce_dev_open(struct qede_dev *edev)
129{
130 if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
131 qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
132}
133
134static void qede_roce_dev_open(struct qede_dev *edev)
135{
136 if (!qede_roce_supported(edev))
137 return;
138
139 mutex_lock(&qedr_dev_list_lock);
140 _qede_roce_dev_open(edev);
141 mutex_unlock(&qedr_dev_list_lock);
142}
143
144static void _qede_roce_dev_close(struct qede_dev *edev)
145{
146 if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
147 qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
148}
149
150static void qede_roce_dev_close(struct qede_dev *edev)
151{
152 if (!qede_roce_supported(edev))
153 return;
154
155 mutex_lock(&qedr_dev_list_lock);
156 _qede_roce_dev_close(edev);
157 mutex_unlock(&qedr_dev_list_lock);
158}
159
160static void qede_roce_dev_shutdown(struct qede_dev *edev)
161{
162 if (!qede_roce_supported(edev))
163 return;
164
165 mutex_lock(&qedr_dev_list_lock);
166 if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
167 qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
168 mutex_unlock(&qedr_dev_list_lock);
169}
170
171int qede_roce_register_driver(struct qedr_driver *drv)
172{
173 struct qede_dev *edev;
174 u8 qedr_counter = 0;
175
176 mutex_lock(&qedr_dev_list_lock);
177 if (qedr_drv) {
178 mutex_unlock(&qedr_dev_list_lock);
179 return -EINVAL;
180 }
181 qedr_drv = drv;
182
183 list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
184 struct net_device *ndev;
185
186 qedr_counter++;
187 _qede_roce_dev_add(edev);
188 ndev = edev->ndev;
189 if (netif_running(ndev) && netif_oper_up(ndev))
190 _qede_roce_dev_open(edev);
191 }
192 mutex_unlock(&qedr_dev_list_lock);
193
194 DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n",
195 qedr_counter);
196
197 return 0;
198}
199EXPORT_SYMBOL(qede_roce_register_driver);
200
201void qede_roce_unregister_driver(struct qedr_driver *drv)
202{
203 struct qede_dev *edev;
204
205 mutex_lock(&qedr_dev_list_lock);
206 list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
207 if (edev->rdma_info.qedr_dev)
208 _qede_roce_dev_remove(edev);
209 }
210 qedr_drv = NULL;
211 mutex_unlock(&qedr_dev_list_lock);
212}
213EXPORT_SYMBOL(qede_roce_unregister_driver);
214
215static void qede_roce_changeaddr(struct qede_dev *edev)
216{
217 if (!qede_roce_supported(edev))
218 return;
219
220 if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
221 qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
222}
223
224static struct qede_roce_event_work *
225qede_roce_get_free_event_node(struct qede_dev *edev)
226{
227 struct qede_roce_event_work *event_node = NULL;
228 struct list_head *list_node = NULL;
229 bool found = false;
230
231 list_for_each(list_node, &edev->rdma_info.roce_event_list) {
232 event_node = list_entry(list_node, struct qede_roce_event_work,
233 list);
234 if (!work_pending(&event_node->work)) {
235 found = true;
236 break;
237 }
238 }
239
240 if (!found) {
241 event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
242 if (!event_node) {
243 DP_NOTICE(edev,
244 "qedr: Could not allocate memory for roce work\n");
245 return NULL;
246 }
247 list_add_tail(&event_node->list,
248 &edev->rdma_info.roce_event_list);
249 }
250
251 return event_node;
252}
253
254static void qede_roce_handle_event(struct work_struct *work)
255{
256 struct qede_roce_event_work *event_node;
257 enum qede_roce_event event;
258 struct qede_dev *edev;
259
260 event_node = container_of(work, struct qede_roce_event_work, work);
261 event = event_node->event;
262 edev = event_node->ptr;
263
264 switch (event) {
265 case QEDE_UP:
266 qede_roce_dev_open(edev);
267 break;
268 case QEDE_DOWN:
269 qede_roce_dev_close(edev);
270 break;
271 case QEDE_CLOSE:
272 qede_roce_dev_shutdown(edev);
273 break;
274 case QEDE_CHANGE_ADDR:
275 qede_roce_changeaddr(edev);
276 break;
277 default:
278 DP_NOTICE(edev, "Invalid roce event %d", event);
279 }
280}
281
282static void qede_roce_add_event(struct qede_dev *edev,
283 enum qede_roce_event event)
284{
285 struct qede_roce_event_work *event_node;
286
287 if (!edev->rdma_info.qedr_dev)
288 return;
289
290 event_node = qede_roce_get_free_event_node(edev);
291 if (!event_node)
292 return;
293
294 event_node->event = event;
295 event_node->ptr = edev;
296
297 INIT_WORK(&event_node->work, qede_roce_handle_event);
298 queue_work(edev->rdma_info.roce_wq, &event_node->work);
299}
300
301void qede_roce_dev_event_open(struct qede_dev *edev)
302{
303 qede_roce_add_event(edev, QEDE_UP);
304}
305
306void qede_roce_dev_event_close(struct qede_dev *edev)
307{
308 qede_roce_add_event(edev, QEDE_DOWN);
309}
310
311void qede_roce_event_changeaddr(struct qede_dev *edev)
312{
313 qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
314}
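For context, the registration API above expects a qedr_driver providing add/remove/notify callbacks: qede invokes add() for every discovered RoCE-capable device and notify() for the QEDE_UP/DOWN/CLOSE/CHANGE_ADDR events queued through the workqueue. A skeleton of such a consumer might look like the sketch below (not part of the patch; names are hypothetical and the callback bodies are intentionally empty):

static struct qedr_dev *example_qedr_add(struct qed_dev *cdev,
					 struct pci_dev *pdev,
					 struct net_device *ndev)
{
	/* allocate the RoCE device and bind it to the qed/qede instance */
	return NULL;
}

static void example_qedr_remove(struct qedr_dev *qedr_dev)
{
	/* tear the RoCE device down */
}

static void example_qedr_notify(struct qedr_dev *qedr_dev,
				enum qede_roce_event event)
{
	/* react to QEDE_UP / QEDE_DOWN / QEDE_CLOSE / QEDE_CHANGE_ADDR */
}

static struct qedr_driver example_qedr_driver = {
	.add = example_qedr_add,
	.remove = example_qedr_remove,
	.notify = example_qedr_notify,
};

static int __init example_qedr_init(void)
{
	return qede_roce_register_driver(&example_qedr_driver);
}

static void __exit example_qedr_exit(void)
{
	qede_roce_unregister_driver(&example_qedr_driver);
}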
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 19027635df0d..734deb094618 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -674,6 +674,7 @@ union event_ring_data {
674 struct iscsi_eqe_data iscsi_info; 674 struct iscsi_eqe_data iscsi_info;
675 struct malicious_vf_eqe_data malicious_vf; 675 struct malicious_vf_eqe_data malicious_vf;
676 struct initial_cleanup_eqe_data vf_init_cleanup; 676 struct initial_cleanup_eqe_data vf_init_cleanup;
677 struct regpair roce_handle;
677}; 678};
678 679
679/* Event Ring Entry */ 680/* Event Ring Entry */
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index e4546abcea08..f9ae903bbb84 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,8 @@ enum dcbx_protocol_type {
34 DCBX_MAX_PROTOCOL_TYPE 34 DCBX_MAX_PROTOCOL_TYPE
35}; 35};
36 36
37#define QED_ROCE_PROTOCOL_INDEX (3)
38
37#ifdef CONFIG_DCB 39#ifdef CONFIG_DCB
38#define QED_LLDP_CHASSIS_ID_STAT_LEN 4 40#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
39#define QED_LLDP_PORT_ID_STAT_LEN 4 41#define QED_LLDP_PORT_ID_STAT_LEN 4
@@ -260,15 +262,15 @@ struct qed_dev_info {
260 /* MFW version */ 262 /* MFW version */
261 u32 mfw_rev; 263 u32 mfw_rev;
262 264
263 bool rdma_supported;
264
265 u32 flash_size; 265 u32 flash_size;
266 u8 mf_mode; 266 u8 mf_mode;
267 bool tx_switching; 267 bool tx_switching;
268 bool rdma_supported;
268}; 269};
269 270
270enum qed_sb_type { 271enum qed_sb_type {
271 QED_SB_TYPE_L2_QUEUE, 272 QED_SB_TYPE_L2_QUEUE,
273 QED_SB_TYPE_CNQ,
272}; 274};
273 275
274enum qed_protocol { 276enum qed_protocol {
@@ -627,8 +629,9 @@ enum DP_MODULE {
627 QED_MSG_SP = 0x100000, 629 QED_MSG_SP = 0x100000,
628 QED_MSG_STORAGE = 0x200000, 630 QED_MSG_STORAGE = 0x200000,
629 QED_MSG_CXT = 0x800000, 631 QED_MSG_CXT = 0x800000,
632 QED_MSG_LL2 = 0x1000000,
630 QED_MSG_ILT = 0x2000000, 633 QED_MSG_ILT = 0x2000000,
631 QED_MSG_ROCE = 0x4000000, 634 QED_MSG_RDMA = 0x4000000,
632 QED_MSG_DEBUG = 0x8000000, 635 QED_MSG_DEBUG = 0x8000000,
633 /* to be added...up to 0x8000000 */ 636 /* to be added...up to 0x8000000 */
634}; 637};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
new file mode 100644
index 000000000000..fd75c265dba3
--- /dev/null
+++ b/include/linux/qed/qed_ll2_if.h
@@ -0,0 +1,139 @@
1/* QLogic qed NIC Driver
2 *
3 * Copyright (c) 2015 QLogic Corporation
4 *
5 * This software is available under the terms of the GNU General Public License
6 * (GPL) Version 2, available from the file COPYING in the main directory of
7 * this source tree.
8 */
9
10#ifndef _QED_LL2_IF_H
11#define _QED_LL2_IF_H
12
13#include <linux/types.h>
14#include <linux/interrupt.h>
15#include <linux/netdevice.h>
16#include <linux/pci.h>
17#include <linux/skbuff.h>
18#include <linux/version.h>
19#include <linux/kernel.h>
20#include <linux/slab.h>
21#include <linux/qed/qed_if.h>
22
23struct qed_ll2_stats {
24 u64 gsi_invalid_hdr;
25 u64 gsi_invalid_pkt_length;
26 u64 gsi_unsupported_pkt_typ;
27 u64 gsi_crcchksm_error;
28
29 u64 packet_too_big_discard;
30 u64 no_buff_discard;
31
32 u64 rcv_ucast_bytes;
33 u64 rcv_mcast_bytes;
34 u64 rcv_bcast_bytes;
35 u64 rcv_ucast_pkts;
36 u64 rcv_mcast_pkts;
37 u64 rcv_bcast_pkts;
38
39 u64 sent_ucast_bytes;
40 u64 sent_mcast_bytes;
41 u64 sent_bcast_bytes;
42 u64 sent_ucast_pkts;
43 u64 sent_mcast_pkts;
44 u64 sent_bcast_pkts;
45};
46
47#define QED_LL2_UNUSED_HANDLE (0xff)
48
49struct qed_ll2_cb_ops {
50 int (*rx_cb)(void *, struct sk_buff *, u32, u32);
51 int (*tx_cb)(void *, struct sk_buff *, bool);
52};
53
54struct qed_ll2_params {
55 u16 mtu;
56 bool drop_ttl0_packets;
57 bool rx_vlan_stripping;
58 u8 tx_tc;
59 bool frags_mapped;
60 u8 ll2_mac_address[ETH_ALEN];
61};
62
63struct qed_ll2_ops {
64/**
65 * @brief start - initializes ll2
66 *
67 * @param cdev
68 * @param params - protocol driver configuration for the ll2.
69 *
70 * @return 0 on success, otherwise error value.
71 */
72 int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
73
74/**
75 * @brief stop - stops the ll2
76 *
77 * @param cdev
78 *
79 * @return 0 on success, otherwise error value.
80 */
81 int (*stop)(struct qed_dev *cdev);
82
83/**
84 * @brief start_xmit - transmits an skb over the ll2 interface
85 *
86 * @param cdev
87 * @param skb
88 *
89 * @return 0 on success, otherwise error value.
90 */
91 int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
92
93/**
94 * @brief register_cb_ops - protocol driver registers the callbacks for Rx/Tx
95 * packets. Should be called before `start'.
96 *
97 * @param cdev
98 * @param cookie - to be passed to the callback functions.
99 * @param ops - the callback functions to register for Rx / Tx.
100 *
101 * @return 0 on success, otherwise error value.
102 */
103 void (*register_cb_ops)(struct qed_dev *cdev,
104 const struct qed_ll2_cb_ops *ops,
105 void *cookie);
106
107/**
108 * @brief get LL2 related statistics
109 *
110 * @param cdev
111 * @param stats - pointer to struct that would be filled with stats
112 *
113 * @return 0 on success, error otherwise.
114 */
115 int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
116};
117
118#ifdef CONFIG_QED_LL2
119int qed_ll2_alloc_if(struct qed_dev *);
120void qed_ll2_dealloc_if(struct qed_dev *);
121#else
122static const struct qed_ll2_ops qed_ll2_ops_pass = {
123 .start = NULL,
124 .stop = NULL,
125 .start_xmit = NULL,
126 .register_cb_ops = NULL,
127 .get_stats = NULL,
128};
129
130static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
131{
132 return 0;
133}
134
135static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
136{
137}
138#endif
139#endif
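The ops above are meant to be used in a fixed order - register_cb_ops() before start(), then start_xmit() per packet and get_stats()/stop() as needed. A sketch of a protocol driver bringing LL2 up under that assumption (not part of the patch; the example_ names and the MTU value are placeholders):

static int example_ll2_rx(void *cookie, struct sk_buff *skb,
			  u32 arg1, u32 arg2)
{
	/* hand the received buffer to the protocol driver */
	return 0;
}

static int example_ll2_tx(void *cookie, struct sk_buff *skb, bool success)
{
	/* reclaim the transmitted skb */
	return 0;
}

static const struct qed_ll2_cb_ops example_ll2_cbs = {
	.rx_cb = example_ll2_rx,
	.tx_cb = example_ll2_tx,
};

static int example_ll2_bring_up(struct qed_dev *cdev,
				const struct qed_ll2_ops *ll2, void *cookie)
{
	struct qed_ll2_params params;

	memset(&params, 0, sizeof(params));
	params.mtu = 1500;
	params.drop_ttl0_packets = true;
	params.rx_vlan_stripping = false;

	/* callbacks must be registered before the connection is started */
	ll2->register_cb_ops(cdev, &example_ll2_cbs, cookie);

	return ll2->start(cdev, &params);
}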
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
new file mode 100644
index 000000000000..53047d3fa678
--- /dev/null
+++ b/include/linux/qed/qed_roce_if.h
@@ -0,0 +1,604 @@
1/* QLogic qed NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef _QED_ROCE_IF_H
33#define _QED_ROCE_IF_H
34#include <linux/types.h>
35#include <linux/delay.h>
36#include <linux/list.h>
37#include <linux/mutex.h>
38#include <linux/pci.h>
39#include <linux/slab.h>
40#include <linux/qed/qed_if.h>
41#include <linux/qed/qed_ll2_if.h>
42#include <linux/qed/rdma_common.h>
43
44enum qed_roce_ll2_tx_dest {
45 /* Light L2 TX Destination to the Network */
46 QED_ROCE_LL2_TX_DEST_NW,
47
48 /* Light L2 TX Destination to the Loopback */
49 QED_ROCE_LL2_TX_DEST_LB,
50 QED_ROCE_LL2_TX_DEST_MAX
51};
52
53#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
54
55/* rdma interface */
56
57enum qed_roce_qp_state {
58 QED_ROCE_QP_STATE_RESET,
59 QED_ROCE_QP_STATE_INIT,
60 QED_ROCE_QP_STATE_RTR,
61 QED_ROCE_QP_STATE_RTS,
62 QED_ROCE_QP_STATE_SQD,
63 QED_ROCE_QP_STATE_ERR,
64 QED_ROCE_QP_STATE_SQE
65};
66
67enum qed_rdma_tid_type {
68 QED_RDMA_TID_REGISTERED_MR,
69 QED_RDMA_TID_FMR,
70 QED_RDMA_TID_MW_TYPE1,
71 QED_RDMA_TID_MW_TYPE2A
72};
73
74struct qed_rdma_events {
75 void *context;
76 void (*affiliated_event)(void *context, u8 fw_event_code,
77 void *fw_handle);
78 void (*unaffiliated_event)(void *context, u8 event_code);
79};
80
81struct qed_rdma_device {
82 u32 vendor_id;
83 u32 vendor_part_id;
84 u32 hw_ver;
85 u64 fw_ver;
86
87 u64 node_guid;
88 u64 sys_image_guid;
89
90 u8 max_cnq;
91 u8 max_sge;
92 u8 max_srq_sge;
93 u16 max_inline;
94 u32 max_wqe;
95 u32 max_srq_wqe;
96 u8 max_qp_resp_rd_atomic_resc;
97 u8 max_qp_req_rd_atomic_resc;
98 u64 max_dev_resp_rd_atomic_resc;
99 u32 max_cq;
100 u32 max_qp;
101 u32 max_srq;
102 u32 max_mr;
103 u64 max_mr_size;
104 u32 max_cqe;
105 u32 max_mw;
106 u32 max_fmr;
107 u32 max_mr_mw_fmr_pbl;
108 u64 max_mr_mw_fmr_size;
109 u32 max_pd;
110 u32 max_ah;
111 u8 max_pkey;
112 u16 max_srq_wr;
113 u8 max_stats_queues;
114 u32 dev_caps;
115
116	/* Ability to support RNR-NAK generation */
117
118#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
119#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
120	/* Ability to support port shutdown */
121#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
122#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
123	/* Ability to support port active event */
124#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
125#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
126	/* Ability to support port change event */
127#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
128#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
129	/* Ability to support system image GUID */
130#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
131#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
132	/* Ability to support a bad P_Key counter */
133#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
134#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
135	/* Ability to support atomic operations */
136#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
137#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
138#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
139#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
140	/* Ability to support modifying the maximum number of
141 * outstanding work requests per QP
142 */
143#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
144#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
145	/* Ability to support automatic path migration */
146#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
147#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
148	/* Ability to support the base memory management extensions */
149#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
150#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
151#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
152#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
153	/* Ability to support multiple page sizes per memory region */
154#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
155#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
156	/* Ability to support block-list physical buffer lists */
157#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
158#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
159	/* Ability to support zero-based virtual addresses */
160#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
161#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
162	/* Ability to support local invalidate fencing */
163#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
164#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
165	/* Ability to support loopback on a QP */
166#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
167#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
168 u64 page_size_caps;
169 u8 dev_ack_delay;
170 u32 reserved_lkey;
171 u32 bad_pkey_counter;
172 struct qed_rdma_events events;
173};
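The dev_caps field above is a packed capability bitmap; each capability is read by shifting and masking with the corresponding MASK/SHIFT pair. A small helper (hypothetical, not part of this patch) makes the pattern explicit:

/* Hypothetical helper illustrating how a dev_caps bit is tested with the
 * MASK/SHIFT pairs defined above.
 */
#define QED_RDMA_DEV_CAP(dev, cap)					\
	(((dev)->dev_caps >> QED_RDMA_DEV_CAP_##cap##_SHIFT) &		\
	 QED_RDMA_DEV_CAP_##cap##_MASK)

/* e.g. QED_RDMA_DEV_CAP(rdev, ATOMIC_OP) is non-zero when the device
 * advertises atomic-operation support.
 */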
174
175enum qed_port_state {
176 QED_RDMA_PORT_UP,
177 QED_RDMA_PORT_DOWN,
178};
179
180enum qed_roce_capability {
181 QED_ROCE_V1 = 1 << 0,
182 QED_ROCE_V2 = 1 << 1,
183};
184
185struct qed_rdma_port {
186 enum qed_port_state port_state;
187 int link_speed;
188 u64 max_msg_size;
189 u8 source_gid_table_len;
190 void *source_gid_table_ptr;
191 u8 pkey_table_len;
192 void *pkey_table_ptr;
193 u32 pkey_bad_counter;
194 enum qed_roce_capability capability;
195};
196
197struct qed_rdma_cnq_params {
198 u8 num_pbl_pages;
199 u64 pbl_ptr;
200};
201
202/* The CQ mode affects the CQ doorbell transaction size.
203 * 64-bit machines should configure 32-bit mode; 32-bit machines, 16-bit mode.
204 */
205enum qed_rdma_cq_mode {
206 QED_RDMA_CQ_MODE_16_BITS,
207 QED_RDMA_CQ_MODE_32_BITS,
208};
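Following the comment above, a caller would typically derive the CQ mode from the host word size; a hedged one-liner (the cq_mode variable is illustrative):

	/* 64-bit hosts use 32-bit CQ doorbells, 32-bit hosts 16-bit ones */
	enum qed_rdma_cq_mode cq_mode = IS_ENABLED(CONFIG_64BIT) ?
					QED_RDMA_CQ_MODE_32_BITS :
					QED_RDMA_CQ_MODE_16_BITS;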
209
210struct qed_roce_dcqcn_params {
211 u8 notification_point;
212 u8 reaction_point;
213
214 /* fields for notification point */
215 u32 cnp_send_timeout;
216
217 /* fields for reaction point */
218 u32 rl_bc_rate;
219 u16 rl_max_rate;
220 u16 rl_r_ai;
221 u16 rl_r_hai;
222 u16 dcqcn_g;
223 u32 dcqcn_k_us;
224 u32 dcqcn_timeout_us;
225};
226
227struct qed_rdma_start_in_params {
228 struct qed_rdma_events *events;
229 struct qed_rdma_cnq_params cnq_pbl_list[128];
230 u8 desired_cnq;
231 enum qed_rdma_cq_mode cq_mode;
232 struct qed_roce_dcqcn_params dcqcn_params;
233 u16 max_mtu;
234 u8 mac_addr[ETH_ALEN];
235 u8 iwarp_flags;
236};
237
238struct qed_rdma_add_user_out_params {
239 u16 dpi;
240 u64 dpi_addr;
241 u64 dpi_phys_addr;
242 u32 dpi_size;
243};
244
245enum roce_mode {
246 ROCE_V1,
247 ROCE_V2_IPV4,
248 ROCE_V2_IPV6,
249 MAX_ROCE_MODE
250};
251
252union qed_gid {
253 u8 bytes[16];
254 u16 words[8];
255 u32 dwords[4];
256 u64 qwords[2];
257 u32 ipv4_addr;
258};
259
260struct qed_rdma_register_tid_in_params {
261 u32 itid;
262 enum qed_rdma_tid_type tid_type;
263 u8 key;
264 u16 pd;
265 bool local_read;
266 bool local_write;
267 bool remote_read;
268 bool remote_write;
269 bool remote_atomic;
270 bool mw_bind;
271 u64 pbl_ptr;
272 bool pbl_two_level;
273 u8 pbl_page_size_log;
274 u8 page_size_log;
275 u32 fbo;
276 u64 length;
277 u64 vaddr;
278 bool zbva;
279 bool phy_mr;
280 bool dma_mr;
281
282 bool dif_enabled;
283 u64 dif_error_addr;
284 u64 dif_runt_addr;
285};
286
287struct qed_rdma_create_cq_in_params {
288 u32 cq_handle_lo;
289 u32 cq_handle_hi;
290 u32 cq_size;
291 u16 dpi;
292 bool pbl_two_level;
293 u64 pbl_ptr;
294 u16 pbl_num_pages;
295 u8 pbl_page_size_log;
296 u8 cnq_id;
297 u16 int_timeout;
298};
299
300struct qed_rdma_create_srq_in_params {
301 u64 pbl_base_addr;
302 u64 prod_pair_addr;
303 u16 num_pages;
304 u16 pd_id;
305 u16 page_size;
306};
307
308struct qed_rdma_destroy_cq_in_params {
309 u16 icid;
310};
311
312struct qed_rdma_destroy_cq_out_params {
313 u16 num_cq_notif;
314};
315
316struct qed_rdma_create_qp_in_params {
317 u32 qp_handle_lo;
318 u32 qp_handle_hi;
319 u32 qp_handle_async_lo;
320 u32 qp_handle_async_hi;
321 bool use_srq;
322 bool signal_all;
323 bool fmr_and_reserved_lkey;
324 u16 pd;
325 u16 dpi;
326 u16 sq_cq_id;
327 u16 sq_num_pages;
328 u64 sq_pbl_ptr;
329 u8 max_sq_sges;
330 u16 rq_cq_id;
331 u16 rq_num_pages;
332 u64 rq_pbl_ptr;
333 u16 srq_id;
334 u8 stats_queue;
335};
336
337struct qed_rdma_create_qp_out_params {
338 u32 qp_id;
339 u16 icid;
340 void *rq_pbl_virt;
341 dma_addr_t rq_pbl_phys;
342 void *sq_pbl_virt;
343 dma_addr_t sq_pbl_phys;
344};
345
346struct qed_rdma_modify_qp_in_params {
347 u32 modify_flags;
348#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
349#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
350#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
351#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
352#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
353#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
354#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
355#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
356#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
357#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
358#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
359#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
360#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
361#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
362#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
363#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
364#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
365#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
366#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
367#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
368#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
369#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
370#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
371#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
372#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
373#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
374#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
375#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
376#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
377#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
378
379 enum qed_roce_qp_state new_state;
380 u16 pkey;
381 bool incoming_rdma_read_en;
382 bool incoming_rdma_write_en;
383 bool incoming_atomic_en;
384 bool e2e_flow_control_en;
385 u32 dest_qp;
386 bool lb_indication;
387 u16 mtu;
388 u8 traffic_class_tos;
389 u8 hop_limit_ttl;
390 u32 flow_label;
391 union qed_gid sgid;
392 union qed_gid dgid;
393 u16 udp_src_port;
394
395 u16 vlan_id;
396
397 u32 rq_psn;
398 u32 sq_psn;
399 u8 max_rd_atomic_resp;
400 u8 max_rd_atomic_req;
401 u32 ack_timeout;
402 u8 retry_cnt;
403 u8 rnr_retry_cnt;
404 u8 min_rnr_nak_timer;
405 bool sqd_async;
406 u8 remote_mac_addr[6];
407 u8 local_mac_addr[6];
408 bool use_local_mac;
409 enum roce_mode roce_mode;
410};
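modify_flags mirrors the VALID_* macros above: the caller sets a bit for every field it wants applied, and anything without its VALID bit set is ignored. A hedged sketch of moving a QP to RTR while updating the receive PSN (the my_qp_to_rtr wrapper and its arguments stand in for caller-held state):

/* Hedged sketch; ops, rdma_cxt and qp are assumed to be held by the caller */
static int my_qp_to_rtr(const struct qed_rdma_ops *ops, void *rdma_cxt,
			struct qed_rdma_qp *qp, u32 rq_psn)
{
	struct qed_rdma_modify_qp_in_params in = {};

	in.new_state = QED_ROCE_QP_STATE_RTR;
	in.rq_psn = rq_psn;
	/* Only fields whose VALID bit is set in modify_flags are applied */
	in.modify_flags =
		(QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
		 QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
		(QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK <<
		 QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT);

	return ops->rdma_modify_qp(rdma_cxt, qp, &in);
}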
411
412struct qed_rdma_query_qp_out_params {
413 enum qed_roce_qp_state state;
414 u32 rq_psn;
415 u32 sq_psn;
416 bool draining;
417 u16 mtu;
418 u32 dest_qp;
419 bool incoming_rdma_read_en;
420 bool incoming_rdma_write_en;
421 bool incoming_atomic_en;
422 bool e2e_flow_control_en;
423 union qed_gid sgid;
424 union qed_gid dgid;
425 u32 flow_label;
426 u8 hop_limit_ttl;
427 u8 traffic_class_tos;
428 u32 timeout;
429 u8 rnr_retry;
430 u8 retry_cnt;
431 u8 min_rnr_nak_timer;
432 u16 pkey_index;
433 u8 max_rd_atomic;
434 u8 max_dest_rd_atomic;
435 bool sqd_async;
436};
437
438struct qed_rdma_create_srq_out_params {
439 u16 srq_id;
440};
441
442struct qed_rdma_destroy_srq_in_params {
443 u16 srq_id;
444};
445
446struct qed_rdma_modify_srq_in_params {
447 u32 wqe_limit;
448 u16 srq_id;
449};
450
451struct qed_rdma_stats_out_params {
452 u64 sent_bytes;
453 u64 sent_pkts;
454 u64 rcv_bytes;
455 u64 rcv_pkts;
456};
457
458struct qed_rdma_counters_out_params {
459 u64 pd_count;
460 u64 max_pd;
461 u64 dpi_count;
462 u64 max_dpi;
463 u64 cq_count;
464 u64 max_cq;
465 u64 qp_count;
466 u64 max_qp;
467 u64 tid_count;
468 u64 max_tid;
469};
470
471#define QED_ROCE_TX_HEAD_FAILURE (1)
472#define QED_ROCE_TX_FRAG_FAILURE (2)
473
474struct qed_roce_ll2_header {
475 void *vaddr;
476 dma_addr_t baddr;
477 size_t len;
478};
479
480struct qed_roce_ll2_buffer {
481 dma_addr_t baddr;
482 size_t len;
483};
484
485struct qed_roce_ll2_packet {
486 struct qed_roce_ll2_header header;
487 int n_seg;
488 struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
489 int roce_mode;
490 enum qed_roce_ll2_tx_dest tx_dest;
491};
492
493struct qed_roce_ll2_tx_params {
494 int reserved;
495};
496
497struct qed_roce_ll2_rx_params {
498 u16 vlan_id;
499 u8 smac[ETH_ALEN];
500 int rc;
501};
502
503struct qed_roce_ll2_cbs {
504 void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
505
506 void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
507 struct qed_roce_ll2_rx_params *params);
508};
509
510struct qed_roce_ll2_params {
511 u16 max_rx_buffers;
512 u16 max_tx_buffers;
513 u16 mtu;
514 u8 mac_address[ETH_ALEN];
515 struct qed_roce_ll2_cbs cbs;
516 void *cb_cookie;
517};
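qed_roce_ll2_params bundles everything roce_ll2_start (declared in qed_rdma_ops below) needs: buffer counts, MTU, MAC address and the Rx/Tx completion callbacks. A hedged sketch of filling it; the callback bodies and the my_* names are illustrative, and ether_addr_copy() needs <linux/etherdevice.h>:

/* Hedged sketch of starting the RoCE light-L2 channel */
static void my_tx_done(void *pdev, struct qed_roce_ll2_packet *pkt) { }

static void my_rx_done(void *pdev, struct qed_roce_ll2_packet *pkt,
		       struct qed_roce_ll2_rx_params *rx) { }

static int my_roce_ll2_setup(struct qed_dev *cdev,
			     const struct qed_rdma_ops *ops, const u8 *mac)
{
	struct qed_roce_ll2_params params = {
		.max_rx_buffers	= 128,	/* illustrative sizing */
		.max_tx_buffers	= 128,
		.mtu		= 1500,
		.cbs		= { .tx_cb = my_tx_done, .rx_cb = my_rx_done },
	};

	ether_addr_copy(params.mac_address, mac);

	return ops->roce_ll2_start(cdev, &params);
}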
518
519struct qed_roce_ll2_info {
520 u8 handle;
521 struct qed_roce_ll2_cbs cbs;
522 u8 mac_address[ETH_ALEN];
523 void *cb_cookie;
524
525 /* Lock to protect ll2 */
526 struct mutex lock;
527};
528
529enum qed_rdma_type {
530 QED_RDMA_TYPE_ROCE,
531};
532
533struct qed_dev_rdma_info {
534 struct qed_dev_info common;
535 enum qed_rdma_type rdma_type;
536};
537
538struct qed_rdma_ops {
539 const struct qed_common_ops *common;
540
541 int (*fill_dev_info)(struct qed_dev *cdev,
542 struct qed_dev_rdma_info *info);
543 void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
544
545 int (*rdma_init)(struct qed_dev *dev,
546 struct qed_rdma_start_in_params *iparams);
547
548 int (*rdma_add_user)(void *rdma_cxt,
549 struct qed_rdma_add_user_out_params *oparams);
550
551 void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
552 int (*rdma_stop)(void *rdma_cxt);
553 struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
554 struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
555 int (*rdma_get_start_sb)(struct qed_dev *cdev);
556 int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
557 void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
558 int (*rdma_get_rdma_int)(struct qed_dev *cdev,
559 struct qed_int_info *info);
560 int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
561 int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
562 void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
563 int (*rdma_create_cq)(void *rdma_cxt,
564 struct qed_rdma_create_cq_in_params *params,
565 u16 *icid);
566 int (*rdma_destroy_cq)(void *rdma_cxt,
567 struct qed_rdma_destroy_cq_in_params *iparams,
568 struct qed_rdma_destroy_cq_out_params *oparams);
569 struct qed_rdma_qp *
570 (*rdma_create_qp)(void *rdma_cxt,
571 struct qed_rdma_create_qp_in_params *iparams,
572 struct qed_rdma_create_qp_out_params *oparams);
573
574 int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
575 struct qed_rdma_modify_qp_in_params *iparams);
576
577 int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
578 struct qed_rdma_query_qp_out_params *oparams);
579 int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
580 int
581 (*rdma_register_tid)(void *rdma_cxt,
582 struct qed_rdma_register_tid_in_params *iparams);
583 int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
584 int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
585 void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
586 int (*roce_ll2_start)(struct qed_dev *cdev,
587 struct qed_roce_ll2_params *params);
588 int (*roce_ll2_stop)(struct qed_dev *cdev);
589 int (*roce_ll2_tx)(struct qed_dev *cdev,
590 struct qed_roce_ll2_packet *packet,
591 struct qed_roce_ll2_tx_params *params);
592 int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
593 struct qed_roce_ll2_buffer *buf,
594 u64 cookie, u8 notify_fw);
595 int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
596 u8 *old_mac_address,
597 u8 *new_mac_address);
598 int (*roce_ll2_stats)(struct qed_dev *cdev,
599 struct qed_ll2_stats *stats);
600};
601
602const struct qed_rdma_ops *qed_get_rdma_ops(void);
603
604#endif
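qed_get_rdma_ops() is the single entry point through which a RoCE consumer such as qedr reaches all of the ops above. A hedged sketch of the expected probe-time sequence (error handling and the rdma_init start parameters are elided; local names are illustrative):

	/* Hedged sketch of a qedr-style probe path */
	const struct qed_rdma_ops *ops = qed_get_rdma_ops();
	struct qed_dev_rdma_info info;
	struct qed_rdma_add_user_out_params uparams;
	void *rdma_cxt;

	ops->fill_dev_info(cdev, &info);	/* query common device data */
	rdma_cxt = ops->rdma_get_rdma_ctx(cdev);

	/* ops->rdma_init(cdev, &start_params) would run here */

	ops->rdma_add_user(rdma_cxt, &uparams);	/* per-user doorbell window */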
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
new file mode 100644
index 000000000000..99fbe6d55acb
--- /dev/null
+++ b/include/linux/qed/qede_roce.h
@@ -0,0 +1,88 @@
1/* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#ifndef QEDE_ROCE_H
33#define QEDE_ROCE_H
34
35struct qedr_dev;
36struct qed_dev;
37struct qede_dev;
38
39enum qede_roce_event {
40 QEDE_UP,
41 QEDE_DOWN,
42 QEDE_CHANGE_ADDR,
43 QEDE_CLOSE
44};
45
46struct qede_roce_event_work {
47 struct list_head list;
48 struct work_struct work;
49 void *ptr;
50 enum qede_roce_event event;
51};
52
53struct qedr_driver {
54 unsigned char name[32];
55
56 struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
57 struct net_device *);
58
59 void (*remove)(struct qedr_dev *);
60 void (*notify)(struct qedr_dev *, enum qede_roce_event);
61};
62
63/* APIs for the RoCE driver to register its callback handlers, which are
64 * invoked when the device is added or removed, and on ifup/ifdown events.
65 */
66int qede_roce_register_driver(struct qedr_driver *drv);
67void qede_roce_unregister_driver(struct qedr_driver *drv);
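In practice the upper RoCE driver fills a qedr_driver with its add/remove/notify hooks and hands it to qede_roce_register_driver(). A minimal, hypothetical sketch (callback bodies and the my_qedr_* names are illustrative only):

/* Hedged sketch of a consumer-side registration */
static struct qedr_dev *my_qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				    struct net_device *ndev)
{
	return NULL;	/* would allocate and return the qedr device */
}

static void my_qedr_remove(struct qedr_dev *dev) { }

static void my_qedr_notify(struct qedr_dev *dev, enum qede_roce_event event) { }

static struct qedr_driver my_qedr_driver = {
	.name	= "my_qedr",
	.add	= my_qedr_add,
	.remove	= my_qedr_remove,
	.notify	= my_qedr_notify,
};

/* module init/exit would call:
 *	qede_roce_register_driver(&my_qedr_driver);
 *	qede_roce_unregister_driver(&my_qedr_driver);
 */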
68
69bool qede_roce_supported(struct qede_dev *dev);
70
71#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
72int qede_roce_dev_add(struct qede_dev *dev);
73void qede_roce_dev_event_open(struct qede_dev *dev);
74void qede_roce_dev_event_close(struct qede_dev *dev);
75void qede_roce_dev_remove(struct qede_dev *dev);
76void qede_roce_event_changeaddr(struct qede_dev *qedr);
77#else
78static inline int qede_roce_dev_add(struct qede_dev *dev)
79{
80 return 0;
81}
82
83static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
84static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
85static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
86static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
87#endif
88#endif
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 187991c1f439..7663725faa94 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -28,6 +28,7 @@
28 28 #define RDMA_MAX_PDS (64 * 1024)
29 29
30 30 #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
   31 #define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
31 32
32 33 #define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
33 34