aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYuval Mintz <Yuval.Mintz@caviumnetworks.com>2016-10-13 15:57:02 -0400
committerDavid S. Miller <davem@davemloft.net>2016-10-14 11:07:22 -0400
commit0189efb8f4f830b9ac7a7c56c0c6e260859e950d (patch)
treeddb21be3b6b763fd619a8028f57c744d63537435
parentce6b04ee8b112cc9d5ef41ba697a3ffabc630f42 (diff)
qed*: Fix Kconfig dependencies with INFINIBAND_QEDR
The qedr driver would require a tristate Kconfig option [to allow it to compile as a module], and toward that end we've added the INFINIBAND_QEDR option. But as we've made the compilation of the qed/qede infrastructure required for RoCE dependent on the option we'd be facing linking difficulties in case that QED=y or QEDE=y, and INFINIBAND_QEDR=m. To resolve this, we separate between the INFINIBAND_QEDR option and the infrastructure support in qed/qede by introducing a new QED_RDMA option which would be selected by INFINIBAND_QEDR but would be a boolean instead of a tristate; Following that, the qed/qede is fixed based on this new option so that all config combinations would be supported. Fixes: cee9fbd8e2e9 ("qede: add qedr framework") Reported-by: Arnd Bergmann <arnd@arndb.de> Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig4
-rw-r--r--drivers/net/ethernet/qlogic/qed/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c91
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.h75
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/Makefile2
-rw-r--r--include/linux/qed/qede_roce.h2
12 files changed, 120 insertions, 130 deletions
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0df1391f9663..77567727528a 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -107,10 +107,14 @@ config QEDE
107 ---help--- 107 ---help---
108 This enables the support for ... 108 This enables the support for ...
109 109
110config QED_RDMA
111 bool
112
110config INFINIBAND_QEDR 113config INFINIBAND_QEDR
111 tristate "QLogic qede RoCE sources [debug]" 114 tristate "QLogic qede RoCE sources [debug]"
112 depends on QEDE && 64BIT 115 depends on QEDE && 64BIT
113 select QED_LL2 116 select QED_LL2
117 select QED_RDMA
114 default n 118 default n
115 ---help--- 119 ---help---
116 This provides a temporary node that allows the compilation 120 This provides a temporary node that allows the compilation
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index cda0af7fbc20..967acf322c09 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -5,4 +5,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
5 qed_selftest.o qed_dcbx.o qed_debug.o 5 qed_selftest.o qed_dcbx.o qed_debug.o
6qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o 6qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
7qed-$(CONFIG_QED_LL2) += qed_ll2.o 7qed-$(CONFIG_QED_LL2) += qed_ll2.o
8qed-$(CONFIG_INFINIBAND_QEDR) += qed_roce.o 8qed-$(CONFIG_QED_RDMA) += qed_roce.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 82370a1a59ad..277db7831f7b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -47,13 +47,8 @@
47#define TM_ALIGN BIT(TM_SHIFT) 47#define TM_ALIGN BIT(TM_SHIFT)
48#define TM_ELEM_SIZE 4 48#define TM_ELEM_SIZE 4
49 49
50/* ILT constants */
51#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
52/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */ 50/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
53#define ILT_DEFAULT_HW_P_SIZE 4 51#define ILT_DEFAULT_HW_P_SIZE (IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
54#else
55#define ILT_DEFAULT_HW_P_SIZE 3
56#endif
57 52
58#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) 53#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
59#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET 54#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 754f6a908858..21adf5208320 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1422,19 +1422,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
1422 u32 *feat_num = p_hwfn->hw_info.feat_num; 1422 u32 *feat_num = p_hwfn->hw_info.feat_num;
1423 int num_features = 1; 1423 int num_features = 1;
1424 1424
1425#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 1425 if (IS_ENABLED(CONFIG_QED_RDMA) &&
1426 /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide the 1426 p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
1427 * status blocks equally between L2 / RoCE but with consideration as 1427 /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide
1428 * to how many l2 queues / cnqs we have 1428 * the status blocks equally between L2 / RoCE but with
1429 */ 1429 * consideration as to how many l2 queues / cnqs we have.
1430 if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { 1430 */
1431 num_features++; 1431 num_features++;
1432 1432
1433 feat_num[QED_RDMA_CNQ] = 1433 feat_num[QED_RDMA_CNQ] =
1434 min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features, 1434 min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
1435 RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); 1435 RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
1436 } 1436 }
1437#endif 1437
1438 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) / 1438 feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1439 num_features, 1439 num_features,
1440 RESC_NUM(p_hwfn, QED_L2_QUEUE)); 1440 RESC_NUM(p_hwfn, QED_L2_QUEUE));
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 02a8be2faed7..7856e5241b52 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -38,6 +38,7 @@
38#include "qed_mcp.h" 38#include "qed_mcp.h"
39#include "qed_reg_addr.h" 39#include "qed_reg_addr.h"
40#include "qed_sp.h" 40#include "qed_sp.h"
41#include "qed_roce.h"
41 42
42#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) 43#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred)
43#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) 44#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 80a5dc2d652d..4e3d62a16cab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -293,24 +293,4 @@ void qed_ll2_setup(struct qed_hwfn *p_hwfn,
293 */ 293 */
294void qed_ll2_free(struct qed_hwfn *p_hwfn, 294void qed_ll2_free(struct qed_hwfn *p_hwfn,
295 struct qed_ll2_info *p_ll2_connections); 295 struct qed_ll2_info *p_ll2_connections);
296void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
297 u8 connection_handle,
298 void *cookie,
299 dma_addr_t rx_buf_addr,
300 u16 data_length,
301 u8 data_length_error,
302 u16 parse_flags,
303 u16 vlan,
304 u32 src_mac_addr_hi,
305 u16 src_mac_addr_lo, bool b_last_packet);
306void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
307 u8 connection_handle,
308 void *cookie,
309 dma_addr_t first_frag_addr,
310 bool b_last_fragment, bool b_last_packet);
311void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
312 u8 connection_handle,
313 void *cookie,
314 dma_addr_t first_frag_addr,
315 bool b_last_fragment, bool b_last_packet);
316#endif 296#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 4ee3151e80c2..6eb2401b9e22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -33,10 +33,8 @@
33#include "qed_hw.h" 33#include "qed_hw.h"
34#include "qed_selftest.h" 34#include "qed_selftest.h"
35 35
36#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
37#define QED_ROCE_QPS (8192) 36#define QED_ROCE_QPS (8192)
38#define QED_ROCE_DPIS (8) 37#define QED_ROCE_DPIS (8)
39#endif
40 38
41static char version[] = 39static char version[] =
42 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 40 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -682,9 +680,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
682 enum qed_int_mode int_mode) 680 enum qed_int_mode int_mode)
683{ 681{
684 struct qed_sb_cnt_info sb_cnt_info; 682 struct qed_sb_cnt_info sb_cnt_info;
685#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 683 int num_l2_queues = 0;
686 int num_l2_queues;
687#endif
688 int rc; 684 int rc;
689 int i; 685 int i;
690 686
@@ -715,8 +711,9 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
715 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 711 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
716 cdev->num_hwfns; 712 cdev->num_hwfns;
717 713
718#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 714 if (!IS_ENABLED(CONFIG_QED_RDMA))
719 num_l2_queues = 0; 715 return 0;
716
720 for_each_hwfn(cdev, i) 717 for_each_hwfn(cdev, i)
721 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 718 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
722 719
@@ -738,7 +735,6 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
738 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 735 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
739 cdev->int_params.rdma_msix_cnt, 736 cdev->int_params.rdma_msix_cnt,
740 cdev->int_params.rdma_msix_base); 737 cdev->int_params.rdma_msix_base);
741#endif
742 738
743 return 0; 739 return 0;
744} 740}
@@ -843,18 +839,20 @@ static void qed_update_pf_params(struct qed_dev *cdev,
843{ 839{
844 int i; 840 int i;
845 841
846#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
847 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
848 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
849 /* divide by 3 the MRs to avoid MF ILT overflow */
850 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
851 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
852#endif
853 for (i = 0; i < cdev->num_hwfns; i++) { 842 for (i = 0; i < cdev->num_hwfns; i++) {
854 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 843 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
855 844
856 p_hwfn->pf_params = *params; 845 p_hwfn->pf_params = *params;
857 } 846 }
847
848 if (!IS_ENABLED(CONFIG_QED_RDMA))
849 return;
850
851 params->rdma_pf_params.num_qps = QED_ROCE_QPS;
852 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
853 /* divide by 3 the MRs to avoid MF ILT overflow */
854 params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
855 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
858} 856}
859 857
860static int qed_slowpath_start(struct qed_dev *cdev, 858static int qed_slowpath_start(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index c73638ceb1ad..187df38542f7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -129,17 +129,12 @@ static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
129 } 129 }
130} 130}
131 131
132u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id) 132static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
133{ 133{
134 /* First sb id for RoCE is after all the l2 sb */ 134 /* First sb id for RoCE is after all the l2 sb */
135 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; 135 return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
136} 136}
137 137
138u32 qed_rdma_query_cau_timer_res(void *rdma_cxt)
139{
140 return QED_CAU_DEF_RX_TIMER_RES;
141}
142
143static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, 138static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
144 struct qed_ptt *p_ptt, 139 struct qed_ptt *p_ptt,
145 struct qed_rdma_start_in_params *params) 140 struct qed_rdma_start_in_params *params)
@@ -275,7 +270,7 @@ free_rdma_info:
275 return rc; 270 return rc;
276} 271}
277 272
278void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) 273static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
279{ 274{
280 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; 275 struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
281 276
@@ -527,6 +522,26 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
527 return qed_spq_post(p_hwfn, p_ent, NULL); 522 return qed_spq_post(p_hwfn, p_ent, NULL);
528} 523}
529 524
525static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
526{
527 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
528 int rc;
529
530 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
531
532 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
533 rc = qed_rdma_bmap_alloc_id(p_hwfn,
534 &p_hwfn->p_rdma_info->tid_map, itid);
535 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
536 if (rc)
537 goto out;
538
539 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
540out:
541 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
542 return rc;
543}
544
530static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) 545static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
531{ 546{
532 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; 547 struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
@@ -573,7 +588,7 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
573 return qed_rdma_start_fw(p_hwfn, params, p_ptt); 588 return qed_rdma_start_fw(p_hwfn, params, p_ptt);
574} 589}
575 590
576int qed_rdma_stop(void *rdma_cxt) 591static int qed_rdma_stop(void *rdma_cxt)
577{ 592{
578 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 593 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
579 struct rdma_close_func_ramrod_data *p_ramrod; 594 struct rdma_close_func_ramrod_data *p_ramrod;
@@ -629,8 +644,8 @@ out:
629 return rc; 644 return rc;
630} 645}
631 646
632int qed_rdma_add_user(void *rdma_cxt, 647static int qed_rdma_add_user(void *rdma_cxt,
633 struct qed_rdma_add_user_out_params *out_params) 648 struct qed_rdma_add_user_out_params *out_params)
634{ 649{
635 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 650 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
636 u32 dpi_start_offset; 651 u32 dpi_start_offset;
@@ -664,7 +679,7 @@ int qed_rdma_add_user(void *rdma_cxt,
664 return rc; 679 return rc;
665} 680}
666 681
667struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) 682static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
668{ 683{
669 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 684 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
670 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; 685 struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
@@ -680,7 +695,7 @@ struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
680 return p_port; 695 return p_port;
681} 696}
682 697
683struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) 698static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
684{ 699{
685 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 700 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
686 701
@@ -690,7 +705,7 @@ struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
690 return p_hwfn->p_rdma_info->dev; 705 return p_hwfn->p_rdma_info->dev;
691} 706}
692 707
693void qed_rdma_free_tid(void *rdma_cxt, u32 itid) 708static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
694{ 709{
695 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 710 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
696 711
@@ -701,27 +716,7 @@ void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
701 spin_unlock_bh(&p_hwfn->p_rdma_info->lock); 716 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
702} 717}
703 718
704int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) 719static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
705{
706 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
707 int rc;
708
709 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
710
711 spin_lock_bh(&p_hwfn->p_rdma_info->lock);
712 rc = qed_rdma_bmap_alloc_id(p_hwfn,
713 &p_hwfn->p_rdma_info->tid_map, itid);
714 spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
715 if (rc)
716 goto out;
717
718 rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
719out:
720 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
721 return rc;
722}
723
724void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
725{ 720{
726 struct qed_hwfn *p_hwfn; 721 struct qed_hwfn *p_hwfn;
727 u16 qz_num; 722 u16 qz_num;
@@ -816,7 +811,7 @@ static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
816 return 0; 811 return 0;
817} 812}
818 813
819int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) 814static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
820{ 815{
821 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 816 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
822 u32 returned_id; 817 u32 returned_id;
@@ -1985,9 +1980,9 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
1985 return 0; 1980 return 0;
1986} 1981}
1987 1982
1988int qed_rdma_query_qp(void *rdma_cxt, 1983static int qed_rdma_query_qp(void *rdma_cxt,
1989 struct qed_rdma_qp *qp, 1984 struct qed_rdma_qp *qp,
1990 struct qed_rdma_query_qp_out_params *out_params) 1985 struct qed_rdma_query_qp_out_params *out_params)
1991{ 1986{
1992 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 1987 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
1993 int rc; 1988 int rc;
@@ -2022,7 +2017,7 @@ int qed_rdma_query_qp(void *rdma_cxt,
2022 return rc; 2017 return rc;
2023} 2018}
2024 2019
2025int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) 2020static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
2026{ 2021{
2027 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2022 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2028 int rc = 0; 2023 int rc = 0;
@@ -2215,9 +2210,9 @@ static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
2215 return rc; 2210 return rc;
2216} 2211}
2217 2212
2218int qed_rdma_modify_qp(void *rdma_cxt, 2213static int qed_rdma_modify_qp(void *rdma_cxt,
2219 struct qed_rdma_qp *qp, 2214 struct qed_rdma_qp *qp,
2220 struct qed_rdma_modify_qp_in_params *params) 2215 struct qed_rdma_modify_qp_in_params *params)
2221{ 2216{
2222 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2217 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2223 enum qed_roce_qp_state prev_state; 2218 enum qed_roce_qp_state prev_state;
@@ -2312,8 +2307,9 @@ int qed_rdma_modify_qp(void *rdma_cxt,
2312 return rc; 2307 return rc;
2313} 2308}
2314 2309
2315int qed_rdma_register_tid(void *rdma_cxt, 2310static int
2316 struct qed_rdma_register_tid_in_params *params) 2311qed_rdma_register_tid(void *rdma_cxt,
2312 struct qed_rdma_register_tid_in_params *params)
2317{ 2313{
2318 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2314 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2319 struct rdma_register_tid_ramrod_data *p_ramrod; 2315 struct rdma_register_tid_ramrod_data *p_ramrod;
@@ -2450,7 +2446,7 @@ int qed_rdma_register_tid(void *rdma_cxt,
2450 return rc; 2446 return rc;
2451} 2447}
2452 2448
2453int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) 2449static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
2454{ 2450{
2455 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2451 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2456 struct rdma_deregister_tid_ramrod_data *p_ramrod; 2452 struct rdma_deregister_tid_ramrod_data *p_ramrod;
@@ -2561,7 +2557,8 @@ void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2561 qed_rdma_dpm_conf(p_hwfn, p_ptt); 2557 qed_rdma_dpm_conf(p_hwfn, p_ptt);
2562} 2558}
2563 2559
2564int qed_rdma_start(void *rdma_cxt, struct qed_rdma_start_in_params *params) 2560static int qed_rdma_start(void *rdma_cxt,
2561 struct qed_rdma_start_in_params *params)
2565{ 2562{
2566 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2563 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2567 struct qed_ptt *p_ptt; 2564 struct qed_ptt *p_ptt;
@@ -2601,7 +2598,7 @@ static int qed_rdma_init(struct qed_dev *cdev,
2601 return qed_rdma_start(QED_LEADING_HWFN(cdev), params); 2598 return qed_rdma_start(QED_LEADING_HWFN(cdev), params);
2602} 2599}
2603 2600
2604void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) 2601static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
2605{ 2602{
2606 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; 2603 struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
2607 2604
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 2f091e8a0f40..691413176734 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -181,36 +181,55 @@ struct qed_rdma_qp {
181 dma_addr_t shared_queue_phys_addr; 181 dma_addr_t shared_queue_phys_addr;
182}; 182};
183 183
184int 184#if IS_ENABLED(CONFIG_QED_RDMA)
185qed_rdma_add_user(void *rdma_cxt, 185void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
186 struct qed_rdma_add_user_out_params *out_params);
187int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd);
188int qed_rdma_alloc_tid(void *rdma_cxt, u32 *tid);
189int qed_rdma_deregister_tid(void *rdma_cxt, u32 tid);
190void qed_rdma_free_tid(void *rdma_cxt, u32 tid);
191struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt);
192struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt);
193int
194qed_rdma_register_tid(void *rdma_cxt,
195 struct qed_rdma_register_tid_in_params *params);
196void qed_rdma_remove_user(void *rdma_cxt, u16 dpi);
197int qed_rdma_start(void *p_hwfn, struct qed_rdma_start_in_params *params);
198int qed_rdma_stop(void *rdma_cxt);
199u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
200u32 qed_rdma_query_cau_timer_res(void *p_hwfn);
201void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
202void qed_rdma_resc_free(struct qed_hwfn *p_hwfn);
203void qed_async_roce_event(struct qed_hwfn *p_hwfn, 186void qed_async_roce_event(struct qed_hwfn *p_hwfn,
204 struct event_ring_entry *p_eqe); 187 struct event_ring_entry *p_eqe);
205int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp); 188void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
206int qed_rdma_modify_qp(void *rdma_cxt, struct qed_rdma_qp *qp, 189 u8 connection_handle,
207 struct qed_rdma_modify_qp_in_params *params); 190 void *cookie,
208int qed_rdma_query_qp(void *rdma_cxt, struct qed_rdma_qp *qp, 191 dma_addr_t first_frag_addr,
209 struct qed_rdma_query_qp_out_params *out_params); 192 bool b_last_fragment, bool b_last_packet);
210 193void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
211#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 194 u8 connection_handle,
212void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 195 void *cookie,
196 dma_addr_t first_frag_addr,
197 bool b_last_fragment, bool b_last_packet);
198void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
199 u8 connection_handle,
200 void *cookie,
201 dma_addr_t rx_buf_addr,
202 u16 data_length,
203 u8 data_length_error,
204 u16 parse_flags,
205 u16 vlan,
206 u32 src_mac_addr_hi,
207 u16 src_mac_addr_lo, bool b_last_packet);
213#else 208#else
214void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {} 209static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
210static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
211static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
212 u8 connection_handle,
213 void *cookie,
214 dma_addr_t first_frag_addr,
215 bool b_last_fragment,
216 bool b_last_packet) {}
217static inline void qed_ll2b_release_tx_gsi_packet(struct qed_hwfn *p_hwfn,
218 u8 connection_handle,
219 void *cookie,
220 dma_addr_t first_frag_addr,
221 bool b_last_fragment,
222 bool b_last_packet) {}
223static inline void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
224 u8 connection_handle,
225 void *cookie,
226 dma_addr_t rx_buf_addr,
227 u16 data_length,
228 u8 data_length_error,
229 u16 parse_flags,
230 u16 vlan,
231 u32 src_mac_addr_hi,
232 u16 src_mac_addr_lo,
233 bool b_last_packet) {}
215#endif 234#endif
216#endif 235#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index caff41544898..9fbaf9429fd0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -28,9 +28,7 @@
28#include "qed_reg_addr.h" 28#include "qed_reg_addr.h"
29#include "qed_sp.h" 29#include "qed_sp.h"
30#include "qed_sriov.h" 30#include "qed_sriov.h"
31#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
32#include "qed_roce.h" 31#include "qed_roce.h"
33#endif
34 32
35/*************************************************************************** 33/***************************************************************************
36* Structures & Definitions 34* Structures & Definitions
@@ -240,11 +238,9 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
240 struct event_ring_entry *p_eqe) 238 struct event_ring_entry *p_eqe)
241{ 239{
242 switch (p_eqe->protocol_id) { 240 switch (p_eqe->protocol_id) {
243#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
244 case PROTOCOLID_ROCE: 241 case PROTOCOLID_ROCE:
245 qed_async_roce_event(p_hwfn, p_eqe); 242 qed_async_roce_event(p_hwfn, p_eqe);
246 return 0; 243 return 0;
247#endif
248 case PROTOCOLID_COMMON: 244 case PROTOCOLID_COMMON:
249 return qed_sriov_eqe_event(p_hwfn, 245 return qed_sriov_eqe_event(p_hwfn,
250 p_eqe->opcode, 246 p_eqe->opcode,
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 28dc58919c85..048a230c3ce0 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o
2 2
3qede-y := qede_main.o qede_ethtool.o 3qede-y := qede_main.o qede_ethtool.o
4qede-$(CONFIG_DCB) += qede_dcbnl.o 4qede-$(CONFIG_DCB) += qede_dcbnl.o
5qede-$(CONFIG_INFINIBAND_QEDR) += qede_roce.o 5qede-$(CONFIG_QED_RDMA) += qede_roce.o
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
index 99fbe6d55acb..f48d64b0e2fb 100644
--- a/include/linux/qed/qede_roce.h
+++ b/include/linux/qed/qede_roce.h
@@ -68,7 +68,7 @@ void qede_roce_unregister_driver(struct qedr_driver *drv);
68 68
69bool qede_roce_supported(struct qede_dev *dev); 69bool qede_roce_supported(struct qede_dev *dev);
70 70
71#if IS_ENABLED(CONFIG_INFINIBAND_QEDR) 71#if IS_ENABLED(CONFIG_QED_RDMA)
72int qede_roce_dev_add(struct qede_dev *dev); 72int qede_roce_dev_add(struct qede_dev *dev);
73void qede_roce_dev_event_open(struct qede_dev *dev); 73void qede_roce_dev_event_open(struct qede_dev *dev);
74void qede_roce_dev_event_close(struct qede_dev *dev); 74void qede_roce_dev_event_close(struct qede_dev *dev);