author		Yuval Mintz <Yuval.Mintz@caviumnetworks.com>	2016-10-01 14:59:55 -0400
committer	David S. Miller <davem@davemloft.net>		2016-10-03 23:22:46 -0400
commit		0a7fb11c23c0fb8f5ad37f285f40348f1ab9ccbd (patch)
tree		bea79cf204080cbc89e454dd04a62c77e5a69036 /drivers/net
parent		b9118b7221ebb12156d2b08d4d5647bc6076d6bb (diff)
qed: Add Light L2 support
Protocols other than the networking driver need the ability
to pass some L2 traffic, usually [although not exclusively] for
the purpose of management traffic.
Signed-off-by: Yuval Mintz <Yuval.Mintz@caviumnetworks.com>
Signed-off-by: Ram Amrani <Ram.Amrani@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/qlogic/Kconfig		    8
-rw-r--r--	drivers/net/ethernet/qlogic/qed/Makefile	    1
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed.h		    9
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_cxt.c	    2
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_dev.c	  120
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_dev_api.h	   20
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_ll2.c	 1699
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_ll2.h	  289
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_main.c	   23
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_reg_addr.h	   22
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_sp.h	    4
11 files changed, 2194 insertions(+), 3 deletions(-)
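For orientation (this note and sketch are not part of the patch): other protocol drivers consume the new interface through the qed_ll2_cb_ops callbacks that qed_ll2.c invokes below. Judging only from the rx_cb/tx_cb call sites visible in this patch, a minimal client sketch could look like the following; the argument lists are inferred from those call sites, not taken from qed_ll2_if.h, so treat them as assumptions:

	/* Hypothetical LL2 client callbacks; signatures inferred from the
	 * cbs->rx_cb()/cbs->tx_cb() call sites in qed_ll2.c below.
	 */
	static void example_ll2_rx_cb(void *cookie, struct sk_buff *skb,
				      u32 opaque_data_0, u32 opaque_data_1)
	{
		/* Ownership of the skb passes to the client here; consume it */
		dev_kfree_skb_any(skb);
	}

	static void example_ll2_tx_cb(void *cookie, struct sk_buff *skb,
				      bool b_last_fragment)
	{
		/* Tx completed; qed_ll2 unmaps and frees the skb itself */
	}

	static const struct qed_ll2_cb_ops example_ll2_ops = {
		.rx_cb = example_ll2_rx_cb,
		.tx_cb = example_ll2_tx_cb,
	};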
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6ba48406899e..9eb3b1914cf5 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -88,6 +88,14 @@ config QED
 	---help---
 	  This enables the support for ...
 
+config QED_LL2
+	bool "Qlogic QED Light L2 interface"
+	default n
+	depends on QED
+	---help---
+	  This enables support for Light L2 interface which is required
+	  by all qed protocol drivers other than qede.
+
 config QED_SRIOV
 	bool "QLogic QED 25/40/100Gb SR-IOV support"
 	depends on QED && PCI_IOV
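Usage note (illustrative, not part of the patch): since QED_LL2 is a bool that depends on QED, a kernel configuration wanting the light-L2 interface on top of the core module would set something like:

	CONFIG_QED=m
	CONFIG_QED_LL2=y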
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 86a5b4f5f870..e067098f10a9 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -4,3 +4,4 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
 	 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
 	 qed_selftest.o qed_dcbx.o qed_debug.o
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 0929582fc82b..91b571a3670b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -72,6 +72,7 @@ struct qed_sb_info;
 struct qed_sb_attn_info;
 struct qed_cxt_mngr;
 struct qed_sb_sp_info;
+struct qed_ll2_info;
 struct qed_mcp_info;
 
 struct qed_rt_data {
@@ -152,6 +153,7 @@ enum QED_RESOURCES {
 	QED_MAC,
 	QED_VLAN,
 	QED_ILT,
+	QED_LL2_QUEUE,
 	QED_MAX_RESC,
 };
 
@@ -360,6 +362,8 @@ struct qed_hwfn {
 	struct qed_sb_attn_info		*p_sb_attn;
 
 	/* Protocol related */
+	bool				using_ll2;
+	struct qed_ll2_info		*p_ll2_info;
 	struct qed_pf_params		pf_params;
 
 	bool b_rdma_enabled_in_prs;
@@ -564,6 +568,11 @@ struct qed_dev {
 
 	struct qed_dbg_params		dbg_params;
 
+#ifdef CONFIG_QED_LL2
+	struct qed_cb_ll2_info		*ll2;
+	u8				ll2_mac_address[ETH_ALEN];
+#endif
+
 	const struct firmware		*firmware;
 };
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index dd579b2ef224..d9bea2a9c9f7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1839,6 +1839,8 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
 	/* Set the number of required CORE connections */
 	u32 core_cids = 1; /* SPQ */
 
+	if (p_hwfn->using_ll2)
+		core_cids += 4;
 	qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
 
 	switch (p_hwfn->hw_info.personality) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 13d8b4075b01..9a8e153df841 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -29,6 +29,7 @@
 #include "qed_hw.h"
 #include "qed_init_ops.h"
 #include "qed_int.h"
+#include "qed_ll2.h"
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
@@ -147,6 +148,9 @@ void qed_resc_free(struct qed_dev *cdev)
 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
 		qed_int_free(p_hwfn);
+#ifdef CONFIG_QED_LL2
+		qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
 		qed_iov_free(p_hwfn);
 		qed_dmae_info_free(p_hwfn);
 		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -403,6 +407,9 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 
 int qed_resc_alloc(struct qed_dev *cdev)
 {
+#ifdef CONFIG_QED_LL2
+	struct qed_ll2_info *p_ll2_info;
+#endif
 	struct qed_consq *p_consq;
 	struct qed_eq *p_eq;
 	int i, rc = 0;
@@ -513,6 +520,15 @@ int qed_resc_alloc(struct qed_dev *cdev)
 			goto alloc_no_mem;
 		p_hwfn->p_consq = p_consq;
 
+#ifdef CONFIG_QED_LL2
+		if (p_hwfn->using_ll2) {
+			p_ll2_info = qed_ll2_alloc(p_hwfn);
+			if (!p_ll2_info)
+				goto alloc_no_mem;
+			p_hwfn->p_ll2_info = p_ll2_info;
+		}
+#endif
+
 		/* DMA info initialization */
 		rc = qed_dmae_info_alloc(p_hwfn);
 		if (rc)
@@ -561,6 +577,10 @@ void qed_resc_setup(struct qed_dev *cdev)
 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
 
 		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_QED_LL2
+		if (p_hwfn->using_ll2)
+			qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
 	}
 }
 
@@ -1304,6 +1324,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
 			     num_funcs;
 	resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
+	resc_num[QED_LL2_QUEUE] = MAX_NUM_LL2_RX_QUEUES / num_funcs;
 
 	for (i = 0; i < QED_MAX_RESC; i++)
 		resc_start[i] = resc_num[i] * enabled_func_idx;
@@ -1327,7 +1348,8 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 		   "RL = %d start = %d\n"
 		   "MAC = %d start = %d\n"
 		   "VLAN = %d start = %d\n"
-		   "ILT = %d start = %d\n",
+		   "ILT = %d start = %d\n"
+		   "LL2_QUEUE = %d start = %d\n",
 		   p_hwfn->hw_info.resc_num[QED_SB],
 		   p_hwfn->hw_info.resc_start[QED_SB],
 		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
@@ -1343,7 +1365,9 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 		   p_hwfn->hw_info.resc_num[QED_VLAN],
 		   p_hwfn->hw_info.resc_start[QED_VLAN],
 		   p_hwfn->hw_info.resc_num[QED_ILT],
-		   p_hwfn->hw_info.resc_start[QED_ILT]);
+		   p_hwfn->hw_info.resc_start[QED_ILT],
+		   RESC_NUM(p_hwfn, QED_LL2_QUEUE),
+		   RESC_START(p_hwfn, QED_LL2_QUEUE));
 
 	return 0;
 }
@@ -2133,6 +2157,98 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
 	return 0;
 }
 
+static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
+				  u8 *p_filter)
+{
+	*p_high = p_filter[1] | (p_filter[0] << 8);
+	*p_low = p_filter[5] | (p_filter[4] << 8) |
+		 (p_filter[3] << 16) | (p_filter[2] << 24);
+}
+
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, u8 *p_filter)
+{
+	u32 high = 0, low = 0, en;
+	int i;
+
+	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+		return 0;
+
+	qed_llh_mac_to_filter(&high, &low, p_filter);
+
+	/* Find a free entry and utilize it */
+	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+		en = qed_rd(p_hwfn, p_ptt,
+			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+		if (en)
+			continue;
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_LLH_FUNC_FILTER_VALUE +
+		       2 * i * sizeof(u32), low);
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_LLH_FUNC_FILTER_VALUE +
+		       (2 * i + 1) * sizeof(u32), high);
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+		       i * sizeof(u32), 0);
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+		break;
+	}
+	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to find an empty LLH filter to utilize\n");
+		return -EINVAL;
+	}
+
+	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+		   "mac: %pM is added at %d\n",
+		   p_filter, i);
+
+	return 0;
+}
+
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt, u8 *p_filter)
+{
+	u32 high = 0, low = 0;
+	int i;
+
+	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+		return;
+
+	qed_llh_mac_to_filter(&high, &low, p_filter);
+
+	/* Find the entry and clean it */
+	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+		if (qed_rd(p_hwfn, p_ptt,
+			   NIG_REG_LLH_FUNC_FILTER_VALUE +
+			   2 * i * sizeof(u32)) != low)
+			continue;
+		if (qed_rd(p_hwfn, p_ptt,
+			   NIG_REG_LLH_FUNC_FILTER_VALUE +
+			   (2 * i + 1) * sizeof(u32)) != high)
+			continue;
+
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
+		qed_wr(p_hwfn, p_ptt,
+		       NIG_REG_LLH_FUNC_FILTER_VALUE +
+		       (2 * i + 1) * sizeof(u32), 0);
+
+		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+			   "mac: %pM is removed from %d\n",
+			   p_filter, i);
+		break;
+	}
+	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
+}
+
 static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
 			    u32 hw_addr, void *p_eth_qzone,
 			    size_t eth_qzone_size, u8 timeset)
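As an aside (not part of the patch): qed_llh_mac_to_filter() above splits a MAC address into a 16-bit "high" word holding the first two octets and a 32-bit "low" word holding the remaining four, matching the two NIG_REG_LLH_FUNC_FILTER_VALUE registers written per entry. A standalone sketch of the same packing, using a made-up example MAC:

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors qed_llh_mac_to_filter(): octets 0-1 -> high, 2-5 -> low.
	 * The cast guards against signed-int promotion for octets >= 0x80.
	 */
	static void mac_to_filter(uint32_t *p_high, uint32_t *p_low,
				  const uint8_t *p_filter)
	{
		*p_high = p_filter[1] | (p_filter[0] << 8);
		*p_low = p_filter[5] | (p_filter[4] << 8) |
			 (p_filter[3] << 16) | ((uint32_t)p_filter[2] << 24);
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };
		uint32_t high, low;

		mac_to_filter(&high, &low, mac);
		/* Prints: high=0x000e low=0x1eaabbcc */
		printf("high=0x%04x low=0x%08x\n", high, low);
		return 0;
	}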
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index 343bb0344f62..b6711c106597 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -310,6 +310,26 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
 		   u8 *dst_id);
 
 /**
+ * @brief qed_llh_add_mac_filter - configures a MAC filter in llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to add
+ */
+int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
+ * @brief qed_llh_remove_mac_filter - removes a MAC filter from llh
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_filter - MAC to remove
+ */
+void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt, u8 *p_filter);
+
+/**
  * *@brief Cleanup of previous driver remains prior to load
  *
  * @param p_hwfn
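Usage note (illustrative sketch, not from the patch): both helpers take a PTT window, so a caller would bracket them with the driver's usual acquire/release pair; this assumes qed_ptt_acquire()/qed_ptt_release() as used elsewhere in qed:

	/* Hypothetical caller: add an LLH MAC filter, then remove it */
	static int example_llh_filter(struct qed_hwfn *p_hwfn, u8 *mac)
	{
		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
		int rc;

		if (!p_ptt)
			return -EAGAIN;

		rc = qed_llh_add_mac_filter(p_hwfn, p_ptt, mac);
		if (!rc)
			qed_llh_remove_mac_filter(p_hwfn, p_ptt, mac);

		qed_ptt_release(p_hwfn, p_ptt);
		return rc;
	}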
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c new file mode 100644 index 000000000000..e0ec8ed2f92c --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
@@ -0,0 +1,1699 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * | ||
3 | * Copyright (c) 2015 QLogic Corporation | ||
4 | * | ||
5 | * This software is available under the terms of the GNU General Public License | ||
6 | * (GPL) Version 2, available from the file COPYING in the main directory of | ||
7 | * this source tree. | ||
8 | */ | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | #include <asm/byteorder.h> | ||
12 | #include <linux/dma-mapping.h> | ||
13 | #include <linux/if_vlan.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/stddef.h> | ||
18 | #include <linux/version.h> | ||
19 | #include <linux/workqueue.h> | ||
20 | #include <net/ipv6.h> | ||
21 | #include <linux/bitops.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/etherdevice.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/mutex.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/string.h> | ||
30 | #include <linux/qed/qed_ll2_if.h> | ||
31 | #include "qed.h" | ||
32 | #include "qed_cxt.h" | ||
33 | #include "qed_dev_api.h" | ||
34 | #include "qed_hsi.h" | ||
35 | #include "qed_hw.h" | ||
36 | #include "qed_int.h" | ||
37 | #include "qed_ll2.h" | ||
38 | #include "qed_mcp.h" | ||
39 | #include "qed_reg_addr.h" | ||
40 | #include "qed_sp.h" | ||
41 | |||
42 | #define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) | ||
43 | #define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) | ||
44 | |||
45 | #define QED_LL2_TX_SIZE (256) | ||
46 | #define QED_LL2_RX_SIZE (4096) | ||
47 | |||
48 | struct qed_cb_ll2_info { | ||
49 | int rx_cnt; | ||
50 | u32 rx_size; | ||
51 | u8 handle; | ||
52 | bool frags_mapped; | ||
53 | |||
54 | /* Lock protecting LL2 buffer lists in sleepless context */ | ||
55 | spinlock_t lock; | ||
56 | struct list_head list; | ||
57 | |||
58 | const struct qed_ll2_cb_ops *cbs; | ||
59 | void *cb_cookie; | ||
60 | }; | ||
61 | |||
62 | struct qed_ll2_buffer { | ||
63 | struct list_head list; | ||
64 | void *data; | ||
65 | dma_addr_t phys_addr; | ||
66 | }; | ||
67 | |||
68 | static void qed_ll2b_complete_tx_packet(struct qed_hwfn *p_hwfn, | ||
69 | u8 connection_handle, | ||
70 | void *cookie, | ||
71 | dma_addr_t first_frag_addr, | ||
72 | bool b_last_fragment, | ||
73 | bool b_last_packet) | ||
74 | { | ||
75 | struct qed_dev *cdev = p_hwfn->cdev; | ||
76 | struct sk_buff *skb = cookie; | ||
77 | |||
78 | /* All we need to do is release the mapping */ | ||
79 | dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr, | ||
80 | skb_headlen(skb), DMA_TO_DEVICE); | ||
81 | |||
82 | if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb) | ||
83 | cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb, | ||
84 | b_last_fragment); | ||
85 | |||
86 | if (cdev->ll2->frags_mapped) | ||
87 | /* Case where mapped frags were received, need to | ||
88 | * free skb with nr_frags marked as 0 | ||
89 | */ | ||
90 | skb_shinfo(skb)->nr_frags = 0; | ||
91 | |||
92 | dev_kfree_skb_any(skb); | ||
93 | } | ||
94 | |||
95 | static int qed_ll2_alloc_buffer(struct qed_dev *cdev, | ||
96 | u8 **data, dma_addr_t *phys_addr) | ||
97 | { | ||
98 | *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC); | ||
99 | if (!(*data)) { | ||
100 | DP_INFO(cdev, "Failed to allocate LL2 buffer data\n"); | ||
101 | return -ENOMEM; | ||
102 | } | ||
103 | |||
104 | *phys_addr = dma_map_single(&cdev->pdev->dev, | ||
105 | ((*data) + NET_SKB_PAD), | ||
106 | cdev->ll2->rx_size, DMA_FROM_DEVICE); | ||
107 | if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) { | ||
108 | DP_INFO(cdev, "Failed to map LL2 buffer data\n"); | ||
109 | kfree((*data)); | ||
110 | return -ENOMEM; | ||
111 | } | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static int qed_ll2_dealloc_buffer(struct qed_dev *cdev, | ||
117 | struct qed_ll2_buffer *buffer) | ||
118 | { | ||
119 | spin_lock_bh(&cdev->ll2->lock); | ||
120 | |||
121 | dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, | ||
122 | cdev->ll2->rx_size, DMA_FROM_DEVICE); | ||
123 | kfree(buffer->data); | ||
124 | list_del(&buffer->list); | ||
125 | |||
126 | cdev->ll2->rx_cnt--; | ||
127 | if (!cdev->ll2->rx_cnt) | ||
128 | DP_INFO(cdev, "All LL2 entries were removed\n"); | ||
129 | |||
130 | spin_unlock_bh(&cdev->ll2->lock); | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static void qed_ll2_kill_buffers(struct qed_dev *cdev) | ||
136 | { | ||
137 | struct qed_ll2_buffer *buffer, *tmp_buffer; | ||
138 | |||
139 | list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) | ||
140 | qed_ll2_dealloc_buffer(cdev, buffer); | ||
141 | } | ||
142 | |||
143 | void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn, | ||
144 | u8 connection_handle, | ||
145 | struct qed_ll2_rx_packet *p_pkt, | ||
146 | struct core_rx_fast_path_cqe *p_cqe, | ||
147 | bool b_last_packet) | ||
148 | { | ||
149 | u16 packet_length = le16_to_cpu(p_cqe->packet_length); | ||
150 | struct qed_ll2_buffer *buffer = p_pkt->cookie; | ||
151 | struct qed_dev *cdev = p_hwfn->cdev; | ||
152 | u16 vlan = le16_to_cpu(p_cqe->vlan); | ||
153 | u32 opaque_data_0, opaque_data_1; | ||
154 | u8 pad = p_cqe->placement_offset; | ||
155 | dma_addr_t new_phys_addr; | ||
156 | struct sk_buff *skb; | ||
157 | bool reuse = false; | ||
158 | int rc = -EINVAL; | ||
159 | u8 *new_data; | ||
160 | |||
161 | opaque_data_0 = le32_to_cpu(p_cqe->opaque_data.data[0]); | ||
162 | opaque_data_1 = le32_to_cpu(p_cqe->opaque_data.data[1]); | ||
163 | |||
164 | DP_VERBOSE(p_hwfn, | ||
165 | (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA), | ||
166 | "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n", | ||
167 | (u64)p_pkt->rx_buf_addr, pad, packet_length, | ||
168 | le16_to_cpu(p_cqe->parse_flags.flags), vlan, | ||
169 | opaque_data_0, opaque_data_1); | ||
170 | |||
171 | if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) { | ||
172 | print_hex_dump(KERN_INFO, "", | ||
173 | DUMP_PREFIX_OFFSET, 16, 1, | ||
174 | buffer->data, packet_length, false); | ||
175 | } | ||
176 | |||
177 | /* Determine if data is valid */ | ||
178 | if (packet_length < ETH_HLEN) | ||
179 | reuse = true; | ||
180 | |||
181 | /* Allocate a replacement for buffer; Reuse upon failure */ | ||
182 | if (!reuse) | ||
183 | rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data, | ||
184 | &new_phys_addr); | ||
185 | |||
186 | /* If need to reuse or there's no replacement buffer, repost this */ | ||
187 | if (rc) | ||
188 | goto out_post; | ||
189 | |||
190 | skb = build_skb(buffer->data, 0); | ||
191 | if (!skb) { | ||
192 | rc = -ENOMEM; | ||
193 | goto out_post; | ||
194 | } | ||
195 | |||
196 | pad += NET_SKB_PAD; | ||
197 | skb_reserve(skb, pad); | ||
198 | skb_put(skb, packet_length); | ||
199 | skb_checksum_none_assert(skb); | ||
200 | |||
201 | /* Get parital ethernet information instead of eth_type_trans(), | ||
202 | * Since we don't have an associated net_device. | ||
203 | */ | ||
204 | skb_reset_mac_header(skb); | ||
205 | skb->protocol = eth_hdr(skb)->h_proto; | ||
206 | |||
207 | /* Pass SKB onward */ | ||
208 | if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { | ||
209 | if (vlan) | ||
210 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); | ||
211 | cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, | ||
212 | opaque_data_0, opaque_data_1); | ||
213 | } | ||
214 | |||
215 | /* Update Buffer information and update FW producer */ | ||
216 | buffer->data = new_data; | ||
217 | buffer->phys_addr = new_phys_addr; | ||
218 | |||
219 | out_post: | ||
220 | rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), cdev->ll2->handle, | ||
221 | buffer->phys_addr, 0, buffer, 1); | ||
222 | |||
223 | if (rc) | ||
224 | qed_ll2_dealloc_buffer(cdev, buffer); | ||
225 | } | ||
226 | |||
227 | static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, | ||
228 | u8 connection_handle, | ||
229 | bool b_lock, | ||
230 | bool b_only_active) | ||
231 | { | ||
232 | struct qed_ll2_info *p_ll2_conn, *p_ret = NULL; | ||
233 | |||
234 | if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) | ||
235 | return NULL; | ||
236 | |||
237 | if (!p_hwfn->p_ll2_info) | ||
238 | return NULL; | ||
239 | |||
240 | p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; | ||
241 | |||
242 | if (b_only_active) { | ||
243 | if (b_lock) | ||
244 | mutex_lock(&p_ll2_conn->mutex); | ||
245 | if (p_ll2_conn->b_active) | ||
246 | p_ret = p_ll2_conn; | ||
247 | if (b_lock) | ||
248 | mutex_unlock(&p_ll2_conn->mutex); | ||
249 | } else { | ||
250 | p_ret = p_ll2_conn; | ||
251 | } | ||
252 | |||
253 | return p_ret; | ||
254 | } | ||
255 | |||
256 | static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, | ||
257 | u8 connection_handle) | ||
258 | { | ||
259 | return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true); | ||
260 | } | ||
261 | |||
262 | static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn, | ||
263 | u8 connection_handle) | ||
264 | { | ||
265 | return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true); | ||
266 | } | ||
267 | |||
268 | static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn | ||
269 | *p_hwfn, | ||
270 | u8 connection_handle) | ||
271 | { | ||
272 | return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false); | ||
273 | } | ||
274 | |||
275 | static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | ||
276 | { | ||
277 | bool b_last_packet = false, b_last_frag = false; | ||
278 | struct qed_ll2_tx_packet *p_pkt = NULL; | ||
279 | struct qed_ll2_info *p_ll2_conn; | ||
280 | struct qed_ll2_tx_queue *p_tx; | ||
281 | |||
282 | p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); | ||
283 | if (!p_ll2_conn) | ||
284 | return; | ||
285 | |||
286 | p_tx = &p_ll2_conn->tx_queue; | ||
287 | |||
288 | while (!list_empty(&p_tx->active_descq)) { | ||
289 | p_pkt = list_first_entry(&p_tx->active_descq, | ||
290 | struct qed_ll2_tx_packet, list_entry); | ||
291 | if (!p_pkt) | ||
292 | break; | ||
293 | |||
294 | list_del(&p_pkt->list_entry); | ||
295 | b_last_packet = list_empty(&p_tx->active_descq); | ||
296 | list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); | ||
297 | p_tx->cur_completing_packet = *p_pkt; | ||
298 | p_tx->cur_completing_bd_idx = 1; | ||
299 | b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; | ||
300 | |||
301 | qed_ll2b_complete_tx_packet(p_hwfn, p_ll2_conn->my_id, | ||
302 | p_pkt->cookie, | ||
303 | p_pkt->bds_set[0].tx_frag, | ||
304 | b_last_frag, b_last_packet); | ||
305 | } | ||
306 | } | ||
307 | |||
308 | static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) | ||
309 | { | ||
310 | struct qed_ll2_info *p_ll2_conn = p_cookie; | ||
311 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; | ||
312 | u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0; | ||
313 | struct qed_ll2_tx_packet *p_pkt; | ||
314 | bool b_last_frag = false; | ||
315 | unsigned long flags; | ||
316 | int rc = -EINVAL; | ||
317 | |||
318 | spin_lock_irqsave(&p_tx->lock, flags); | ||
319 | if (p_tx->b_completing_packet) { | ||
320 | rc = -EBUSY; | ||
321 | goto out; | ||
322 | } | ||
323 | |||
324 | new_idx = le16_to_cpu(*p_tx->p_fw_cons); | ||
325 | num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); | ||
326 | while (num_bds) { | ||
327 | if (list_empty(&p_tx->active_descq)) | ||
328 | goto out; | ||
329 | |||
330 | p_pkt = list_first_entry(&p_tx->active_descq, | ||
331 | struct qed_ll2_tx_packet, list_entry); | ||
332 | if (!p_pkt) | ||
333 | goto out; | ||
334 | |||
335 | p_tx->b_completing_packet = true; | ||
336 | p_tx->cur_completing_packet = *p_pkt; | ||
337 | num_bds_in_packet = p_pkt->bd_used; | ||
338 | list_del(&p_pkt->list_entry); | ||
339 | |||
340 | if (num_bds < num_bds_in_packet) { | ||
341 | DP_NOTICE(p_hwfn, | ||
342 | "Rest of BDs does not cover whole packet\n"); | ||
343 | goto out; | ||
344 | } | ||
345 | |||
346 | num_bds -= num_bds_in_packet; | ||
347 | p_tx->bds_idx += num_bds_in_packet; | ||
348 | while (num_bds_in_packet--) | ||
349 | qed_chain_consume(&p_tx->txq_chain); | ||
350 | |||
351 | p_tx->cur_completing_bd_idx = 1; | ||
352 | b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; | ||
353 | list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); | ||
354 | |||
355 | spin_unlock_irqrestore(&p_tx->lock, flags); | ||
356 | qed_ll2b_complete_tx_packet(p_hwfn, | ||
357 | p_ll2_conn->my_id, | ||
358 | p_pkt->cookie, | ||
359 | p_pkt->bds_set[0].tx_frag, | ||
360 | b_last_frag, !num_bds); | ||
361 | spin_lock_irqsave(&p_tx->lock, flags); | ||
362 | } | ||
363 | |||
364 | p_tx->b_completing_packet = false; | ||
365 | rc = 0; | ||
366 | out: | ||
367 | spin_unlock_irqrestore(&p_tx->lock, flags); | ||
368 | return rc; | ||
369 | } | ||
370 | |||
371 | static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn, | ||
372 | struct qed_ll2_info *p_ll2_conn, | ||
373 | union core_rx_cqe_union *p_cqe, | ||
374 | unsigned long lock_flags, | ||
375 | bool b_last_cqe) | ||
376 | { | ||
377 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; | ||
378 | struct qed_ll2_rx_packet *p_pkt = NULL; | ||
379 | |||
380 | if (!list_empty(&p_rx->active_descq)) | ||
381 | p_pkt = list_first_entry(&p_rx->active_descq, | ||
382 | struct qed_ll2_rx_packet, list_entry); | ||
383 | if (!p_pkt) { | ||
384 | DP_NOTICE(p_hwfn, | ||
385 | "LL2 Rx completion but active_descq is empty\n"); | ||
386 | return -EIO; | ||
387 | } | ||
388 | list_del(&p_pkt->list_entry); | ||
389 | |||
390 | if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) | ||
391 | DP_NOTICE(p_hwfn, | ||
392 | "Mismatch between active_descq and the LL2 Rx chain\n"); | ||
393 | list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); | ||
394 | |||
395 | spin_unlock_irqrestore(&p_rx->lock, lock_flags); | ||
396 | qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id, | ||
397 | p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe); | ||
398 | spin_lock_irqsave(&p_rx->lock, lock_flags); | ||
399 | |||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) | ||
404 | { | ||
405 | struct qed_ll2_info *p_ll2_conn = cookie; | ||
406 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; | ||
407 | union core_rx_cqe_union *cqe = NULL; | ||
408 | u16 cq_new_idx = 0, cq_old_idx = 0; | ||
409 | unsigned long flags = 0; | ||
410 | int rc = 0; | ||
411 | |||
412 | spin_lock_irqsave(&p_rx->lock, flags); | ||
413 | cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); | ||
414 | cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); | ||
415 | |||
416 | while (cq_new_idx != cq_old_idx) { | ||
417 | bool b_last_cqe = (cq_new_idx == cq_old_idx); | ||
418 | |||
419 | cqe = qed_chain_consume(&p_rx->rcq_chain); | ||
420 | cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); | ||
421 | |||
422 | DP_VERBOSE(p_hwfn, | ||
423 | QED_MSG_LL2, | ||
424 | "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n", | ||
425 | cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type); | ||
426 | |||
427 | switch (cqe->rx_cqe_sp.type) { | ||
428 | case CORE_RX_CQE_TYPE_SLOW_PATH: | ||
429 | DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n"); | ||
430 | rc = -EINVAL; | ||
431 | break; | ||
432 | case CORE_RX_CQE_TYPE_REGULAR: | ||
433 | rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn, | ||
434 | cqe, flags, b_last_cqe); | ||
435 | break; | ||
436 | default: | ||
437 | rc = -EIO; | ||
438 | } | ||
439 | } | ||
440 | |||
441 | spin_unlock_irqrestore(&p_rx->lock, flags); | ||
442 | return rc; | ||
443 | } | ||
444 | |||
445 | void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) | ||
446 | { | ||
447 | struct qed_ll2_info *p_ll2_conn = NULL; | ||
448 | struct qed_ll2_rx_packet *p_pkt = NULL; | ||
449 | struct qed_ll2_rx_queue *p_rx; | ||
450 | |||
451 | p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); | ||
452 | if (!p_ll2_conn) | ||
453 | return; | ||
454 | |||
455 | p_rx = &p_ll2_conn->rx_queue; | ||
456 | |||
457 | while (!list_empty(&p_rx->active_descq)) { | ||
458 | dma_addr_t rx_buf_addr; | ||
459 | void *cookie; | ||
460 | bool b_last; | ||
461 | |||
462 | p_pkt = list_first_entry(&p_rx->active_descq, | ||
463 | struct qed_ll2_rx_packet, list_entry); | ||
464 | if (!p_pkt) | ||
465 | break; | ||
466 | |||
467 | list_del(&p_pkt->list_entry); | ||
468 | list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); | ||
469 | |||
470 | rx_buf_addr = p_pkt->rx_buf_addr; | ||
471 | cookie = p_pkt->cookie; | ||
472 | |||
473 | b_last = list_empty(&p_rx->active_descq); | ||
474 | } | ||
475 | } | ||
476 | |||
477 | static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, | ||
478 | struct qed_ll2_info *p_ll2_conn, | ||
479 | u8 action_on_error) | ||
480 | { | ||
481 | enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; | ||
482 | struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; | ||
483 | struct core_rx_start_ramrod_data *p_ramrod = NULL; | ||
484 | struct qed_spq_entry *p_ent = NULL; | ||
485 | struct qed_sp_init_data init_data; | ||
486 | u16 cqe_pbl_size; | ||
487 | int rc = 0; | ||
488 | |||
489 | /* Get SPQ entry */ | ||
490 | memset(&init_data, 0, sizeof(init_data)); | ||
491 | init_data.cid = p_ll2_conn->cid; | ||
492 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | ||
493 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | ||
494 | |||
495 | rc = qed_sp_init_request(p_hwfn, &p_ent, | ||
496 | CORE_RAMROD_RX_QUEUE_START, | ||
497 | PROTOCOLID_CORE, &init_data); | ||
498 | if (rc) | ||
499 | return rc; | ||
500 | |||
501 | p_ramrod = &p_ent->ramrod.core_rx_queue_start; | ||
502 | |||
503 | p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); | ||
504 | p_ramrod->sb_index = p_rx->rx_sb_index; | ||
505 | p_ramrod->complete_event_flg = 1; | ||
506 | |||
507 | p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); | ||
508 | DMA_REGPAIR_LE(p_ramrod->bd_base, | ||
509 | p_rx->rxq_chain.p_phys_addr); | ||
510 | cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); | ||
511 | p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); | ||
512 | DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, | ||
513 | qed_chain_get_pbl_phys(&p_rx->rcq_chain)); | ||
514 | |||
515 | p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg; | ||
516 | p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en; | ||
517 | p_ramrod->queue_id = p_ll2_conn->queue_id; | ||
518 | p_ramrod->main_func_queue = 1; | ||
519 | |||
520 | if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && | ||
521 | p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE)) { | ||
522 | p_ramrod->mf_si_bcast_accept_all = 1; | ||
523 | p_ramrod->mf_si_mcast_accept_all = 1; | ||
524 | } else { | ||
525 | p_ramrod->mf_si_bcast_accept_all = 0; | ||
526 | p_ramrod->mf_si_mcast_accept_all = 0; | ||
527 | } | ||
528 | |||
529 | p_ramrod->action_on_error.error_type = action_on_error; | ||
530 | return qed_spq_post(p_hwfn, p_ent, NULL); | ||
531 | } | ||
532 | |||
533 | static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, | ||
534 | struct qed_ll2_info *p_ll2_conn) | ||
535 | { | ||
536 | enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type; | ||
537 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; | ||
538 | struct core_tx_start_ramrod_data *p_ramrod = NULL; | ||
539 | struct qed_spq_entry *p_ent = NULL; | ||
540 | struct qed_sp_init_data init_data; | ||
541 | union qed_qm_pq_params pq_params; | ||
542 | u16 pq_id = 0, pbl_size; | ||
543 | int rc = -EINVAL; | ||
544 | |||
545 | if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) | ||
546 | return 0; | ||
547 | |||
548 | /* Get SPQ entry */ | ||
549 | memset(&init_data, 0, sizeof(init_data)); | ||
550 | init_data.cid = p_ll2_conn->cid; | ||
551 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | ||
552 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | ||
553 | |||
554 | rc = qed_sp_init_request(p_hwfn, &p_ent, | ||
555 | CORE_RAMROD_TX_QUEUE_START, | ||
556 | PROTOCOLID_CORE, &init_data); | ||
557 | if (rc) | ||
558 | return rc; | ||
559 | |||
560 | p_ramrod = &p_ent->ramrod.core_tx_queue_start; | ||
561 | |||
562 | p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); | ||
563 | p_ramrod->sb_index = p_tx->tx_sb_index; | ||
564 | p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu); | ||
565 | p_ll2_conn->tx_stats_en = 1; | ||
566 | p_ramrod->stats_en = p_ll2_conn->tx_stats_en; | ||
567 | p_ramrod->stats_id = p_ll2_conn->tx_stats_id; | ||
568 | |||
569 | DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, | ||
570 | qed_chain_get_pbl_phys(&p_tx->txq_chain)); | ||
571 | pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain); | ||
572 | p_ramrod->pbl_size = cpu_to_le16(pbl_size); | ||
573 | |||
574 | memset(&pq_params, 0, sizeof(pq_params)); | ||
575 | pq_params.core.tc = p_ll2_conn->tx_tc; | ||
576 | pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); | ||
577 | p_ramrod->qm_pq_id = cpu_to_le16(pq_id); | ||
578 | |||
579 | switch (conn_type) { | ||
580 | case QED_LL2_TYPE_ISCSI: | ||
581 | case QED_LL2_TYPE_ISCSI_OOO: | ||
582 | p_ramrod->conn_type = PROTOCOLID_ISCSI; | ||
583 | break; | ||
584 | case QED_LL2_TYPE_ROCE: | ||
585 | p_ramrod->conn_type = PROTOCOLID_ROCE; | ||
586 | break; | ||
587 | default: | ||
588 | p_ramrod->conn_type = PROTOCOLID_ETH; | ||
589 | DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); | ||
590 | } | ||
591 | |||
592 | return qed_spq_post(p_hwfn, p_ent, NULL); | ||
593 | } | ||
594 | |||
595 | static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, | ||
596 | struct qed_ll2_info *p_ll2_conn) | ||
597 | { | ||
598 | struct core_rx_stop_ramrod_data *p_ramrod = NULL; | ||
599 | struct qed_spq_entry *p_ent = NULL; | ||
600 | struct qed_sp_init_data init_data; | ||
601 | int rc = -EINVAL; | ||
602 | |||
603 | /* Get SPQ entry */ | ||
604 | memset(&init_data, 0, sizeof(init_data)); | ||
605 | init_data.cid = p_ll2_conn->cid; | ||
606 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | ||
607 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | ||
608 | |||
609 | rc = qed_sp_init_request(p_hwfn, &p_ent, | ||
610 | CORE_RAMROD_RX_QUEUE_STOP, | ||
611 | PROTOCOLID_CORE, &init_data); | ||
612 | if (rc) | ||
613 | return rc; | ||
614 | |||
615 | p_ramrod = &p_ent->ramrod.core_rx_queue_stop; | ||
616 | |||
617 | p_ramrod->complete_event_flg = 1; | ||
618 | p_ramrod->queue_id = p_ll2_conn->queue_id; | ||
619 | |||
620 | return qed_spq_post(p_hwfn, p_ent, NULL); | ||
621 | } | ||
622 | |||
623 | static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, | ||
624 | struct qed_ll2_info *p_ll2_conn) | ||
625 | { | ||
626 | struct qed_spq_entry *p_ent = NULL; | ||
627 | struct qed_sp_init_data init_data; | ||
628 | int rc = -EINVAL; | ||
629 | |||
630 | /* Get SPQ entry */ | ||
631 | memset(&init_data, 0, sizeof(init_data)); | ||
632 | init_data.cid = p_ll2_conn->cid; | ||
633 | init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; | ||
634 | init_data.comp_mode = QED_SPQ_MODE_EBLOCK; | ||
635 | |||
636 | rc = qed_sp_init_request(p_hwfn, &p_ent, | ||
637 | CORE_RAMROD_TX_QUEUE_STOP, | ||
638 | PROTOCOLID_CORE, &init_data); | ||
639 | if (rc) | ||
640 | return rc; | ||
641 | |||
642 | return qed_spq_post(p_hwfn, p_ent, NULL); | ||
643 | } | ||
644 | |||
645 | static int | ||
646 | qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, | ||
647 | struct qed_ll2_info *p_ll2_info, u16 rx_num_desc) | ||
648 | { | ||
649 | struct qed_ll2_rx_packet *p_descq; | ||
650 | u32 capacity; | ||
651 | int rc = 0; | ||
652 | |||
653 | if (!rx_num_desc) | ||
654 | goto out; | ||
655 | |||
656 | rc = qed_chain_alloc(p_hwfn->cdev, | ||
657 | QED_CHAIN_USE_TO_CONSUME_PRODUCE, | ||
658 | QED_CHAIN_MODE_NEXT_PTR, | ||
659 | QED_CHAIN_CNT_TYPE_U16, | ||
660 | rx_num_desc, | ||
661 | sizeof(struct core_rx_bd), | ||
662 | &p_ll2_info->rx_queue.rxq_chain); | ||
663 | if (rc) { | ||
664 | DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n"); | ||
665 | goto out; | ||
666 | } | ||
667 | |||
668 | capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain); | ||
669 | p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet), | ||
670 | GFP_KERNEL); | ||
671 | if (!p_descq) { | ||
672 | rc = -ENOMEM; | ||
673 | DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n"); | ||
674 | goto out; | ||
675 | } | ||
676 | p_ll2_info->rx_queue.descq_array = p_descq; | ||
677 | |||
678 | rc = qed_chain_alloc(p_hwfn->cdev, | ||
679 | QED_CHAIN_USE_TO_CONSUME_PRODUCE, | ||
680 | QED_CHAIN_MODE_PBL, | ||
681 | QED_CHAIN_CNT_TYPE_U16, | ||
682 | rx_num_desc, | ||
683 | sizeof(struct core_rx_fast_path_cqe), | ||
684 | &p_ll2_info->rx_queue.rcq_chain); | ||
685 | if (rc) { | ||
686 | DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); | ||
687 | goto out; | ||
688 | } | ||
689 | |||
690 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, | ||
691 | "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", | ||
692 | p_ll2_info->conn_type, rx_num_desc); | ||
693 | |||
694 | out: | ||
695 | return rc; | ||
696 | } | ||
697 | |||
698 | static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, | ||
699 | struct qed_ll2_info *p_ll2_info, | ||
700 | u16 tx_num_desc) | ||
701 | { | ||
702 | struct qed_ll2_tx_packet *p_descq; | ||
703 | u32 capacity; | ||
704 | int rc = 0; | ||
705 | |||
706 | if (!tx_num_desc) | ||
707 | goto out; | ||
708 | |||
709 | rc = qed_chain_alloc(p_hwfn->cdev, | ||
710 | QED_CHAIN_USE_TO_CONSUME_PRODUCE, | ||
711 | QED_CHAIN_MODE_PBL, | ||
712 | QED_CHAIN_CNT_TYPE_U16, | ||
713 | tx_num_desc, | ||
714 | sizeof(struct core_tx_bd), | ||
715 | &p_ll2_info->tx_queue.txq_chain); | ||
716 | if (rc) | ||
717 | goto out; | ||
718 | |||
719 | capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain); | ||
720 | p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet), | ||
721 | GFP_KERNEL); | ||
722 | if (!p_descq) { | ||
723 | rc = -ENOMEM; | ||
724 | goto out; | ||
725 | } | ||
726 | p_ll2_info->tx_queue.descq_array = p_descq; | ||
727 | |||
728 | DP_VERBOSE(p_hwfn, QED_MSG_LL2, | ||
729 | "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", | ||
730 | p_ll2_info->conn_type, tx_num_desc); | ||
731 | |||
732 | out: | ||
733 | if (rc) | ||
734 | DP_NOTICE(p_hwfn, | ||
735 | "Can't allocate memory for Tx LL2 with 0x%08x buffers\n", | ||
736 | tx_num_desc); | ||
737 | return rc; | ||
738 | } | ||
739 | |||
740 | int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, | ||
741 | struct qed_ll2_info *p_params, | ||
742 | u16 rx_num_desc, | ||
743 | u16 tx_num_desc, | ||
744 | u8 *p_connection_handle) | ||
745 | { | ||
746 | qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; | ||
747 | struct qed_ll2_info *p_ll2_info = NULL; | ||
748 | int rc; | ||
749 | u8 i; | ||
750 | |||
751 | if (!p_connection_handle || !p_hwfn->p_ll2_info) | ||
752 | return -EINVAL; | ||
753 | |||
754 | /* Find a free connection to be used */ | ||
755 | for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) { | ||
756 | mutex_lock(&p_hwfn->p_ll2_info[i].mutex); | ||
757 | if (p_hwfn->p_ll2_info[i].b_active) { | ||
758 | mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); | ||
759 | continue; | ||
760 | } | ||
761 | |||
762 | p_hwfn->p_ll2_info[i].b_active = true; | ||
763 | p_ll2_info = &p_hwfn->p_ll2_info[i]; | ||
764 | mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); | ||
765 | break; | ||
766 | } | ||
767 | if (!p_ll2_info) | ||
768 | return -EBUSY; | ||
769 | |||
770 | p_ll2_info->conn_type = p_params->conn_type; | ||
771 | p_ll2_info->mtu = p_params->mtu; | ||
772 | p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg; | ||
773 | p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en; | ||
774 | p_ll2_info->tx_tc = p_params->tx_tc; | ||
775 | p_ll2_info->tx_dest = p_params->tx_dest; | ||
776 | p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big; | ||
777 | p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf; | ||
778 | |||
779 | rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc); | ||
780 | if (rc) | ||
781 | goto q_allocate_fail; | ||
782 | |||
783 | rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info, tx_num_desc); | ||
784 | if (rc) | ||
785 | goto q_allocate_fail; | ||
786 | |||
787 | /* Register callbacks for the Rx/Tx queues */ | ||
788 | comp_rx_cb = qed_ll2_rxq_completion; | ||
789 | comp_tx_cb = qed_ll2_txq_completion; | ||
790 | |||
791 | if (rx_num_desc) { | ||
792 | qed_int_register_cb(p_hwfn, comp_rx_cb, | ||
793 | &p_hwfn->p_ll2_info[i], | ||
794 | &p_ll2_info->rx_queue.rx_sb_index, | ||
795 | &p_ll2_info->rx_queue.p_fw_cons); | ||
796 | p_ll2_info->rx_queue.b_cb_registred = true; | ||
797 | } | ||
798 | |||
799 | if (tx_num_desc) { | ||
800 | qed_int_register_cb(p_hwfn, | ||
801 | comp_tx_cb, | ||
802 | &p_hwfn->p_ll2_info[i], | ||
803 | &p_ll2_info->tx_queue.tx_sb_index, | ||
804 | &p_ll2_info->tx_queue.p_fw_cons); | ||
805 | p_ll2_info->tx_queue.b_cb_registred = true; | ||
806 | } | ||
807 | |||
808 | *p_connection_handle = i; | ||
809 | return rc; | ||
810 | |||
811 | q_allocate_fail: | ||
812 | qed_ll2_release_connection(p_hwfn, i); | ||
813 | return -ENOMEM; | ||
814 | } | ||
815 | |||
816 | static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, | ||
817 | struct qed_ll2_info *p_ll2_conn) | ||
818 | { | ||
819 | u8 action_on_error = 0; | ||
820 | |||
821 | if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) | ||
822 | return 0; | ||
823 | |||
824 | DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0); | ||
825 | |||
826 | SET_FIELD(action_on_error, | ||
827 | CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, | ||
828 | p_ll2_conn->ai_err_packet_too_big); | ||
829 | SET_FIELD(action_on_error, | ||
830 | CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf); | ||
831 | |||
832 | return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); | ||
833 | } | ||
834 | |||
835 | int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) | ||
836 | { | ||
837 | struct qed_ll2_info *p_ll2_conn; | ||
838 | struct qed_ll2_rx_queue *p_rx; | ||
839 | struct qed_ll2_tx_queue *p_tx; | ||
840 | int rc = -EINVAL; | ||
841 | u32 i, capacity; | ||
842 | u8 qid; | ||
843 | |||
844 | p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); | ||
845 | if (!p_ll2_conn) | ||
846 | return -EINVAL; | ||
847 | p_rx = &p_ll2_conn->rx_queue; | ||
848 | p_tx = &p_ll2_conn->tx_queue; | ||
849 | |||
850 | qed_chain_reset(&p_rx->rxq_chain); | ||
851 | qed_chain_reset(&p_rx->rcq_chain); | ||
852 | INIT_LIST_HEAD(&p_rx->active_descq); | ||
853 | INIT_LIST_HEAD(&p_rx->free_descq); | ||
854 | INIT_LIST_HEAD(&p_rx->posting_descq); | ||
855 | spin_lock_init(&p_rx->lock); | ||
856 | capacity = qed_chain_get_capacity(&p_rx->rxq_chain); | ||
857 | for (i = 0; i < capacity; i++) | ||
858 | list_add_tail(&p_rx->descq_array[i].list_entry, | ||
859 | &p_rx->free_descq); | ||
860 | *p_rx->p_fw_cons = 0; | ||
861 | |||
862 | qed_chain_reset(&p_tx->txq_chain); | ||
863 | INIT_LIST_HEAD(&p_tx->active_descq); | ||
864 | INIT_LIST_HEAD(&p_tx->free_descq); | ||
865 | INIT_LIST_HEAD(&p_tx->sending_descq); | ||
866 | spin_lock_init(&p_tx->lock); | ||
867 | capacity = qed_chain_get_capacity(&p_tx->txq_chain); | ||
868 | for (i = 0; i < capacity; i++) | ||
869 | list_add_tail(&p_tx->descq_array[i].list_entry, | ||
870 | &p_tx->free_descq); | ||
871 | p_tx->cur_completing_bd_idx = 0; | ||
872 | p_tx->bds_idx = 0; | ||
873 | p_tx->b_completing_packet = false; | ||
874 | p_tx->cur_send_packet = NULL; | ||
875 | p_tx->cur_send_frag_num = 0; | ||
876 | p_tx->cur_completing_frag_num = 0; | ||
877 | *p_tx->p_fw_cons = 0; | ||
878 | |||
879 | qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid); | ||
880 | |||
881 | qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle; | ||
882 | p_ll2_conn->queue_id = qid; | ||
883 | p_ll2_conn->tx_stats_id = qid; | ||
884 | p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview + | ||
885 | GTT_BAR0_MAP_REG_TSDM_RAM + | ||
886 | TSTORM_LL2_RX_PRODS_OFFSET(qid); | ||
887 | p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + | ||
888 | qed_db_addr(p_ll2_conn->cid, | ||
889 | DQ_DEMS_LEGACY); | ||
890 | |||
891 | rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); | ||
892 | if (rc) | ||
893 | return rc; | ||
894 | |||
895 | rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn); | ||
896 | if (rc) | ||
897 | return rc; | ||
898 | |||
899 | if (p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE) | ||
900 | qed_wr(p_hwfn, p_hwfn->p_main_ptt, PRS_REG_USE_LIGHT_L2, 1); | ||
901 | |||
902 | return rc; | ||
903 | } | ||
904 | |||
905 | static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, | ||
906 | struct qed_ll2_rx_queue *p_rx, | ||
907 | struct qed_ll2_rx_packet *p_curp) | ||
908 | { | ||
909 | struct qed_ll2_rx_packet *p_posting_packet = NULL; | ||
910 | struct core_ll2_rx_prod rx_prod = { 0, 0, 0 }; | ||
911 | bool b_notify_fw = false; | ||
912 | u16 bd_prod, cq_prod; | ||
913 | |||
914 | /* This handles the flushing of already posted buffers */ | ||
915 | while (!list_empty(&p_rx->posting_descq)) { | ||
916 | p_posting_packet = list_first_entry(&p_rx->posting_descq, | ||
917 | struct qed_ll2_rx_packet, | ||
918 | list_entry); | ||
919 | list_del(&p_posting_packet->list_entry); | ||
920 | list_add_tail(&p_posting_packet->list_entry, | ||
921 | &p_rx->active_descq); | ||
922 | b_notify_fw = true; | ||
923 | } | ||
924 | |||
925 | /* This handles the supplied packet [if there is one] */ | ||
926 | if (p_curp) { | ||
927 | list_add_tail(&p_curp->list_entry, &p_rx->active_descq); | ||
928 | b_notify_fw = true; | ||
929 | } | ||
930 | |||
931 | if (!b_notify_fw) | ||
932 | return; | ||
933 | |||
934 | bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain); | ||
935 | cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); | ||
936 | rx_prod.bd_prod = cpu_to_le16(bd_prod); | ||
937 | rx_prod.cqe_prod = cpu_to_le16(cq_prod); | ||
938 | DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); | ||
939 | } | ||
940 | |||
941 | int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, | ||
942 | u8 connection_handle, | ||
943 | dma_addr_t addr, | ||
944 | u16 buf_len, void *cookie, u8 notify_fw) | ||
945 | { | ||
946 | struct core_rx_bd_with_buff_len *p_curb = NULL; | ||
947 | struct qed_ll2_rx_packet *p_curp = NULL; | ||
948 | struct qed_ll2_info *p_ll2_conn; | ||
949 | struct qed_ll2_rx_queue *p_rx; | ||
950 | unsigned long flags; | ||
951 | void *p_data; | ||
952 | int rc = 0; | ||
953 | |||
954 | p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); | ||
955 | if (!p_ll2_conn) | ||
956 | return -EINVAL; | ||
957 | p_rx = &p_ll2_conn->rx_queue; | ||
958 | |||
959 | spin_lock_irqsave(&p_rx->lock, flags); | ||
960 | if (!list_empty(&p_rx->free_descq)) | ||
961 | p_curp = list_first_entry(&p_rx->free_descq, | ||
962 | struct qed_ll2_rx_packet, list_entry); | ||
963 | if (p_curp) { | ||
964 | if (qed_chain_get_elem_left(&p_rx->rxq_chain) && | ||
965 | qed_chain_get_elem_left(&p_rx->rcq_chain)) { | ||
966 | p_data = qed_chain_produce(&p_rx->rxq_chain); | ||
967 | p_curb = (struct core_rx_bd_with_buff_len *)p_data; | ||
968 | qed_chain_produce(&p_rx->rcq_chain); | ||
969 | } | ||
970 | } | ||
971 | |||
972 | /* If we're lacking entires, let's try to flush buffers to FW */ | ||
973 | if (!p_curp || !p_curb) { | ||
974 | rc = -EBUSY; | ||
975 | p_curp = NULL; | ||
976 | goto out_notify; | ||
977 | } | ||
978 | |||
979 | /* We have an Rx packet we can fill */ | ||
980 | DMA_REGPAIR_LE(p_curb->addr, addr); | ||
981 | p_curb->buff_length = cpu_to_le16(buf_len); | ||
982 | p_curp->rx_buf_addr = addr; | ||
983 | p_curp->cookie = cookie; | ||
984 | p_curp->rxq_bd = p_curb; | ||
985 | p_curp->buf_length = buf_len; | ||
986 | list_del(&p_curp->list_entry); | ||
987 | |||
988 | /* Check if we only want to enqueue this packet without informing FW */ | ||
989 | if (!notify_fw) { | ||
990 | list_add_tail(&p_curp->list_entry, &p_rx->posting_descq); | ||
991 | goto out; | ||
992 | } | ||
993 | |||
994 | out_notify: | ||
995 | qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp); | ||
996 | out: | ||
997 | spin_unlock_irqrestore(&p_rx->lock, flags); | ||
998 | return rc; | ||
999 | } | ||
1000 | |||
1001 | static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, | ||
1002 | struct qed_ll2_tx_queue *p_tx, | ||
1003 | struct qed_ll2_tx_packet *p_curp, | ||
1004 | u8 num_of_bds, | ||
1005 | dma_addr_t first_frag, | ||
1006 | u16 first_frag_len, void *p_cookie, | ||
1007 | u8 notify_fw) | ||
1008 | { | ||
1009 | list_del(&p_curp->list_entry); | ||
1010 | p_curp->cookie = p_cookie; | ||
1011 | p_curp->bd_used = num_of_bds; | ||
1012 | p_curp->notify_fw = notify_fw; | ||
1013 | p_tx->cur_send_packet = p_curp; | ||
1014 | p_tx->cur_send_frag_num = 0; | ||
1015 | |||
1016 | p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = first_frag; | ||
1017 | p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = first_frag_len; | ||
1018 | p_tx->cur_send_frag_num++; | ||
1019 | } | ||
1020 | |||
1021 | static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, | ||
1022 | struct qed_ll2_info *p_ll2, | ||
1023 | struct qed_ll2_tx_packet *p_curp, | ||
1024 | u8 num_of_bds, | ||
1025 | enum core_tx_dest tx_dest, | ||
1026 | u16 vlan, | ||
1027 | u8 bd_flags, | ||
1028 | u16 l4_hdr_offset_w, | ||
1029 | dma_addr_t first_frag, | ||
1030 | u16 first_frag_len) | ||
1031 | { | ||
1032 | struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; | ||
1033 | u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); | ||
1034 | struct core_tx_bd *start_bd = NULL; | ||
1035 | u16 frag_idx; | ||
1036 | |||
1037 | start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); | ||
1038 | start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan); | ||
1039 | SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, | ||
1040 | cpu_to_le16(l4_hdr_offset_w)); | ||
1041 | SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); | ||
1042 | start_bd->bd_flags.as_bitfield = bd_flags; | ||
1043 | start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << | ||
1044 | CORE_TX_BD_FLAGS_START_BD_SHIFT; | ||
1045 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); | ||
1046 | DMA_REGPAIR_LE(start_bd->addr, first_frag); | ||
1047 | start_bd->nbytes = cpu_to_le16(first_frag_len); | ||
1048 | |||
1049 | DP_VERBOSE(p_hwfn, | ||
1050 | (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), | ||
1051 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", | ||
1052 | p_ll2->queue_id, | ||
1053 | p_ll2->cid, | ||
1054 | p_ll2->conn_type, | ||
1055 | prod_idx, | ||
1056 | first_frag_len, | ||
1057 | num_of_bds, | ||
1058 | le32_to_cpu(start_bd->addr.hi), | ||
1059 | le32_to_cpu(start_bd->addr.lo)); | ||
1060 | |||
1061 | if (p_ll2->tx_queue.cur_send_frag_num == num_of_bds) | ||
1062 | return; | ||
1063 | |||
1064 | /* Need to provide the packet with additional BDs for frags */ | ||
1065 | for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; | ||
1066 | frag_idx < num_of_bds; frag_idx++) { | ||
1067 | struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; | ||
1068 | |||
1069 | *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); | ||
1070 | (*p_bd)->bd_flags.as_bitfield = 0; | ||
1071 | (*p_bd)->bitfield1 = 0; | ||
1072 | (*p_bd)->bitfield0 = 0; | ||
1073 | p_curp->bds_set[frag_idx].tx_frag = 0; | ||
1074 | p_curp->bds_set[frag_idx].frag_len = 0; | ||
1075 | } | ||
1076 | } | ||
1077 | |||
1078 | /* This should be called while the Txq spinlock is being held */ | ||
1079 | static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, | ||
1080 | struct qed_ll2_info *p_ll2_conn) | ||
1081 | { | ||
1082 | bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; | ||
1083 | struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; | ||
1084 | struct qed_ll2_tx_packet *p_pkt = NULL; | ||
1085 | struct core_db_data db_msg = { 0, 0, 0 }; | ||
1086 | u16 bd_prod; | ||
1087 | |||
1088 | /* If there are missing BDs, don't do anything now */ | ||
1089 | if (p_ll2_conn->tx_queue.cur_send_frag_num != | ||
1090 | p_ll2_conn->tx_queue.cur_send_packet->bd_used) | ||
1091 | return; | ||
1092 | |||
1093 | /* Push the current packet to the list and clean after it */ | ||
1094 | list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry, | ||
1095 | &p_ll2_conn->tx_queue.sending_descq); | ||
1096 | p_ll2_conn->tx_queue.cur_send_packet = NULL; | ||
1097 | p_ll2_conn->tx_queue.cur_send_frag_num = 0; | ||
1098 | |||
1099 | /* Notify FW of packet only if requested to */ | ||
1100 | if (!b_notify) | ||
1101 | return; | ||
1102 | |||
1103 | bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain); | ||
1104 | |||
1105 | while (!list_empty(&p_tx->sending_descq)) { | ||
1106 | p_pkt = list_first_entry(&p_tx->sending_descq, | ||
1107 | struct qed_ll2_tx_packet, list_entry); | ||
1108 | if (!p_pkt) | ||
1109 | break; | ||
1110 | |||
1111 | list_del(&p_pkt->list_entry); | ||
1112 | list_add_tail(&p_pkt->list_entry, &p_tx->active_descq); | ||
1113 | } | ||
1114 | |||
1115 | SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); | ||
1116 | SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); | ||
1117 | SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, | ||
1118 | DQ_XCM_CORE_TX_BD_PROD_CMD); | ||
1119 | db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; | ||
1120 | db_msg.spq_prod = cpu_to_le16(bd_prod); | ||
1121 | |||
1122 | /* Make sure the BDs data is updated before ringing the doorbell */ | ||
1123 | wmb(); | ||
1124 | |||
1125 | DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg)); | ||
1126 | |||
1127 | DP_VERBOSE(p_hwfn, | ||
1128 | (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), | ||
1129 | "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", | ||
1130 | p_ll2_conn->queue_id, | ||
1131 | p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod); | ||
1132 | } | ||
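The doorbell record rung above is a single 32-bit MMIO store via DIRECT_REG_WR(). A hedged sketch of the assumed layout follows; struct ll2_db_sketch and ll2_db_to_u32() are illustrative names inferred from the SET_FIELD()/assignment sequence, not copied from qed_hsi.h:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: layout inferred from the code above. */
struct ll2_db_sketch {
	u8 params;		/* dest, agg command, value selector */
	u8 agg_flags;		/* aggregation CF command */
	__le16 spq_prod;	/* new Tx BD producer index */
};

static inline u32 ll2_db_to_u32(const struct ll2_db_sketch *db)
{
	u32 raw;

	/* The whole record must fit the one 32-bit doorbell write. */
	BUILD_BUG_ON(sizeof(*db) != sizeof(raw));
	memcpy(&raw, db, sizeof(raw));
	return raw;
}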
1133 | |||
1134 | int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, | ||
1135 | u8 connection_handle, | ||
1136 | u8 num_of_bds, | ||
1137 | u16 vlan, | ||
1138 | u8 bd_flags, | ||
1139 | u16 l4_hdr_offset_w, | ||
1140 | dma_addr_t first_frag, | ||
1141 | u16 first_frag_len, void *cookie, u8 notify_fw) | ||
1142 | { | ||
1143 | struct qed_ll2_tx_packet *p_curp = NULL; | ||
1144 | struct qed_ll2_info *p_ll2_conn = NULL; | ||
1145 | struct qed_ll2_tx_queue *p_tx; | ||
1146 | struct qed_chain *p_tx_chain; | ||
1147 | unsigned long flags; | ||
1148 | int rc = 0; | ||
1149 | |||
1150 | p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); | ||
1151 | if (!p_ll2_conn) | ||
1152 | return -EINVAL; | ||
1153 | p_tx = &p_ll2_conn->tx_queue; | ||
1154 | p_tx_chain = &p_tx->txq_chain; | ||
1155 | |||
1156 | if (num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET) | ||
1157 | return -EIO; | ||
1158 | |||
1159 | spin_lock_irqsave(&p_tx->lock, flags); | ||
1160 | if (p_tx->cur_send_packet) { | ||
1161 | rc = -EEXIST; | ||
1162 | goto out; | ||
1163 | } | ||
1164 | |||
1165 | /* Get entry, but only if we have tx elements for it */ | ||
1166 | if (!list_empty(&p_tx->free_descq)) | ||
1167 | p_curp = list_first_entry(&p_tx->free_descq, | ||
1168 | struct qed_ll2_tx_packet, list_entry); | ||
1169 | if (p_curp && qed_chain_get_elem_left(p_tx_chain) < num_of_bds) | ||
1170 | p_curp = NULL; | ||
1171 | |||
1172 | if (!p_curp) { | ||
1173 | rc = -EBUSY; | ||
1174 | goto out; | ||
1175 | } | ||
1176 | |||
1177 | /* Prepare packet and BD, and perhaps send a doorbell to FW */ | ||
1178 | qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, | ||
1179 | num_of_bds, first_frag, | ||
1180 | first_frag_len, cookie, notify_fw); | ||
1181 | qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, | ||
1182 | num_of_bds, CORE_TX_DEST_NW, | ||
1183 | vlan, bd_flags, l4_hdr_offset_w, | ||
1184 | first_frag, first_frag_len); | ||
1185 | |||
1186 | qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); | ||
1187 | |||
1188 | out: | ||
1189 | spin_unlock_irqrestore(&p_tx->lock, flags); | ||
1190 | return rc; | ||
1191 | } | ||
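Taken together with qed_ll2_set_fragment_of_tx_packet() below, the intended calling sequence for a multi-fragment packet looks roughly like the following sketch; the function name, handle, DMA addresses and lengths are hypothetical:

static int ll2_xmit_sketch(struct qed_hwfn *p_hwfn, u8 handle,
			   dma_addr_t frag0_dma, u16 frag0_len,
			   dma_addr_t frag1_dma, u16 frag1_len,
			   dma_addr_t frag2_dma, u16 frag2_len,
			   void *cookie)
{
	int rc;

	/* Start BD carries the first fragment; request 3 BDs in total. */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, handle, 3, 0 /* vlan */,
				       0 /* bd_flags */,
				       0 /* l4_hdr_offset_w */,
				       frag0_dma, frag0_len, cookie,
				       1 /* notify_fw */);
	if (rc)
		return rc;

	/* Remaining fragments; the doorbell rings only once all BDs
	 * of the packet have been provided.
	 */
	rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
					       frag1_dma, frag1_len);
	if (!rc)
		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
						       frag2_dma, frag2_len);
	return rc;
}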
1192 | |||
1193 | int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, | ||
1194 | u8 connection_handle, | ||
1195 | dma_addr_t addr, u16 nbytes) | ||
1196 | { | ||
1197 | struct qed_ll2_tx_packet *p_cur_send_packet = NULL; | ||
1198 | struct qed_ll2_info *p_ll2_conn = NULL; | ||
1199 | u16 cur_send_frag_num = 0; | ||
1200 | struct core_tx_bd *p_bd; | ||
1201 | unsigned long flags; | ||
1202 | |||
1203 | p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); | ||
1204 | if (!p_ll2_conn) | ||
1205 | return -EINVAL; | ||
1206 | |||
1207 | if (!p_ll2_conn->tx_queue.cur_send_packet) | ||
1208 | return -EINVAL; | ||
1209 | |||
1210 | p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; | ||
1211 | cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; | ||
1212 | |||
1213 | if (cur_send_frag_num >= p_cur_send_packet->bd_used) | ||
1214 | return -EINVAL; | ||
1215 | |||
1216 | /* Fill the BD information, and possibly notify FW */ | ||
1217 | p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd; | ||
1218 | DMA_REGPAIR_LE(p_bd->addr, addr); | ||
1219 | p_bd->nbytes = cpu_to_le16(nbytes); | ||
1220 | p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr; | ||
1221 | p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes; | ||
1222 | |||
1223 | p_ll2_conn->tx_queue.cur_send_frag_num++; | ||
1224 | |||
1225 | spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags); | ||
1226 | qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); | ||
1227 | spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags); | ||
1228 | |||
1229 | return 0; | ||
1230 | } | ||
1231 | |||
1232 | int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) | ||
1233 | { | ||
1234 | struct qed_ll2_info *p_ll2_conn = NULL; | ||
1235 | int rc = -EINVAL; | ||
1236 | |||
1237 | p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); | ||
1238 | if (!p_ll2_conn) | ||
1239 | return -EINVAL; | ||
1240 | |||
1241 | /* Stop Tx & Rx of connection, if needed */ | ||
1242 | if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { | ||
1243 | rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); | ||
1244 | if (rc) | ||
1245 | return rc; | ||
1246 | qed_ll2_txq_flush(p_hwfn, connection_handle); | ||
1247 | } | ||
1248 | |||
1249 | if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { | ||
1250 | rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); | ||
1251 | if (rc) | ||
1252 | return rc; | ||
1253 | qed_ll2_rxq_flush(p_hwfn, connection_handle); | ||
1254 | } | ||
1255 | |||
1256 | return rc; | ||
1257 | } | ||
1258 | |||
1259 | void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle) | ||
1260 | { | ||
1261 | struct qed_ll2_info *p_ll2_conn = NULL; | ||
1262 | |||
1263 | p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); | ||
1264 | if (!p_ll2_conn) | ||
1265 | return; | ||
1266 | |||
1267 | if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { | ||
1268 | p_ll2_conn->rx_queue.b_cb_registred = false; | ||
1269 | qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); | ||
1270 | } | ||
1271 | |||
1272 | if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { | ||
1273 | p_ll2_conn->tx_queue.b_cb_registred = false; | ||
1274 | qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); | ||
1275 | } | ||
1276 | |||
1277 | kfree(p_ll2_conn->tx_queue.descq_array); | ||
1278 | qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); | ||
1279 | |||
1280 | kfree(p_ll2_conn->rx_queue.descq_array); | ||
1281 | qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain); | ||
1282 | qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain); | ||
1283 | |||
1284 | qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid); | ||
1285 | |||
1286 | mutex_lock(&p_ll2_conn->mutex); | ||
1287 | p_ll2_conn->b_active = false; | ||
1288 | mutex_unlock(&p_ll2_conn->mutex); | ||
1289 | } | ||
1290 | |||
1291 | struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn) | ||
1292 | { | ||
1293 | struct qed_ll2_info *p_ll2_connections; | ||
1294 | u8 i; | ||
1295 | |||
1296 | /* Allocate LL2's set struct */ | ||
1297 | p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS, | ||
1298 | sizeof(struct qed_ll2_info), GFP_KERNEL); | ||
1299 | if (!p_ll2_connections) { | ||
1300 | DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n"); | ||
1301 | return NULL; | ||
1302 | } | ||
1303 | |||
1304 | for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) | ||
1305 | p_ll2_connections[i].my_id = i; | ||
1306 | |||
1307 | return p_ll2_connections; | ||
1308 | } | ||
1309 | |||
1310 | void qed_ll2_setup(struct qed_hwfn *p_hwfn, | ||
1311 | struct qed_ll2_info *p_ll2_connections) | ||
1312 | { | ||
1313 | int i; | ||
1314 | |||
1315 | for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) | ||
1316 | mutex_init(&p_ll2_connections[i].mutex); | ||
1317 | } | ||
1318 | |||
1319 | void qed_ll2_free(struct qed_hwfn *p_hwfn, | ||
1320 | struct qed_ll2_info *p_ll2_connections) | ||
1321 | { | ||
1322 | kfree(p_ll2_connections); | ||
1323 | } | ||
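The alloc/setup/free trio above is meant to slot into the generic per-hwfn resource paths (the qed_dev.c part of this patch wires them up). A sketch of the assumed lifecycle; the wrapper function is hypothetical:

static int ll2_lifecycle_sketch(struct qed_hwfn *p_hwfn)
{
	/* Allocation phase (e.g. from qed_resc_alloc) */
	p_hwfn->p_ll2_info = qed_ll2_alloc(p_hwfn);
	if (!p_hwfn->p_ll2_info)
		return -ENOMEM;

	/* Setup phase (e.g. from qed_resc_setup) */
	qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);

	/* ... acquire/use connections ... */

	/* Teardown phase (e.g. from qed_resc_free) */
	qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
	return 0;
}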
1324 | |||
1325 | static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn, | ||
1326 | struct qed_ptt *p_ptt, | ||
1327 | struct qed_ll2_info *p_ll2_conn, | ||
1328 | struct qed_ll2_stats *p_stats) | ||
1329 | { | ||
1330 | struct core_ll2_tstorm_per_queue_stat tstats; | ||
1331 | u8 qid = p_ll2_conn->queue_id; | ||
1332 | u32 tstats_addr; | ||
1333 | |||
1334 | memset(&tstats, 0, sizeof(tstats)); | ||
1335 | tstats_addr = BAR0_MAP_REG_TSDM_RAM + | ||
1336 | CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid); | ||
1337 | qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); | ||
1338 | |||
1339 | p_stats->packet_too_big_discard = | ||
1340 | HILO_64_REGPAIR(tstats.packet_too_big_discard); | ||
1341 | p_stats->no_buff_discard = HILO_64_REGPAIR(tstats.no_buff_discard); | ||
1342 | } | ||
1343 | |||
1344 | static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn, | ||
1345 | struct qed_ptt *p_ptt, | ||
1346 | struct qed_ll2_info *p_ll2_conn, | ||
1347 | struct qed_ll2_stats *p_stats) | ||
1348 | { | ||
1349 | struct core_ll2_ustorm_per_queue_stat ustats; | ||
1350 | u8 qid = p_ll2_conn->queue_id; | ||
1351 | u32 ustats_addr; | ||
1352 | |||
1353 | memset(&ustats, 0, sizeof(ustats)); | ||
1354 | ustats_addr = BAR0_MAP_REG_USDM_RAM + | ||
1355 | CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid); | ||
1356 | qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats)); | ||
1357 | |||
1358 | p_stats->rcv_ucast_bytes = HILO_64_REGPAIR(ustats.rcv_ucast_bytes); | ||
1359 | p_stats->rcv_mcast_bytes = HILO_64_REGPAIR(ustats.rcv_mcast_bytes); | ||
1360 | p_stats->rcv_bcast_bytes = HILO_64_REGPAIR(ustats.rcv_bcast_bytes); | ||
1361 | p_stats->rcv_ucast_pkts = HILO_64_REGPAIR(ustats.rcv_ucast_pkts); | ||
1362 | p_stats->rcv_mcast_pkts = HILO_64_REGPAIR(ustats.rcv_mcast_pkts); | ||
1363 | p_stats->rcv_bcast_pkts = HILO_64_REGPAIR(ustats.rcv_bcast_pkts); | ||
1364 | } | ||
1365 | |||
1366 | static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn, | ||
1367 | struct qed_ptt *p_ptt, | ||
1368 | struct qed_ll2_info *p_ll2_conn, | ||
1369 | struct qed_ll2_stats *p_stats) | ||
1370 | { | ||
1371 | struct core_ll2_pstorm_per_queue_stat pstats; | ||
1372 | u8 stats_id = p_ll2_conn->tx_stats_id; | ||
1373 | u32 pstats_addr; | ||
1374 | |||
1375 | memset(&pstats, 0, sizeof(pstats)); | ||
1376 | pstats_addr = BAR0_MAP_REG_PSDM_RAM + | ||
1377 | CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id); | ||
1378 | qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); | ||
1379 | |||
1380 | p_stats->sent_ucast_bytes = HILO_64_REGPAIR(pstats.sent_ucast_bytes); | ||
1381 | p_stats->sent_mcast_bytes = HILO_64_REGPAIR(pstats.sent_mcast_bytes); | ||
1382 | p_stats->sent_bcast_bytes = HILO_64_REGPAIR(pstats.sent_bcast_bytes); | ||
1383 | p_stats->sent_ucast_pkts = HILO_64_REGPAIR(pstats.sent_ucast_pkts); | ||
1384 | p_stats->sent_mcast_pkts = HILO_64_REGPAIR(pstats.sent_mcast_pkts); | ||
1385 | p_stats->sent_bcast_pkts = HILO_64_REGPAIR(pstats.sent_bcast_pkts); | ||
1386 | } | ||
1387 | |||
1388 | int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, | ||
1389 | u8 connection_handle, struct qed_ll2_stats *p_stats) | ||
1390 | { | ||
1391 | struct qed_ll2_info *p_ll2_conn = NULL; | ||
1392 | struct qed_ptt *p_ptt; | ||
1393 | |||
1394 | memset(p_stats, 0, sizeof(*p_stats)); | ||
1395 | |||
1396 | if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) || | ||
1397 | !p_hwfn->p_ll2_info) | ||
1398 | return -EINVAL; | ||
1399 | |||
1400 | p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; | ||
1401 | |||
1402 | p_ptt = qed_ptt_acquire(p_hwfn); | ||
1403 | if (!p_ptt) { | ||
1404 | DP_ERR(p_hwfn, "Failed to acquire ptt\n"); | ||
1405 | return -EINVAL; | ||
1406 | } | ||
1407 | |||
1408 | _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); | ||
1409 | _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats); | ||
1410 | if (p_ll2_conn->tx_stats_en) | ||
1411 | _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); | ||
1412 | |||
1413 | qed_ptt_release(p_hwfn, p_ptt); | ||
1414 | return 0; | ||
1415 | } | ||
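A caller polling these counters would do something along the following lines; this is a hedged sketch where 'handle' is the connection handle and the printed fields are a subset of struct qed_ll2_stats:

static void ll2_stats_sketch(struct qed_hwfn *p_hwfn, u8 handle)
{
	struct qed_ll2_stats stats;

	if (qed_ll2_get_stats(p_hwfn, handle, &stats))
		return;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "rx ucast %llu pkts, no-buff discards %llu\n",
		   stats.rcv_ucast_pkts, stats.no_buff_discard);
}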
1416 | |||
1417 | static void qed_ll2_register_cb_ops(struct qed_dev *cdev, | ||
1418 | const struct qed_ll2_cb_ops *ops, | ||
1419 | void *cookie) | ||
1420 | { | ||
1421 | cdev->ll2->cbs = ops; | ||
1422 | cdev->ll2->cb_cookie = cookie; | ||
1423 | } | ||
1424 | |||
1425 | static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) | ||
1426 | { | ||
1427 | struct qed_ll2_info ll2_info; | ||
1428 | struct qed_ll2_buffer *buffer; | ||
1429 | enum qed_ll2_conn_type conn_type; | ||
1430 | struct qed_ptt *p_ptt; | ||
1431 | int rc, i; | ||
1432 | |||
1433 | /* Initialize LL2 locks & lists */ | ||
1434 | INIT_LIST_HEAD(&cdev->ll2->list); | ||
1435 | spin_lock_init(&cdev->ll2->lock); | ||
1436 | cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN + | ||
1437 | L1_CACHE_BYTES + params->mtu; | ||
1438 | cdev->ll2->frags_mapped = params->frags_mapped; | ||
1439 | |||
1440 | /* Allocate memory for LL2 */ | ||

1441 | DP_INFO(cdev, "Allocating LL2 buffers of size %08x bytes\n", | ||
1442 | cdev->ll2->rx_size); | ||
1443 | for (i = 0; i < QED_LL2_RX_SIZE; i++) { | ||
1444 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); | ||
1445 | if (!buffer) { | ||
1446 | DP_INFO(cdev, "Failed to allocate LL2 buffers\n"); | ||
1447 | goto fail; | ||
1448 | } | ||
1449 | |||
1450 | rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data, | ||
1451 | &buffer->phys_addr); | ||
1452 | if (rc) { | ||
1453 | kfree(buffer); | ||
1454 | goto fail; | ||
1455 | } | ||
1456 | |||
1457 | list_add_tail(&buffer->list, &cdev->ll2->list); | ||
1458 | } | ||
1459 | |||
1460 | switch (QED_LEADING_HWFN(cdev)->hw_info.personality) { | ||
1461 | case QED_PCI_ISCSI: | ||
1462 | conn_type = QED_LL2_TYPE_ISCSI; | ||
1463 | break; | ||
1464 | case QED_PCI_ETH_ROCE: | ||
1465 | conn_type = QED_LL2_TYPE_ROCE; | ||
1466 | break; | ||
1467 | default: | ||
1468 | conn_type = QED_LL2_TYPE_TEST; | ||
1469 | } | ||
1470 | |||
1471 | /* Prepare the temporary ll2 information */ | ||
1472 | memset(&ll2_info, 0, sizeof(ll2_info)); | ||
1473 | ll2_info.conn_type = conn_type; | ||
1474 | ll2_info.mtu = params->mtu; | ||
1475 | ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets; | ||
1476 | ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping; | ||
1477 | ll2_info.tx_tc = 0; | ||
1478 | ll2_info.tx_dest = CORE_TX_DEST_NW; | ||
1479 | |||
1480 | rc = qed_ll2_acquire_connection(QED_LEADING_HWFN(cdev), &ll2_info, | ||
1481 | QED_LL2_RX_SIZE, QED_LL2_TX_SIZE, | ||
1482 | &cdev->ll2->handle); | ||
1483 | if (rc) { | ||
1484 | DP_INFO(cdev, "Failed to acquire LL2 connection\n"); | ||
1485 | goto fail; | ||
1486 | } | ||
1487 | |||
1488 | rc = qed_ll2_establish_connection(QED_LEADING_HWFN(cdev), | ||
1489 | cdev->ll2->handle); | ||
1490 | if (rc) { | ||
1491 | DP_INFO(cdev, "Failed to establish LL2 connection\n"); | ||
1492 | goto release_fail; | ||
1493 | } | ||
1494 | |||
1495 | /* Post all Rx buffers to FW */ | ||
1496 | spin_lock_bh(&cdev->ll2->lock); | ||
1497 | list_for_each_entry(buffer, &cdev->ll2->list, list) { | ||
1498 | rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev), | ||
1499 | cdev->ll2->handle, | ||
1500 | buffer->phys_addr, 0, buffer, 1); | ||
1501 | if (rc) { | ||
1502 | DP_INFO(cdev, | ||
1503 | "Failed to post an Rx buffer; Deleting it\n"); | ||
1504 | dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, | ||
1505 | cdev->ll2->rx_size, DMA_FROM_DEVICE); | ||
1506 | kfree(buffer->data); | ||
1507 | list_del(&buffer->list); | ||
1508 | kfree(buffer); | ||
1509 | } else { | ||
1510 | cdev->ll2->rx_cnt++; | ||
1511 | } | ||
1512 | } | ||
1513 | spin_unlock_bh(&cdev->ll2->lock); | ||
1514 | |||
1515 | if (!cdev->ll2->rx_cnt) { | ||
1516 | DP_INFO(cdev, "Failed passing even a single Rx buffer\n"); | ||
1517 | goto release_terminate; | ||
1518 | } | ||
1519 | |||
1520 | if (!is_valid_ether_addr(params->ll2_mac_address)) { | ||
1521 | DP_INFO(cdev, "Invalid Ethernet address\n"); | ||
1522 | goto release_terminate; | ||
1523 | } | ||
1524 | |||
1525 | p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); | ||
1526 | if (!p_ptt) { | ||
1527 | DP_INFO(cdev, "Failed to acquire PTT\n"); | ||
1528 | goto release_terminate; | ||
1529 | } | ||
1530 | |||
1531 | rc = qed_llh_add_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, | ||
1532 | params->ll2_mac_address); | ||
1533 | qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); | ||
1534 | if (rc) { | ||
1535 | DP_ERR(cdev, "Failed to allocate LLH filter\n"); | ||
1536 | goto release_terminate_all; | ||
1537 | } | ||
1538 | |||
1539 | ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); | ||
1540 | |||
1541 | return 0; | ||
1542 | |||
1543 | release_terminate_all: | ||
1544 | |||
1545 | release_terminate: | ||
1546 | qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle); | ||
1547 | release_fail: | ||
1548 | qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle); | ||
1549 | fail: | ||
1550 | qed_ll2_kill_buffers(cdev); | ||
1551 | cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; | ||
1552 | return -EINVAL; | ||
1553 | } | ||
1554 | |||
1555 | static int qed_ll2_stop(struct qed_dev *cdev) | ||
1556 | { | ||
1557 | struct qed_ptt *p_ptt; | ||
1558 | int rc; | ||
1559 | |||
1560 | if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE) | ||
1561 | return 0; | ||
1562 | |||
1563 | p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); | ||
1564 | if (!p_ptt) { | ||
1565 | DP_INFO(cdev, "Failed to acquire PTT\n"); | ||
1566 | goto fail; | ||
1567 | } | ||
1568 | |||
1569 | qed_llh_remove_mac_filter(QED_LEADING_HWFN(cdev), p_ptt, | ||
1570 | cdev->ll2_mac_address); | ||
1571 | qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); | ||
1572 | eth_zero_addr(cdev->ll2_mac_address); | ||
1573 | |||
1574 | rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), | ||
1575 | cdev->ll2->handle); | ||
1576 | if (rc) | ||
1577 | DP_INFO(cdev, "Failed to terminate LL2 connection\n"); | ||
1578 | |||
1579 | qed_ll2_kill_buffers(cdev); | ||
1580 | |||
1581 | qed_ll2_release_connection(QED_LEADING_HWFN(cdev), cdev->ll2->handle); | ||
1582 | cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; | ||
1583 | |||
1584 | return rc; | ||
1585 | fail: | ||
1586 | return -EINVAL; | ||
1587 | } | ||
1588 | |||
1589 | static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) | ||
1590 | { | ||
1591 | const skb_frag_t *frag; | ||
1592 | int rc = -EINVAL, i; | ||
1593 | dma_addr_t mapping; | ||
1594 | u16 vlan = 0; | ||
1595 | u8 flags = 0; | ||
1596 | |||
1597 | if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { | ||
1598 | DP_INFO(cdev, "Cannot transmit a checksumed packet\n"); | ||
1599 | return -EINVAL; | ||
1600 | } | ||
1601 | |||
1602 | if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { | ||
1603 | DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", | ||
1604 | 1 + skb_shinfo(skb)->nr_frags); | ||
1605 | return -EINVAL; | ||
1606 | } | ||
1607 | |||
1608 | mapping = dma_map_single(&cdev->pdev->dev, skb->data, | ||
1609 | skb->len, DMA_TO_DEVICE); | ||
1610 | if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { | ||
1611 | DP_NOTICE(cdev, "SKB mapping failed\n"); | ||
1612 | return -EINVAL; | ||
1613 | } | ||
1614 | |||
1615 | /* Request HW to calculate IP csum, except for IPv6-in-IPv6 packets */ | ||
1616 | if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) && | ||
1617 | ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) | ||
1618 | flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT); | ||
1619 | |||
1620 | if (skb_vlan_tag_present(skb)) { | ||
1621 | vlan = skb_vlan_tag_get(skb); | ||
1622 | flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT); | ||
1623 | } | ||
1624 | |||
1625 | rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), | ||
1626 | cdev->ll2->handle, | ||
1627 | 1 + skb_shinfo(skb)->nr_frags, | ||
1628 | vlan, flags, 0, mapping, | ||
1629 | skb->len, skb, 1); | ||
1630 | if (rc) | ||
1631 | goto err; | ||
1632 | |||
1633 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | ||
1634 | frag = &skb_shinfo(skb)->frags[i]; | ||
1635 | if (!cdev->ll2->frags_mapped) { | ||
1636 | mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, | ||
1637 | skb_frag_size(frag), | ||
1638 | DMA_TO_DEVICE); | ||
1639 | |||
1640 | if (unlikely(dma_mapping_error(&cdev->pdev->dev, | ||
1641 | mapping))) { | ||
1642 | DP_NOTICE(cdev, | ||
1643 | "Unable to map frag - dropping packet\n"); | ||
1644 | goto err; | ||
1645 | } | ||
1646 | } else { | ||
1647 | mapping = page_to_phys(skb_frag_page(frag)) | | ||
1648 | frag->page_offset; | ||
1649 | } | ||
1650 | |||
1651 | rc = qed_ll2_set_fragment_of_tx_packet(QED_LEADING_HWFN(cdev), | ||
1652 | cdev->ll2->handle, | ||
1653 | mapping, | ||
1654 | skb_frag_size(frag)); | ||
1655 | |||
1656 | /* If this fails there is not much to do: a partial packet has been | ||
1657 | * posted and we can't free the memory - must wait for the completion. | ||
1658 | */ | ||
1659 | if (rc) | ||
1660 | goto err2; | ||
1661 | } | ||
1662 | |||
1663 | return 0; | ||
1664 | |||
1665 | err: | ||
1666 | dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE); | ||
1667 | |||
1668 | err2: | ||
1669 | return rc; | ||
1670 | } | ||
1671 | |||
1672 | static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats) | ||
1673 | { | ||
1674 | if (!cdev->ll2) | ||
1675 | return -EINVAL; | ||
1676 | |||
1677 | return qed_ll2_get_stats(QED_LEADING_HWFN(cdev), | ||
1678 | cdev->ll2->handle, stats); | ||
1679 | } | ||
1680 | |||
1681 | const struct qed_ll2_ops qed_ll2_ops_pass = { | ||
1682 | .start = &qed_ll2_start, | ||
1683 | .stop = &qed_ll2_stop, | ||
1684 | .start_xmit = &qed_ll2_start_xmit, | ||
1685 | .register_cb_ops = &qed_ll2_register_cb_ops, | ||
1686 | .get_stats = &qed_ll2_stats, | ||
1687 | }; | ||
1688 | |||
1689 | int qed_ll2_alloc_if(struct qed_dev *cdev) | ||
1690 | { | ||
1691 | cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL); | ||
1692 | return cdev->ll2 ? 0 : -ENOMEM; | ||
1693 | } | ||
1694 | |||
1695 | void qed_ll2_dealloc_if(struct qed_dev *cdev) | ||
1696 | { | ||
1697 | kfree(cdev->ll2); | ||
1698 | cdev->ll2 = NULL; | ||
1699 | } | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h new file mode 100644 index 000000000000..a037c4845928 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h | |||
@@ -0,0 +1,289 @@ | |||
1 | /* QLogic qed NIC Driver | ||
2 | * | ||
3 | * Copyright (c) 2015 QLogic Corporation | ||
4 | * | ||
5 | * This software is available under the terms of the GNU General Public License | ||
6 | * (GPL) Version 2, available from the file COPYING in the main directory of | ||
7 | * this source tree. | ||
8 | */ | ||
9 | |||
10 | #ifndef _QED_LL2_H | ||
11 | #define _QED_LL2_H | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/list.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/qed/qed_chain.h> | ||
20 | #include <linux/qed/qed_ll2_if.h> | ||
21 | #include "qed.h" | ||
22 | #include "qed_hsi.h" | ||
23 | #include "qed_sp.h" | ||
24 | |||
25 | #define QED_MAX_NUM_OF_LL2_CONNECTIONS (4) | ||
26 | |||
27 | enum qed_ll2_conn_type { | ||
28 | QED_LL2_TYPE_RESERVED, | ||
29 | QED_LL2_TYPE_ISCSI, | ||
30 | QED_LL2_TYPE_TEST, | ||
31 | QED_LL2_TYPE_ISCSI_OOO, | ||
32 | QED_LL2_TYPE_RESERVED2, | ||
33 | QED_LL2_TYPE_ROCE, | ||
34 | QED_LL2_TYPE_RESERVED3, | ||
35 | MAX_QED_LL2_RX_CONN_TYPE | ||
36 | }; | ||
37 | |||
38 | struct qed_ll2_rx_packet { | ||
39 | struct list_head list_entry; | ||
40 | struct core_rx_bd_with_buff_len *rxq_bd; | ||
41 | dma_addr_t rx_buf_addr; | ||
42 | u16 buf_length; | ||
43 | void *cookie; | ||
44 | u8 placement_offset; | ||
45 | u16 parse_flags; | ||
46 | u16 packet_length; | ||
47 | u16 vlan; | ||
48 | u32 opaque_data[2]; | ||
49 | }; | ||
50 | |||
51 | struct qed_ll2_tx_packet { | ||
52 | struct list_head list_entry; | ||
53 | u16 bd_used; | ||
54 | u16 vlan; | ||
55 | u16 l4_hdr_offset_w; | ||
56 | u8 bd_flags; | ||
57 | bool notify_fw; | ||
58 | void *cookie; | ||
59 | |||
60 | struct { | ||
61 | struct core_tx_bd *txq_bd; | ||
62 | dma_addr_t tx_frag; | ||
63 | u16 frag_len; | ||
64 | } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET]; | ||
65 | }; | ||
66 | |||
67 | struct qed_ll2_rx_queue { | ||
68 | /* Lock protecting the Rx queue manipulation */ | ||
69 | spinlock_t lock; | ||
70 | struct qed_chain rxq_chain; | ||
71 | struct qed_chain rcq_chain; | ||
72 | u8 rx_sb_index; | ||
73 | bool b_cb_registred; | ||
74 | __le16 *p_fw_cons; | ||
75 | struct list_head active_descq; | ||
76 | struct list_head free_descq; | ||
77 | struct list_head posting_descq; | ||
78 | struct qed_ll2_rx_packet *descq_array; | ||
79 | void __iomem *set_prod_addr; | ||
80 | }; | ||
81 | |||
82 | struct qed_ll2_tx_queue { | ||
83 | /* Lock protecting the Tx queue manipulation */ | ||
84 | spinlock_t lock; | ||
85 | struct qed_chain txq_chain; | ||
86 | u8 tx_sb_index; | ||
87 | bool b_cb_registred; | ||
88 | __le16 *p_fw_cons; | ||
89 | struct list_head active_descq; | ||
90 | struct list_head free_descq; | ||
91 | struct list_head sending_descq; | ||
92 | struct qed_ll2_tx_packet *descq_array; | ||
93 | struct qed_ll2_tx_packet *cur_send_packet; | ||
94 | struct qed_ll2_tx_packet cur_completing_packet; | ||
95 | u16 cur_completing_bd_idx; | ||
96 | void __iomem *doorbell_addr; | ||
97 | u16 bds_idx; | ||
98 | u16 cur_send_frag_num; | ||
99 | u16 cur_completing_frag_num; | ||
100 | bool b_completing_packet; | ||
101 | }; | ||
102 | |||
103 | struct qed_ll2_info { | ||
104 | /* Lock protecting the state of LL2 */ | ||
105 | struct mutex mutex; | ||
106 | enum qed_ll2_conn_type conn_type; | ||
107 | u32 cid; | ||
108 | u8 my_id; | ||
109 | u8 queue_id; | ||
110 | u8 tx_stats_id; | ||
111 | bool b_active; | ||
112 | u16 mtu; | ||
113 | u8 rx_drop_ttl0_flg; | ||
114 | u8 rx_vlan_removal_en; | ||
115 | u8 tx_tc; | ||
116 | enum core_tx_dest tx_dest; | ||
117 | enum core_error_handle ai_err_packet_too_big; | ||
118 | enum core_error_handle ai_err_no_buf; | ||
119 | u8 tx_stats_en; | ||
120 | struct qed_ll2_rx_queue rx_queue; | ||
121 | struct qed_ll2_tx_queue tx_queue; | ||
122 | }; | ||
123 | |||
124 | /** | ||
125 | * @brief qed_ll2_acquire_connection - allocates resources and | ||
126 | * starts the rx & tx (if relevant) queue pair. Provides the | ||
127 | * connection handle as an output parameter. | ||
128 | * | ||
129 | * @param p_hwfn | ||
130 | * @param p_params Contain various configuration properties | ||
131 | * @param rx_num_desc | ||
132 | * @param tx_num_desc | ||
133 | * | ||
134 | * @param p_connection_handle Output container for LL2 connection's handle | ||
135 | * | ||
136 | * @return 0 on success, failure otherwise | ||
137 | */ | ||
138 | int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn, | ||
139 | struct qed_ll2_info *p_params, | ||
140 | u16 rx_num_desc, | ||
141 | u16 tx_num_desc, | ||
142 | u8 *p_connection_handle); | ||
143 | |||
144 | /** | ||
145 | * @brief qed_ll2_establish_connection - starts a previously | ||
146 | * allocated LL2 queue pair | ||
147 | * | ||
148 | * @param p_hwfn | ||
149 | * @param p_ptt | ||
150 | * @param connection_handle LL2 connection's handle obtained from | ||
151 | * qed_ll2_acquire_connection | ||
152 | * | ||
153 | * @return 0 on success, failure otherwise | ||
154 | */ | ||
155 | int qed_ll2_establish_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); | ||
156 | |||
157 | /** | ||
158 | * @brief qed_ll2_post_rx_buffer - submit a buffer to the LL2 Rx queue. | ||
159 | * | ||
160 | * @param p_hwfn | ||
161 | * @param connection_handle LL2 connection's handle obtained from | ||
162 | * qed_ll2_acquire_connection | ||
163 | * @param addr physical address of the Rx buffer to submit | ||
164 | * @param cookie | ||
165 | * @param notify_fw produce corresponding Rx BD immediately | ||
166 | * | ||
167 | * @return 0 on success, failure otherwise | ||
168 | */ | ||
169 | int qed_ll2_post_rx_buffer(struct qed_hwfn *p_hwfn, | ||
170 | u8 connection_handle, | ||
171 | dma_addr_t addr, | ||
172 | u16 buf_len, void *cookie, u8 notify_fw); | ||
173 | |||
174 | /** | ||
175 | * @brief qed_ll2_prepare_tx_packet - requests the start Tx BD | ||
176 | * to prepare a Tx packet for submission to the FW. | ||
177 | * | ||
178 | * @param p_hwfn | ||
179 | * @param connection_handle LL2 connection's handle obtained from | ||
180 | * qed_ll2_acquire_connection | ||
181 | * @param num_of_bds number of requested BDs; equals the number of | ||
182 | * fragments in the Tx packet | ||
183 | * @param vlan VLAN to insert into the packet (if insertion is set) | ||
184 | * @param bd_flags | ||
185 | * @param l4_hdr_offset_w L4 Header Offset from start of packet | ||
186 | * (in words). This is needed if both l4_csum | ||
187 | * and ipv6_ext are set | ||
188 | * @param first_frag | ||
189 | * @param first_frag_len | ||
190 | * @param cookie | ||
191 | * | ||
192 | * @param notify_fw | ||
193 | * | ||
194 | * @return 0 on success, failure otherwise | ||
195 | */ | ||
196 | int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn, | ||
197 | u8 connection_handle, | ||
198 | u8 num_of_bds, | ||
199 | u16 vlan, | ||
200 | u8 bd_flags, | ||
201 | u16 l4_hdr_offset_w, | ||
202 | dma_addr_t first_frag, | ||
203 | u16 first_frag_len, void *cookie, u8 notify_fw); | ||
204 | |||
205 | /** | ||
206 | * @brief qed_ll2_release_connection - releases resources | ||
207 | * allocated for LL2 connection | ||
208 | * | ||
209 | * @param p_hwfn | ||
210 | * @param connection_handle LL2 connection's handle obtained from | ||
211 | * qed_ll2_acquire_connection | ||
212 | */ | ||
213 | void qed_ll2_release_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); | ||
214 | |||
215 | /** | ||
216 | * @brief qed_ll2_set_fragment_of_tx_packet - provides a fragment to fill | ||
217 | * a Tx BD of the BDs requested by | ||
218 | * qed_ll2_prepare_tx_packet | ||
219 | * | ||
220 | * @param p_hwfn | ||
221 | * @param connection_handle LL2 connection's handle | ||
222 | * obtained from | ||
223 | * qed_ll2_acquire_connection | ||
224 | * @param addr | ||
225 | * @param nbytes | ||
226 | * | ||
227 | * @return 0 on success, failure otherwise | ||
228 | */ | ||
229 | int qed_ll2_set_fragment_of_tx_packet(struct qed_hwfn *p_hwfn, | ||
230 | u8 connection_handle, | ||
231 | dma_addr_t addr, u16 nbytes); | ||
232 | |||
233 | /** | ||
234 | * @brief qed_ll2_terminate_connection - stops Tx/Rx queues | ||
235 | * | ||
236 | * | ||
237 | * @param p_hwfn | ||
238 | * @param connection_handle LL2 connection's handle | ||
239 | * obtained from | ||
240 | * qed_ll2_acquire_connection | ||
241 | * | ||
242 | * @return 0 on success, failure otherwise | ||
243 | */ | ||
244 | int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle); | ||
245 | |||
246 | /** | ||
247 | * @brief qed_ll2_get_stats - get LL2 queue's statistics | ||
248 | * | ||
249 | * | ||
250 | * @param p_hwfn | ||
251 | * @param connection_handle LL2 connection's handle obtained from | ||
252 | * qed_ll2_acquire_connection | ||
253 | * @param p_stats | ||
254 | * | ||
255 | * @return 0 on success, failure otherwise | ||
256 | */ | ||
257 | int qed_ll2_get_stats(struct qed_hwfn *p_hwfn, | ||
258 | u8 connection_handle, struct qed_ll2_stats *p_stats); | ||
259 | |||
260 | /** | ||
261 | * @brief qed_ll2_alloc - Allocates LL2 connections set | ||
262 | * | ||
263 | * @param p_hwfn | ||
264 | * | ||
265 | * @return pointer to allocated qed_ll2_info or NULL | ||
266 | */ | ||
267 | struct qed_ll2_info *qed_ll2_alloc(struct qed_hwfn *p_hwfn); | ||
268 | |||
269 | /** | ||
270 | * @brief qed_ll2_setup - Inits LL2 connections set | ||
271 | * | ||
272 | * @param p_hwfn | ||
273 | * @param p_ll2_connections | ||
274 | * | ||
275 | */ | ||
276 | void qed_ll2_setup(struct qed_hwfn *p_hwfn, | ||
277 | struct qed_ll2_info *p_ll2_connections); | ||
278 | |||
279 | /** | ||
280 | * @brief qed_ll2_free - Releases LL2 connections set | ||
281 | * | ||
282 | * @param p_hwfn | ||
283 | * @param p_ll2_connections | ||
284 | * | ||
285 | */ | ||
286 | void qed_ll2_free(struct qed_hwfn *p_hwfn, | ||
287 | struct qed_ll2_info *p_ll2_connections); | ||
288 | |||
289 | #endif | ||
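End-to-end, the internal API declared in this header is intended to be driven in the order sketched below: acquire, establish, post Rx buffers, transmit, terminate, release. This is a hedged sketch; the wrapper, descriptor counts, Rx buffer and cookie are hypothetical, and error unwinding is abbreviated:

static int ll2_internal_api_sketch(struct qed_hwfn *p_hwfn,
				   dma_addr_t buf_dma, void *buf_cookie)
{
	struct qed_ll2_info ll2_info = {};
	u8 handle;
	int rc;

	ll2_info.conn_type = QED_LL2_TYPE_TEST;
	ll2_info.mtu = 1500;
	ll2_info.tx_tc = 0;
	ll2_info.tx_dest = CORE_TX_DEST_NW;

	rc = qed_ll2_acquire_connection(p_hwfn, &ll2_info, 64, 64, &handle);
	if (rc)
		return rc;

	rc = qed_ll2_establish_connection(p_hwfn, handle);
	if (rc)
		goto release;

	/* Feed the Rx ring before traffic can flow. */
	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf_dma, 0,
				    buf_cookie, 1);

	/* ... Tx via qed_ll2_prepare_tx_packet() and
	 * qed_ll2_set_fragment_of_tx_packet() ...
	 */

	qed_ll2_terminate_connection(p_hwfn, handle);
release:
	qed_ll2_release_connection(p_hwfn, handle);
	return rc;
}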
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index b730a632c383..48cdf62c025b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -22,11 +22,13 @@ | |||
22 | #include <linux/etherdevice.h> | 22 | #include <linux/etherdevice.h> |
23 | #include <linux/vmalloc.h> | 23 | #include <linux/vmalloc.h> |
24 | #include <linux/qed/qed_if.h> | 24 | #include <linux/qed/qed_if.h> |
25 | #include <linux/qed/qed_ll2_if.h> | ||
25 | 26 | ||
26 | #include "qed.h" | 27 | #include "qed.h" |
27 | #include "qed_sriov.h" | 28 | #include "qed_sriov.h" |
28 | #include "qed_sp.h" | 29 | #include "qed_sp.h" |
29 | #include "qed_dev_api.h" | 30 | #include "qed_dev_api.h" |
31 | #include "qed_ll2.h" | ||
30 | #include "qed_mcp.h" | 32 | #include "qed_mcp.h" |
31 | #include "qed_hw.h" | 33 | #include "qed_hw.h" |
32 | #include "qed_selftest.h" | 34 | #include "qed_selftest.h" |
@@ -608,7 +610,16 @@ static int qed_nic_reset(struct qed_dev *cdev) | |||
608 | 610 | ||
609 | static int qed_nic_setup(struct qed_dev *cdev) | 611 | static int qed_nic_setup(struct qed_dev *cdev) |
610 | { | 612 | { |
611 | int rc; | 613 | int rc, i; |
614 | |||
615 | /* Determine if interface is going to require LL2 */ | ||
616 | if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { | ||
617 | for (i = 0; i < cdev->num_hwfns; i++) { | ||
618 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | ||
619 | |||
620 | p_hwfn->using_ll2 = true; | ||
621 | } | ||
622 | } | ||
612 | 623 | ||
613 | rc = qed_resc_alloc(cdev); | 624 | rc = qed_resc_alloc(cdev); |
614 | if (rc) | 625 | if (rc) |
@@ -873,6 +884,12 @@ static int qed_slowpath_start(struct qed_dev *cdev, | |||
873 | DP_INFO(cdev, | 884 | DP_INFO(cdev, |
874 | "HW initialization and function start completed successfully\n"); | 885 | "HW initialization and function start completed successfully\n"); |
875 | 886 | ||
887 | /* Allocate LL2 interface if needed */ | ||
888 | if (QED_LEADING_HWFN(cdev)->using_ll2) { | ||
889 | rc = qed_ll2_alloc_if(cdev); | ||
890 | if (rc) | ||
891 | goto err3; | ||
892 | } | ||
876 | if (IS_PF(cdev)) { | 893 | if (IS_PF(cdev)) { |
877 | hwfn = QED_LEADING_HWFN(cdev); | 894 | hwfn = QED_LEADING_HWFN(cdev); |
878 | drv_version.version = (params->drv_major << 24) | | 895 | drv_version.version = (params->drv_major << 24) | |
@@ -893,6 +910,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, | |||
893 | 910 | ||
894 | return 0; | 911 | return 0; |
895 | 912 | ||
913 | err3: | ||
914 | qed_hw_stop(cdev); | ||
896 | err2: | 915 | err2: |
897 | qed_hw_timers_stop_all(cdev); | 916 | qed_hw_timers_stop_all(cdev); |
898 | if (IS_PF(cdev)) | 917 | if (IS_PF(cdev)) |
@@ -915,6 +934,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) | |||
915 | if (!cdev) | 934 | if (!cdev) |
916 | return -ENODEV; | 935 | return -ENODEV; |
917 | 936 | ||
937 | qed_ll2_dealloc_if(cdev); | ||
938 | |||
918 | if (IS_PF(cdev)) { | 939 | if (IS_PF(cdev)) { |
919 | qed_free_stream_mem(cdev); | 940 | qed_free_stream_mem(cdev); |
920 | if (IS_QED_ETH_IF(cdev)) | 941 | if (IS_QED_ETH_IF(cdev)) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 759cb04e02b0..e75738d21783 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
@@ -208,6 +208,26 @@ | |||
208 | 0x50196cUL | 208 | 0x50196cUL |
209 | #define NIG_REG_LLH_CLS_TYPE_DUALMODE \ | 209 | #define NIG_REG_LLH_CLS_TYPE_DUALMODE \ |
210 | 0x501964UL | 210 | 0x501964UL |
211 | #define NIG_REG_LLH_FUNC_FILTER_VALUE \ | ||
212 | 0x501a00UL | ||
213 | #define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \ | ||
214 | 32 | ||
215 | #define NIG_REG_LLH_FUNC_FILTER_EN \ | ||
216 | 0x501a80UL | ||
217 | #define NIG_REG_LLH_FUNC_FILTER_EN_SIZE \ | ||
218 | 16 | ||
219 | #define NIG_REG_LLH_FUNC_FILTER_MODE \ | ||
220 | 0x501ac0UL | ||
221 | #define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \ | ||
222 | 16 | ||
223 | #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \ | ||
224 | 0x501b00UL | ||
225 | #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \ | ||
226 | 16 | ||
227 | #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL \ | ||
228 | 0x501b40UL | ||
229 | #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \ | ||
230 | 16 | ||
211 | #define NCSI_REG_CONFIG \ | 231 | #define NCSI_REG_CONFIG \ |
212 | 0x040200UL | 232 | 0x040200UL |
213 | #define PBF_REG_INIT \ | 233 | #define PBF_REG_INIT \ |
@@ -264,6 +284,8 @@ | |||
264 | 0x1f0a1cUL | 284 | 0x1f0a1cUL |
265 | #define PRS_REG_ROCE_DEST_QP_MAX_PF \ | 285 | #define PRS_REG_ROCE_DEST_QP_MAX_PF \ |
266 | 0x1f0430UL | 286 | 0x1f0430UL |
287 | #define PRS_REG_USE_LIGHT_L2 \ | ||
288 | 0x1f096cUL | ||
267 | #define PSDM_REG_ENABLE_IN1 \ | 289 | #define PSDM_REG_ENABLE_IN1 \ |
268 | 0xfa0004UL | 290 | 0xfa0004UL |
269 | #define PSEM_REG_ENABLE_IN \ | 291 | #define PSEM_REG_ENABLE_IN \ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index a548504c3420..a3c539f1c2ac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
@@ -61,6 +61,10 @@ union ramrod_data { | |||
61 | struct vport_start_ramrod_data vport_start; | 61 | struct vport_start_ramrod_data vport_start; |
62 | struct vport_stop_ramrod_data vport_stop; | 62 | struct vport_stop_ramrod_data vport_stop; |
63 | struct vport_update_ramrod_data vport_update; | 63 | struct vport_update_ramrod_data vport_update; |
64 | struct core_rx_start_ramrod_data core_rx_queue_start; | ||
65 | struct core_rx_stop_ramrod_data core_rx_queue_stop; | ||
66 | struct core_tx_start_ramrod_data core_tx_queue_start; | ||
67 | struct core_tx_stop_ramrod_data core_tx_queue_stop; | ||
64 | struct vport_filter_update_ramrod_data vport_filter_update; | 68 | struct vport_filter_update_ramrod_data vport_filter_update; |
65 | 69 | ||
66 | struct rdma_init_func_ramrod_data rdma_init_func; | 70 | struct rdma_init_func_ramrod_data rdma_init_func; |