author	Tariq Toukan <tariqt@mellanox.com>	2018-07-15 03:34:39 -0400
committer	Saeed Mahameed <saeedm@mellanox.com>	2018-07-26 18:23:55 -0400
commit	c94e4f117e473dec11c7b9395b4d88cae2ba27c9 (patch)
tree	da4b127a504b6776cae0c416ec0e98cbbc63a256
parent	86690b4b4a5127b912348201f4f5880bb75a6621 (diff)
net/mlx5e: Make XDP xmit functions more generic
Convert the XDP xmit functions to use the generic xdp_frame API in the
XDP_TX flow. The same functions will be used later in this series to
transmit the XDP redirect-out packets as well.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
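The heart of the change is the new mlx5e_xmit_xdp_buff() helper in the
en/xdp.c hunk below. A minimal annotated sketch of the layout it relies on;
the diagram and comments are editorial, assuming the xdp_frame API of this
kernel generation (convert_to_xdp_frame() places the frame metadata in the
buffer's own headroom; it was renamed xdp_convert_buff_to_frame() in later
kernels):

/*
 * RX page (DMA-mapped at di->addr)
 * +------------------+--------------------+----------------+
 * | struct xdp_frame | remaining headroom | packet payload |
 * +------------------+--------------------+----------------+
 * ^ xdpf == xdp->data_hard_start          ^ xdpf->data
 *
 * Frame metadata and payload share one page and one DMA mapping, so the
 * payload's DMA address is the mapping base plus its byte offset from
 * the start of the frame:
 *
 *	xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
 *
 * This is the same address the old code computed as
 * di->addr + (xdp->data - xdp->data_hard_start); only the representation
 * of the packet changed.
 */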
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en.h	20
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c	70
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h	3
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c	10
4 files changed, 61 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2f1058da0907..118d66207079 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -395,6 +395,17 @@ struct mlx5e_txqsq {
 	} recover;
 } ____cacheline_aligned_in_smp;
 
+struct mlx5e_dma_info {
+	struct page	*page;
+	dma_addr_t	addr;
+};
+
+struct mlx5e_xdp_info {
+	struct xdp_frame      *xdpf;
+	dma_addr_t            dma_addr;
+	struct mlx5e_dma_info di;
+};
+
 struct mlx5e_xdpsq {
 	/* data path */
 
@@ -406,7 +417,7 @@ struct mlx5e_xdpsq {
 
 	/* write@xmit, read@completion */
 	struct {
-		struct mlx5e_dma_info     *di;
+		struct mlx5e_xdp_info     *xdpi;
 		bool                       doorbell;
 		bool                       redirect_flush;
 	} db;
@@ -419,6 +430,7 @@ struct mlx5e_xdpsq {
 	__be32                     mkey_be;
 	u8                         min_inline_mode;
 	unsigned long              state;
+	unsigned int               hw_mtu;
 
 	/* control path */
 	struct mlx5_wq_ctrl        wq_ctrl;
@@ -455,11 +467,6 @@ mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n)
 	return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc);
 }
 
-struct mlx5e_dma_info {
-	struct page	*page;
-	dma_addr_t	addr;
-};
-
 struct mlx5e_wqe_frag_info {
 	struct mlx5e_dma_info *di;
 	u32 offset;
@@ -562,7 +569,6 @@ struct mlx5e_rq {
 
 	/* XDP */
 	struct bpf_prog       *xdp_prog;
-	unsigned int           hw_mtu;
 	struct mlx5e_xdpsq     xdpsq;
 	DECLARE_BITMAP(flags, 8);
 	struct page_pool      *page_pool;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 34accf3f4cee..53d011eb71ab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -33,6 +33,23 @@
 #include <linux/bpf_trace.h>
 #include "en/xdp.h"
 
+static inline bool
+mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
+		    struct xdp_buff *xdp)
+{
+	struct mlx5e_xdp_info xdpi;
+
+	xdpi.xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpi.xdpf))
+		return false;
+	xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf);
+	dma_sync_single_for_device(sq->pdev, xdpi.dma_addr,
+				   xdpi.xdpf->len, PCI_DMA_TODEVICE);
+	xdpi.di = *di;
+
+	return mlx5e_xmit_xdp_frame(sq, &xdpi);
+}
+
 /* returns true if packet was consumed by xdp */
 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 		      void *va, u16 *rx_headroom, u32 *len)
@@ -58,22 +75,24 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 		*len = xdp.data_end - xdp.data;
 		return false;
 	case XDP_TX:
-		if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
-			trace_xdp_exception(rq->netdev, prog, act);
+		if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp)))
+			goto xdp_abort;
+		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
 		return true;
 	case XDP_REDIRECT:
 		/* When XDP enabled then page-refcnt==1 here */
 		err = xdp_do_redirect(rq->netdev, &xdp, prog);
-		if (!err) {
-			__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
-			rq->xdpsq.db.redirect_flush = true;
-			mlx5e_page_dma_unmap(rq, di);
-		}
+		if (unlikely(err))
+			goto xdp_abort;
+		__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
+		rq->xdpsq.db.redirect_flush = true;
+		mlx5e_page_dma_unmap(rq, di);
 		rq->stats->xdp_redirect++;
 		return true;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 	case XDP_ABORTED:
+xdp_abort:
 		trace_xdp_exception(rq->netdev, prog, act);
 	case XDP_DROP:
 		rq->stats->xdp_drop++;
@@ -81,27 +100,27 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 	}
 }
 
-bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-			  const struct xdp_buff *xdp)
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
 {
-	struct mlx5e_xdpsq       *sq   = &rq->xdpsq;
 	struct mlx5_wq_cyc       *wq   = &sq->wq;
 	u16                       pi   = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
 
+	struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
+
 	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
 	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
-	struct mlx5_wqe_data_seg *dseg;
+	struct mlx5_wqe_data_seg *dseg = wqe->data;
 
-	ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
-	dma_addr_t dma_addr  = di->addr + data_offset;
-	unsigned int dma_len = xdp->data_end - xdp->data;
+	struct xdp_frame *xdpf = xdpi->xdpf;
+	dma_addr_t dma_addr  = xdpi->dma_addr;
+	unsigned int dma_len = xdpf->len;
 
 	struct mlx5e_rq_stats *stats = rq->stats;
 
 	prefetchw(wqe);
 
-	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) {
+	if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
 		stats->xdp_drop++;
 		return false;
 	}
@@ -116,15 +135,11 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 		return false;
 	}
 
-	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
-
 	cseg->fm_ce_se = 0;
 
-	dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
-
 	/* copy the inline part if required */
 	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
-		memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
+		memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE);
 		eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
 		dma_len  -= MLX5E_XDP_MIN_INLINE;
 		dma_addr += MLX5E_XDP_MIN_INLINE;
@@ -140,8 +155,7 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 	/* move page to reference to sq responsibility,
 	 * and mark so it's not put back in page-cache.
 	 */
-	__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
-	sq->db.di[pi] = *di;
+	sq->db.xdpi[pi] = *xdpi;
 	sq->pc++;
 
 	sq->db.doorbell = true;
@@ -184,17 +198,17 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
 		do {
-			struct mlx5e_dma_info *di;
+			struct mlx5e_xdp_info *xdpi;
 			u16 ci;
 
 			last_wqe = (sqcc == wqe_counter);
 
 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-			di = &sq->db.di[ci];
+			xdpi = &sq->db.xdpi[ci];
 
 			sqcc++;
 			/* Recycle RX page */
-			mlx5e_page_release(rq, di, true);
+			mlx5e_page_release(rq, &xdpi->di, true);
 		} while (!last_wqe);
 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
@@ -212,15 +226,15 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
 {
 	struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq);
-	struct mlx5e_dma_info *di;
+	struct mlx5e_xdp_info *xdpi;
 	u16 ci;
 
 	while (sq->cc != sq->pc) {
 		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
-		di = &sq->db.di[ci];
+		xdpi = &sq->db.xdpi[ci];
 		sq->cc++;
 
-		mlx5e_page_release(rq, di, false);
+		mlx5e_page_release(rq, &xdpi->di, false);
 	}
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index a8a856a82c63..81739aad0188 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -45,8 +45,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
 
-bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
-			  const struct xdp_buff *xdp);
+bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
 
 static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dd5eec923766..7ed71db9b32f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -491,7 +491,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->channel = c;
 	rq->ix      = c->ix;
 	rq->mdev    = mdev;
-	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->stats   = &c->priv->channel_stats[c->ix].rq;
 
 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -969,16 +968,16 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
 
 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
 {
-	kvfree(sq->db.di);
+	kvfree(sq->db.xdpi);
 }
 
 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
 	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
 
-	sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)),
-				  GFP_KERNEL, numa);
-	if (!sq->db.di) {
+	sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
+				    GFP_KERNEL, numa);
+	if (!sq->db.xdpi) {
 		mlx5e_free_xdpsq_db(sq);
 		return -ENOMEM;
 	}
@@ -1001,6 +1000,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 	sq->channel   = c;
 	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
+	sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
 	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
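
As the log says, later patches in this series reuse the now-generic
mlx5e_xmit_xdp_frame() to transmit redirect-out packets. A hedged sketch of
what such a caller could look like; the function name is illustrative, and
it assumes a follow-up change that lets the completion path DMA-unmap
redirected frames instead of recycling an RX page via xdpi->di:

static bool sketch_xmit_redirected_frame(struct mlx5e_xdpsq *sq,
					 struct xdp_frame *xdpf)
{
	struct mlx5e_xdp_info xdpi = { .xdpf = xdpf };

	/* A redirected frame has no DMA mapping for this device yet, so
	 * map it here; the XDP_TX path reuses the RX mapping instead.
	 */
	xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len,
				       PCI_DMA_TODEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr)))
		return false;

	return mlx5e_xmit_xdp_frame(sq, &xdpi);
}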