author		Yevgeny Petrilin <yevgenyp@mellanox.co.il>	2009-08-06 22:28:18 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-06 22:28:18 -0400
commit		9f519f68cfffba022978634f724944a0b971fec1 (patch)
tree		dec840b3dfd1bd3173c12686e85a62466a82202b /drivers
parent		b6b912e0804dc1b3e856da3cc82fbe78b50e968c (diff)
mlx4_en: Not using Shared Receive Queues
We use a 1:1 mapping between QPs and SRQs on the receive side, so the
additional level of indirection is not required. Allocate the receive
buffers for the RSS QPs directly instead.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
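
Illustration (not part of the patch): with the SRQ gone, the RQ geometry is written straight into the QP context via ilog2(size) << 3 | (ilog2(stride) - 4), as the en_resources.c hunk below does. A minimal user-space sketch of that encoding, assuming example values (a 1024-entry ring with a 64-byte stride) and a hypothetical ilog2_u32() stand-in for the kernel's ilog2():

#include <stdio.h>

/* Hypothetical stand-in for the kernel's ilog2(); inputs here are powers of two. */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int size = 1024;	/* assumed RX ring entries (power of two) */
	unsigned int stride = 64;	/* assumed bytes per RX descriptor (power of two) */

	/* Same expression the patch stores in context->rq_size_stride:
	 * log2(size) shifted left by 3, OR'd with log2(stride) - 4.
	 */
	unsigned int rq_size_stride = (ilog2_u32(size) << 3) | (ilog2_u32(stride) - 4);

	printf("rq_size_stride = 0x%02x\n", rq_size_stride);
	return 0;
}

For these assumed values the sketch prints rq_size_stride = 0x52.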
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/mlx4/en_netdev.c	 3
-rw-r--r--	drivers/net/mlx4/en_resources.c	 9
-rw-r--r--	drivers/net/mlx4/en_rx.c	77
-rw-r--r--	drivers/net/mlx4/en_tx.c	 4
-rw-r--r--	drivers/net/mlx4/mlx4_en.h	 6
5 files changed, 26 insertions, 73 deletions
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index f8bbc5a39e15..c48b0f4b17b7 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -622,8 +622,7 @@ int mlx4_en_start_port(struct net_device *dev)
 
 		/* Configure ring */
 		tx_ring = &priv->tx_ring[i];
-		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
-					       priv->rx_ring[0].srq.srqn);
+		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
 		if (err) {
 			en_err(priv, "Failed allocating Tx ring\n");
 			mlx4_en_deactivate_cq(priv, cq);
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
index 65ca706c04bb..16256784a943 100644
--- a/drivers/net/mlx4/en_resources.c
+++ b/drivers/net/mlx4/en_resources.c
@@ -37,7 +37,7 @@
 #include "mlx4_en.h"
 
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-			     int is_tx, int rss, int qpn, int cqn, int srqn,
+			     int is_tx, int rss, int qpn, int cqn,
			     struct mlx4_qp_context *context)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -46,11 +46,12 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	context->flags = cpu_to_be32(7 << 16 | rss << 13);
 	context->pd = cpu_to_be32(mdev->priv_pdn);
 	context->mtu_msgmax = 0xff;
-	context->rq_size_stride = 0;
+	if (!is_tx && !rss)
+		context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
 	if (is_tx)
 		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
 	else
-		context->sq_size_stride = 1;
+		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
 	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
 	context->local_qpn = cpu_to_be32(qpn);
 	context->pri_path.ackto = 1 & 0x07;
@@ -59,8 +60,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	context->cqn_send = cpu_to_be32(cqn);
 	context->cqn_recv = cpu_to_be32(cqn);
 	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
-	if (!rss)
-		context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
 }
 
 
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 47b178e2b49e..cd084de322f4 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -40,16 +40,6 @@
 
 #include "mlx4_en.h"
 
-static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
-{
-	int offset = n << ring->srq.wqe_shift;
-	return ring->buf + offset;
-}
-
-static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
-{
-	return;
-}
 
 static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
 				   void **ip_hdr, void **tcpudp_hdr,
@@ -154,9 +144,6 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
 	int possible_frags;
 	int i;
 
-	/* Pre-link descriptor */
-	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
-
 	/* Set size and memtype fields */
 	for (i = 0; i < priv->num_frags; i++) {
 		skb_frags[i].size = priv->frag_info[i].frag_size;
@@ -294,9 +281,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	int err;
 	int tmp;
 
-	/* Sanity check SRQ size before proceeding */
-	if (size >= mdev->dev->caps.max_srq_wqes)
-		return -EINVAL;
 
 	ring->prod = 0;
 	ring->cons = 0;
@@ -304,7 +288,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	ring->size_mask = size - 1;
 	ring->stride = stride;
 	ring->log_stride = ffs(ring->stride) - 1;
-	ring->buf_size = ring->size * ring->stride;
+	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
 					sizeof(struct skb_frag_struct));
@@ -360,15 +344,12 @@ err_ring:
 
 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct mlx4_wqe_srq_next_seg *next;
 	struct mlx4_en_rx_ring *ring;
 	int i;
 	int ring_ind;
 	int err;
 	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
 					DS_SIZE * priv->num_frags);
-	int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
 		ring = &priv->rx_ring[ring_ind];
@@ -379,6 +360,9 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
 
 		ring->stride = stride;
+		if (ring->stride <= TXBB_SIZE)
+			ring->buf += TXBB_SIZE;
+
 		ring->log_stride = ffs(ring->stride) - 1;
 		ring->buf_size = ring->size * ring->stride;
 
@@ -405,37 +389,10 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		ring = &priv->rx_ring[ring_ind];
 
 		mlx4_en_update_rx_prod_db(ring);
-
-		/* Configure SRQ representing the ring */
-		ring->srq.max = ring->actual_size;
-		ring->srq.max_gs = max_gs;
-		ring->srq.wqe_shift = ilog2(ring->stride);
-
-		for (i = 0; i < ring->srq.max; ++i) {
-			next = get_wqe(ring, i);
-			next->next_wqe_index =
-				cpu_to_be16((i + 1) & (ring->srq.max - 1));
-		}
-
-		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
-				     ring->wqres.db.dma, &ring->srq);
-		if (err){
-			en_err(priv, "Failed to allocate srq\n");
-			ring_ind--;
-			goto err_srq;
-		}
-		ring->srq.event = mlx4_en_srq_event;
 	}
 
 	return 0;
 
-err_srq:
-	while (ring_ind >= 0) {
-		ring = &priv->rx_ring[ring_ind];
-		mlx4_srq_free(mdev->dev, &ring->srq);
-		ring_ind--;
-	}
-
 err_buffers:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
 		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
@@ -456,7 +413,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 
 	kfree(ring->lro.lro_arr);
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
-	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
 }
@@ -464,10 +421,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-
-	mlx4_srq_free(mdev->dev, &ring->srq);
 	mlx4_en_free_rx_buf(priv, ring);
+	if (ring->stride <= TXBB_SIZE)
+		ring->buf -= TXBB_SIZE;
 	mlx4_en_destroy_allocator(priv, ring);
 }
 
@@ -835,8 +791,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 
 /* RSS related functions */
 
-static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
-				 int qpn, int srqn, int cqn,
+static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
+				 struct mlx4_en_rx_ring *ring,
 				 enum mlx4_qp_state *state,
 				 struct mlx4_qp *qp)
 {
@@ -858,13 +814,16 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
 	qp->event = mlx4_en_sqp_event;
 
 	memset(context, 0, sizeof *context);
-	mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
+	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 0, 0,
+				qpn, ring->cqn, context);
+	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
 
-	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
+	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
 	if (err) {
 		mlx4_qp_remove(mdev->dev, qp);
 		mlx4_qp_free(mdev->dev, qp);
 	}
+	mlx4_en_update_rx_prod_db(ring);
 out:
 	kfree(context);
 	return err;
@@ -880,7 +839,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	void *ptr;
 	int rss_xor = mdev->profile.rss_xor;
 	u8 rss_mask = mdev->profile.rss_mask;
-	int i, srqn, qpn, cqn;
+	int i, qpn;
 	int err = 0;
 	int good_qps = 0;
 
@@ -894,10 +853,8 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cqn = priv->rx_ring[i].cqn;
-		srqn = priv->rx_ring[i].srq.srqn;
 		qpn = rss_map->base_qpn + i;
-		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
+		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
 					    &rss_map->state[i],
 					    &rss_map->qps[i]);
 		if (err)
@@ -920,7 +877,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, 0, &context);
+				priv->rx_ring[0].cqn, &context);
 
 	ptr = ((void *) &context) + 0x3c;
 	rss_context = (struct mlx4_en_rss_context *) ptr;
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index d5c18c674255..cbc8ab0c6d12 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -150,7 +150,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
-			     int cq, int srqn)
+			     int cq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
@@ -168,7 +168,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 	ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
 
 	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
-				ring->cqn, srqn, &ring->context);
+				ring->cqn, &ring->context);
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 2d76ff4c564b..4513fb4960dc 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -274,13 +274,11 @@ struct mlx4_en_tx_ring {
 };
 
 struct mlx4_en_rx_desc {
-	struct mlx4_wqe_srq_next_seg next;
 	/* actual number of entries depends on rx ring stride */
 	struct mlx4_wqe_data_seg data[0];
 };
 
 struct mlx4_en_rx_ring {
-	struct mlx4_srq srq;
 	struct mlx4_hwq_resources wqres;
 	struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
 	struct net_lro_mgr lro;
@@ -527,7 +525,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ri
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
-			     int cq, int srqn);
+			     int cq);
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_tx_ring *ring);
 
@@ -544,7 +542,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
 			  int budget);
 int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
 void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
-			     int is_tx, int rss, int qpn, int cqn, int srqn,
+			     int is_tx, int rss, int qpn, int cqn,
 			     struct mlx4_qp_context *context);
 void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
 int mlx4_en_map_buffer(struct mlx4_buf *buf);