author		Yevgeny Petrilin <yevgenyp@mellanox.co.il>	2009-08-06 22:28:18 -0400
committer	David S. Miller <davem@davemloft.net>	2009-08-06 22:28:18 -0400
commit		9f519f68cfffba022978634f724944a0b971fec1 (patch)
tree		dec840b3dfd1bd3173c12686e85a62466a82202b /drivers/net/mlx4/en_rx.c
parent		b6b912e0804dc1b3e856da3cc82fbe78b50e968c (diff)
mlx4_en: Not using Shared Receive Queues
We use a 1:1 mapping between QPs and SRQs on the receive side, so an
additional indirection level is not required. Allocate the receive
buffers for the RSS QPs directly.
Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/mlx4/en_rx.c')
-rw-r--r--	drivers/net/mlx4/en_rx.c	77
1 file changed, 17 insertions(+), 60 deletions(-)
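The buffer bookkeeping in this change is easy to miss in the diff: the ring allocation grows by TXBB_SIZE, and a ring whose stride is no larger than TXBB_SIZE parks its buf pointer one TXBB past the start of the allocation, undoing the offset on teardown. Below is a minimal standalone sketch of that pointer arithmetic, not driver code: plain malloc()/free() stand in for mlx4_alloc_hwq_res()/mlx4_free_hwq_res(), and TXBB_SIZE is assumed to be 64 as defined in mlx4_en.h.

/*
 * Standalone sketch of the TXBB_SIZE headroom bookkeeping -- not mlx4
 * code.  malloc()/free() stand in for the driver's HW-queue resource
 * allocator; TXBB_SIZE assumed 64 per mlx4_en.h.
 */
#include <stdio.h>
#include <stdlib.h>

#define TXBB_SIZE 64

struct rx_ring {
	void *base;	/* start of the raw allocation */
	void *buf;	/* where the RX descriptors actually begin */
	int size;	/* number of descriptors */
	int stride;	/* bytes per descriptor */
};

static int ring_create(struct rx_ring *ring, int size, int stride)
{
	ring->size = size;
	ring->stride = stride;
	/* mirrors: ring->buf_size = ring->size * ring->stride + TXBB_SIZE */
	ring->base = malloc((size_t)size * stride + TXBB_SIZE);
	if (!ring->base)
		return -1;
	ring->buf = ring->base;
	/* mirrors the activate path: skip the first TXBB for small strides */
	if (ring->stride <= TXBB_SIZE)
		ring->buf = (char *)ring->buf + TXBB_SIZE;
	return 0;
}

static void ring_destroy(struct rx_ring *ring)
{
	/* mirrors the deactivate path: undo the offset before freeing */
	if (ring->stride <= TXBB_SIZE)
		ring->buf = (char *)ring->buf - TXBB_SIZE;
	free(ring->base);
}

int main(void)
{
	struct rx_ring ring;

	if (ring_create(&ring, 1024, 16))
		return 1;
	printf("buf starts %td bytes into the allocation\n",
	       (char *)ring.buf - (char *)ring.base);
	ring_destroy(&ring);
	return 0;
}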
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 47b178e2b49e..cd084de322f4 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -40,16 +40,6 @@
 
 #include "mlx4_en.h"
 
-static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
-{
-	int offset = n << ring->srq.wqe_shift;
-	return ring->buf + offset;
-}
-
-static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
-{
-	return;
-}
 
 static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
 				   void **ip_hdr, void **tcpudp_hdr,
@@ -154,9 +144,6 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
 	int possible_frags;
 	int i;
 
-	/* Pre-link descriptor */
-	rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
-
 	/* Set size and memtype fields */
 	for (i = 0; i < priv->num_frags; i++) {
 		skb_frags[i].size = priv->frag_info[i].frag_size;
@@ -294,9 +281,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	int err;
 	int tmp;
 
-	/* Sanity check SRQ size before proceeding */
-	if (size >= mdev->dev->caps.max_srq_wqes)
-		return -EINVAL;
 
 	ring->prod = 0;
 	ring->cons = 0;
@@ -304,7 +288,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	ring->size_mask = size - 1;
 	ring->stride = stride;
 	ring->log_stride = ffs(ring->stride) - 1;
-	ring->buf_size = ring->size * ring->stride;
+	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
 
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
 					sizeof(struct skb_frag_struct));
@@ -360,15 +344,12 @@ err_ring:
 
 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct mlx4_wqe_srq_next_seg *next;
 	struct mlx4_en_rx_ring *ring;
 	int i;
 	int ring_ind;
 	int err;
 	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
 					DS_SIZE * priv->num_frags);
-	int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
 		ring = &priv->rx_ring[ring_ind];
@@ -379,6 +360,9 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
 
 		ring->stride = stride;
+		if (ring->stride <= TXBB_SIZE)
+			ring->buf += TXBB_SIZE;
+
 		ring->log_stride = ffs(ring->stride) - 1;
 		ring->buf_size = ring->size * ring->stride;
 
@@ -405,37 +389,10 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		ring = &priv->rx_ring[ring_ind];
 
 		mlx4_en_update_rx_prod_db(ring);
-
-		/* Configure SRQ representing the ring */
-		ring->srq.max = ring->actual_size;
-		ring->srq.max_gs = max_gs;
-		ring->srq.wqe_shift = ilog2(ring->stride);
-
-		for (i = 0; i < ring->srq.max; ++i) {
-			next = get_wqe(ring, i);
-			next->next_wqe_index =
-				cpu_to_be16((i + 1) & (ring->srq.max - 1));
-		}
-
-		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
-				     ring->wqres.db.dma, &ring->srq);
-		if (err){
-			en_err(priv, "Failed to allocate srq\n");
-			ring_ind--;
-			goto err_srq;
-		}
-		ring->srq.event = mlx4_en_srq_event;
 	}
 
 	return 0;
 
-err_srq:
-	while (ring_ind >= 0) {
-		ring = &priv->rx_ring[ring_ind];
-		mlx4_srq_free(mdev->dev, &ring->srq);
-		ring_ind--;
-	}
-
 err_buffers:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
 		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
@@ -456,7 +413,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 
 	kfree(ring->lro.lro_arr);
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
-	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
 }
@@ -464,10 +421,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-
-	mlx4_srq_free(mdev->dev, &ring->srq);
 	mlx4_en_free_rx_buf(priv, ring);
+	if (ring->stride <= TXBB_SIZE)
+		ring->buf -= TXBB_SIZE;
 	mlx4_en_destroy_allocator(priv, ring);
 }
 
@@ -835,8 +791,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 
 /* RSS related functions */
 
-static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
-				 int qpn, int srqn, int cqn,
+static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
+				 struct mlx4_en_rx_ring *ring,
 				 enum mlx4_qp_state *state,
 				 struct mlx4_qp *qp)
 {
@@ -858,13 +814,16 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
 	qp->event = mlx4_en_sqp_event;
 
 	memset(context, 0, sizeof *context);
-	mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
+	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 0, 0,
+				qpn, ring->cqn, context);
+	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
 
-	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
+	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
 	if (err) {
 		mlx4_qp_remove(mdev->dev, qp);
 		mlx4_qp_free(mdev->dev, qp);
 	}
+	mlx4_en_update_rx_prod_db(ring);
 out:
 	kfree(context);
 	return err;
@@ -880,7 +839,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	void *ptr;
 	int rss_xor = mdev->profile.rss_xor;
 	u8 rss_mask = mdev->profile.rss_mask;
-	int i, srqn, qpn, cqn;
+	int i, qpn;
 	int err = 0;
 	int good_qps = 0;
 
@@ -894,10 +853,8 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cqn = priv->rx_ring[i].cqn;
-		srqn = priv->rx_ring[i].srq.srqn;
 		qpn = rss_map->base_qpn + i;
-		err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
+		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
 					    &rss_map->state[i],
 					    &rss_map->qps[i]);
 		if (err)
@@ -920,7 +877,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, 0, &context);
+				priv->rx_ring[0].cqn, &context);
 
 	ptr = ((void *) &context) + 0x3c;
 	rss_context = (struct mlx4_en_rss_context *) ptr;
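The end state of the RSS path is easiest to see outside the diff: every receive ring is paired with exactly one QP, and the QP context is filled from the ring itself (size, stride, CQ number, doorbell record) rather than pointing at a shared SRQ. Here is a hypothetical, self-contained sketch of that 1:1 wiring; the types, ring values, and base_qpn are illustrative, and only the qpn = base_qpn + i pairing mirrors the loop in mlx4_en_config_rss_steer().

/* Illustrative only -- hypothetical types and values, not the mlx4 API. */
#include <stdio.h>

#define NUM_RX_RINGS 4

struct rx_ring_cfg {
	int cqn;	/* completion queue serving this ring */
	int size;	/* descriptors in the ring */
	int stride;	/* bytes per descriptor */
};

/* stand-in for mlx4_en_config_rss_qp(): the QP takes its geometry
 * straight from its own ring; note there is no srqn parameter anymore */
static void config_rss_qp(int qpn, const struct rx_ring_cfg *ring)
{
	printf("QP %d <-> ring (cqn %d, size %d, stride %d)\n",
	       qpn, ring->cqn, ring->size, ring->stride);
}

int main(void)
{
	struct rx_ring_cfg rings[NUM_RX_RINGS] = {
		{ 8, 1024, 16 }, { 9, 1024, 16 },
		{ 10, 1024, 16 }, { 11, 1024, 16 },
	};
	int base_qpn = 64;

	/* one QP per ring: qpn = base_qpn + i, as in the patched loop */
	for (int i = 0; i < NUM_RX_RINGS; i++)
		config_rss_qp(base_qpn + i, &rings[i]);
	return 0;
}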