author	Yevgeny Petrilin <yevgenyp@mellanox.co.il>	2011-03-22 18:38:52 -0400
committer	David S. Miller <davem@davemloft.net>	2011-03-23 15:24:25 -0400
commit	87a5c3896f9cf3b7f374af95b342d1089989327a (patch)
tree	48e13f9f860e514b365d62eae3ade23182ac67f0 /drivers/net/mlx4
parent	9ace5e0176f165f04537497143506621d33a0139 (diff)
mlx4_en: Using blue flame support
The regular doorbell is used only when BlueFlame is not used for the
descriptor. For Blue Flame to work in Ethernet mode, the QP number must
have 0 at bits 6,7. The range of QPs is allocated accordingly.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
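A minimal sketch of the constraint behind the 256-aligned reservation
(the helper below is illustrative only, not part of the patch): a base
QPN aligned to 256 has its low eight bits clear, so base_tx_qpn + i
keeps bits 6 and 7 zero for any ring index i below 64.

	/* Illustrative sketch -- not part of the patch. Assumes the
	 * number of TX rings never exceeds 64. */
	static inline bool qpn_usable_for_bf(int qpn)
	{
		return (qpn & 0xc0) == 0;	/* bits 6,7 must be clear */
	}
	/* mlx4_qp_reserve_range(..., align = 256, &base) yields
	 * base & 0xff == 0, so qpn_usable_for_bf(base + i) holds
	 * for all 0 <= i < 64. */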
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--	drivers/net/mlx4/en_netdev.c	10
-rw-r--r--	drivers/net/mlx4/en_tx.c	72
-rw-r--r--	drivers/net/mlx4/mlx4_en.h	4
3 files changed, 64 insertions(+), 22 deletions(-)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 08e680100db..5762ebde445 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -931,6 +931,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
+	int base_tx_qpn, err;
+
+	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+	if (err) {
+		en_err(priv, "failed reserving range for TX rings\n");
+		return err;
+	}
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
@@ -938,7 +945,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
 					   prof->tx_ring_size, TXBB_SIZE))
 			goto err;
 	}
@@ -958,6 +965,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
 	return -ENOMEM;
 }
 
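The en_netdev.c change replaces per-ring QP reservation with a single
aligned block. A condensed sketch of the pattern (error handling
trimmed, names taken from the patch above):

	/* Reserve one 256-aligned block of QP numbers for all TX rings;
	 * ring i then uses base_tx_qpn + i, and the whole block is
	 * released with mlx4_qp_release_range() on the failure path. */
	int base_tx_qpn, err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num,
				    256, &base_tx_qpn);
	if (err)
		return err;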
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index a680cd4a5ab..01feb8fd42a 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -44,6 +44,7 @@
44 44
45enum { 45enum {
46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */ 46 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
47 MAX_BF = 256,
47}; 48};
48 49
49static int inline_thold __read_mostly = MAX_INLINE; 50static int inline_thold __read_mostly = MAX_INLINE;
@@ -52,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444);
52MODULE_PARM_DESC(inline_thold, "threshold for using inline data"); 53MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
53 54
54int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 55int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
55 struct mlx4_en_tx_ring *ring, u32 size, 56 struct mlx4_en_tx_ring *ring, int qpn, u32 size,
56 u16 stride) 57 u16 stride)
57{ 58{
58 struct mlx4_en_dev *mdev = priv->mdev; 59 struct mlx4_en_dev *mdev = priv->mdev;
@@ -103,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
103 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 104 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
104 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 105 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
105 106
106 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); 107 ring->qpn = qpn;
107 if (err) {
108 en_err(priv, "Failed reserving qp for tx ring.\n");
109 goto err_map;
110 }
111
112 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 108 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
113 if (err) { 109 if (err) {
114 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 110 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
115 goto err_reserve; 111 goto err_map;
116 } 112 }
117 ring->qp.event = mlx4_en_sqp_event; 113 ring->qp.event = mlx4_en_sqp_event;
118 114
115 err = mlx4_bf_alloc(mdev->dev, &ring->bf);
116 if (err) {
117 en_dbg(DRV, priv, "working without blueflame (%d)", err);
118 ring->bf.uar = &mdev->priv_uar;
119 ring->bf.uar->map = mdev->uar_map;
120 ring->bf_enabled = false;
121 } else
122 ring->bf_enabled = true;
123
119 return 0; 124 return 0;
120 125
121err_reserve:
122 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
123err_map: 126err_map:
124 mlx4_en_unmap_buffer(&ring->wqres.buf); 127 mlx4_en_unmap_buffer(&ring->wqres.buf);
125err_hwq_res: 128err_hwq_res:
@@ -139,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
139 struct mlx4_en_dev *mdev = priv->mdev; 142 struct mlx4_en_dev *mdev = priv->mdev;
140 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); 143 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
141 144
145 if (ring->bf_enabled)
146 mlx4_bf_free(mdev->dev, &ring->bf);
142 mlx4_qp_remove(mdev->dev, &ring->qp); 147 mlx4_qp_remove(mdev->dev, &ring->qp);
143 mlx4_qp_free(mdev->dev, &ring->qp); 148 mlx4_qp_free(mdev->dev, &ring->qp);
144 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); 149 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
@@ -171,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
171 176
172 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 177 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
173 ring->cqn, &ring->context); 178 ring->cqn, &ring->context);
179 if (ring->bf_enabled)
180 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
174 181
175 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 182 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
176 &ring->qp, &ring->qp_state); 183 &ring->qp, &ring->qp_state);
@@ -591,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
591 return skb_tx_hash(dev, skb); 598 return skb_tx_hash(dev, skb);
592} 599}
593 600
601static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
602{
603 __iowrite64_copy(dst, src, bytecnt / 8);
604}
605
594netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) 606netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
595{ 607{
596 struct mlx4_en_priv *priv = netdev_priv(dev); 608 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -609,12 +621,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
609 int desc_size; 621 int desc_size;
610 int real_size; 622 int real_size;
611 dma_addr_t dma; 623 dma_addr_t dma;
612 u32 index; 624 u32 index, bf_index;
613 __be32 op_own; 625 __be32 op_own;
614 u16 vlan_tag = 0; 626 u16 vlan_tag = 0;
615 int i; 627 int i;
616 int lso_header_size; 628 int lso_header_size;
617 void *fragptr; 629 void *fragptr;
630 bool bounce = false;
618 631
619 if (!priv->port_up) 632 if (!priv->port_up)
620 goto tx_drop; 633 goto tx_drop;
@@ -657,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
657 670
658 /* Packet is good - grab an index and transmit it */ 671 /* Packet is good - grab an index and transmit it */
659 index = ring->prod & ring->size_mask; 672 index = ring->prod & ring->size_mask;
673 bf_index = ring->prod;
660 674
661 /* See if we have enough space for whole descriptor TXBB for setting 675 /* See if we have enough space for whole descriptor TXBB for setting
662 * SW ownership on next descriptor; if not, use a bounce buffer. */ 676 * SW ownership on next descriptor; if not, use a bounce buffer. */
663 if (likely(index + nr_txbb <= ring->size)) 677 if (likely(index + nr_txbb <= ring->size))
664 tx_desc = ring->buf + index * TXBB_SIZE; 678 tx_desc = ring->buf + index * TXBB_SIZE;
665 else 679 else {
666 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; 680 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
681 bounce = true;
682 }
667 683
668 /* Save skb in tx_info ring */ 684 /* Save skb in tx_info ring */
669 tx_info = &ring->tx_info[index]; 685 tx_info = &ring->tx_info[index];
@@ -768,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
768 ring->prod += nr_txbb; 784 ring->prod += nr_txbb;
769 785
770 /* If we used a bounce buffer then copy descriptor back into place */ 786 /* If we used a bounce buffer then copy descriptor back into place */
771 if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf) 787 if (bounce)
772 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size); 788 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
773 789
774 /* Run destructor before passing skb to HW */ 790 /* Run destructor before passing skb to HW */
775 if (likely(!skb_shared(skb))) 791 if (likely(!skb_shared(skb)))
776 skb_orphan(skb); 792 skb_orphan(skb);
777 793
778 /* Ensure new descirptor hits memory 794 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
779 * before setting ownership of this descriptor to HW */ 795 *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
780 wmb(); 796 op_own |= htonl((bf_index & 0xffff) << 8);
781 tx_desc->ctrl.owner_opcode = op_own; 797 /* Ensure new descirptor hits memory
798 * before setting ownership of this descriptor to HW */
799 wmb();
800 tx_desc->ctrl.owner_opcode = op_own;
782 801
783 /* Ring doorbell! */ 802 wmb();
784 wmb(); 803
785 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL); 804 mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
805 desc_size);
806
807 wmb();
808
809 ring->bf.offset ^= ring->bf.buf_size;
810 } else {
811 /* Ensure new descirptor hits memory
812 * before setting ownership of this descriptor to HW */
813 wmb();
814 tx_desc->ctrl.owner_opcode = op_own;
815 wmb();
816 writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
817 }
786 818
787 /* Poll CQ here */ 819 /* Poll CQ here */
788 mlx4_en_xmit_poll(priv, tx_ind); 820 mlx4_en_xmit_poll(priv, tx_ind);
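The en_tx.c change splits the transmit doorbell into two paths. A
condensed sketch of the decision (simplified from the hunk above, with
the memory barriers collapsed; it is not the literal driver code):

	/* Blue Flame path: small descriptors (<= MAX_BF bytes) that were
	 * built in place (no bounce copy) and carry no VLAN tag are
	 * written whole through the BF register, so no separate doorbell
	 * write is needed. */
	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
		tx_desc->ctrl.owner_opcode = op_own;	/* hand over to HW */
		wmb();					/* order vs. the BF copy */
		mlx4_bf_copy(ring->bf.reg + ring->bf.offset,
			     (unsigned long *) &tx_desc->ctrl, desc_size);
		ring->bf.offset ^= ring->bf.buf_size;	/* ping-pong halves */
	} else {
		wmb();					/* descriptor in memory first */
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		writel(ring->doorbell_qpn,
		       ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}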
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 5a2c5602366..edcb53562c1 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -248,6 +248,8 @@ struct mlx4_en_tx_ring {
 	unsigned long bytes;
 	unsigned long packets;
 	spinlock_t comp_lock;
+	struct mlx4_bf bf;
+	bool bf_enabled;
 };
 
 struct mlx4_en_rx_desc {
@@ -518,7 +520,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
-			   u32 size, u16 stride);
+			   int qpn, u32 size, u16 stride);
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,