Diffstat (limited to 'drivers/net/mlx4/en_tx.c')
-rw-r--r--  drivers/net/mlx4/en_tx.c  39
1 file changed, 19 insertions(+), 20 deletions(-)
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 1c83a96fde35..95703f90c1b9 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -68,15 +68,15 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
 	if (!ring->tx_info) {
-		mlx4_err(mdev, "Failed allocating tx_info ring\n");
+		en_err(priv, "Failed allocating tx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
 	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
 	if (!ring->bounce_buf) {
-		mlx4_err(mdev, "Failed allocating bounce buffer\n");
+		en_err(priv, "Failed allocating bounce buffer\n");
 		err = -ENOMEM;
 		goto err_tx;
 	}
@@ -85,31 +85,31 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 				 2 * PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating hwq resources\n");
+		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
 	}
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map TX buffer\n");
+		en_err(priv, "Failed to map TX buffer\n");
 		goto err_hwq_res;
 	}
 
 	ring->buf = ring->wqres.buf.direct.buf;
 
-	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
 	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
 	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
 
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+		en_err(priv, "Failed reserving qp for tx ring.\n");
 		goto err_map;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
 		goto err_reserve;
 	}
 	ring->qp.event = mlx4_en_sqp_event;
@@ -135,7 +135,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
@@ -274,12 +274,12 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 
 	/* Skip last polled descriptor */
 	ring->cons += ring->last_nr_txbb;
-	mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
 		 ring->cons, ring->prod);
 
 	if ((u32) (ring->prod - ring->cons) > ring->size) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
+			en_warn(priv, "Tx consumer passed producer!\n");
 		return 0;
 	}
 
@@ -292,7 +292,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	}
 
 	if (cnt)
-		mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
 
 	return cnt;
 }
@@ -321,7 +321,7 @@ void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num
 			num = 0;
 		}
 		prio_map[prio] = ring;
-		mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
+		en_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
 		num++;
 	}
 }
@@ -539,7 +539,6 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			 int *lso_header_size)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	int real_size;
 
 	if (skb_is_gso(skb)) {
@@ -553,14 +552,14 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			real_size += DS_SIZE;
 		else {
 			if (netif_msg_tx_err(priv))
-				mlx4_warn(mdev, "Non-linear headers\n");
+				en_warn(priv, "Non-linear headers\n");
 			dev_kfree_skb_any(skb);
 			return 0;
 		}
 	}
 	if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "LSO header size too big\n");
+			en_warn(priv, "LSO header size too big\n");
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
@@ -669,7 +668,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	nr_txbb = desc_size / TXBB_SIZE;
 	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "Oversized header or SG list\n");
+			en_warn(priv, "Oversized header or SG list\n");
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
@@ -695,7 +694,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Now that we know what Tx ring to use */
 	if (unlikely(!priv->port_up)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "xmit: port down!\n");
+			en_warn(priv, "xmit: port down!\n");
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
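
Every hunk above makes the same substitution: the device-wide mlx4_err()/mlx4_dbg()/mlx4_warn() calls, which take the mdev pointer, become the mlx4_en-specific en_err()/en_dbg()/en_warn() macros, which take the per-interface priv pointer (this is also why get_real_size() can drop its now-unused mdev local). The practical effect is that log lines can be tagged with the netdev name instead of only the PCI device. The definitions below are a hypothetical sketch of such macros, not the ones from this commit (the real ones live in drivers/net/mlx4/mlx4_en.h); they assume mlx4_en_priv exposes its net_device as priv->dev and an ethtool-style msg_enable bitmask:

	/* Sketch only: error messages are unconditional, prefixed with
	 * driver and interface name. */
	#define en_err(priv, format, arg...)				\
		printk(KERN_ERR "%s: %s: " format, DRV_NAME,		\
		       (priv)->dev->name, ## arg)

	/* Sketch only: debug messages gated on the per-interface
	 * msg_enable bitmask, selected by message class (e.g. DRV). */
	#define en_dbg(mlevel, priv, format, arg...)			\
		do {							\
			if (NETIF_MSG_##mlevel & (priv)->msg_enable)	\
				printk(KERN_DEBUG "%s: %s: " format,	\
				       DRV_NAME, (priv)->dev->name,	\
				       ## arg);				\
		} while (0)

	/* Sketch only: warnings print unconditionally; note the callers
	 * in this diff still wrap en_warn() in netif_msg_tx_err(priv). */
	#define en_warn(priv, format, arg...)				\
		printk(KERN_WARNING "%s: %s: " format, DRV_NAME,	\
		       (priv)->dev->name, ## arg)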