author		Eugenia Emantayev <eugenia@mellanox.com>	2013-11-07 05:19:52 -0500
committer	David S. Miller <davem@davemloft.net>	2013-11-07 19:22:48 -0500
commit		41d942d56cfd21058fba465804e14ba349541442 (patch)
tree		f377a2454a9a9e1ed117851ddf335354e6e3120a /drivers
parent		f0f829bf42cdeb027234a1d0e1e5f62d77380a4d (diff)
net/mlx4_en: Datapath resources allocated dynamically
Currently all TX/RX rings and completion queues are part of the netdev
priv structure and are allocated statically. This patch changes the priv
to hold only arrays of pointers, so all TX/RX rings and completion queues
are allocated dynamically. This is in preparation for NUMA aware
allocations.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.com>
Signed-off-by: Eugenia Emantayev <eugenia@mellanox.com>
Reviewed-by: Hadar Hen Zion <hadarh@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
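[Editor's note] The heart of the patch is the switch from ring/CQ structures
embedded in the priv structure to per-object allocations reached through
arrays of pointers, with the create/destroy helpers taking a double pointer
so they can install or clear the caller's slot. Below is a minimal userspace
sketch of that pattern, not the driver code itself: the ring type is
simplified, and calloc/free stand in for the kernel's kzalloc/kfree.

#include <stdlib.h>

#define MAX_RX_RINGS 128

struct ring {
	int size;
	/* ... queue state ... */
};

struct priv {
	/* before: struct ring rx_ring[MAX_RX_RINGS]; allocated statically */
	struct ring *rx_ring[MAX_RX_RINGS];	/* after: array of pointers */
};

/* The create helper takes a double pointer so it can hand the new
 * object back through the caller's slot. */
static int create_ring(struct ring **pring, int size)
{
	struct ring *ring = calloc(1, sizeof(*ring));	/* kzalloc() in-kernel */

	if (!ring)
		return -1;	/* -ENOMEM in the kernel */
	ring->size = size;
	*pring = ring;
	return 0;
}

/* The destroy helper frees the object and clears the caller's slot, so
 * later cleanup passes can simply test the pointer against NULL. */
static void destroy_ring(struct ring **pring)
{
	free(*pring);		/* kfree() in-kernel */
	*pring = NULL;
}

int main(void)
{
	struct priv p = { { NULL } };

	if (create_ring(&p.rx_ring[0], 1024))
		return 1;
	destroy_ring(&p.rx_ring[0]);	/* p.rx_ring[0] is now NULL */
	return 0;
}

Clearing the slot on destroy is what lets the cleanup paths in
mlx4_en_free_resources() below test priv->rx_ring[i] directly instead of
peeking at internal fields such as rx_info or buf.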
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_cq.c	34
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_ethtool.c	36
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_netdev.c	87
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_port.c	14
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_rx.c	57
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_selftest.c	2
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_tx.c	32
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4_en.h	27
8 files changed, 178 insertions(+), 111 deletions(-)
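[Editor's note] With every ring and CQ an individually allocated object, the
NUMA-aware follow-up the commit message mentions reduces to a change at each
allocation site. As a hypothetical sketch (not part of this patch; the node
variable is assumed to come from later topology plumbing), the RX ring
allocation could become node-local via the kernel's kzalloc_node(), with a
fallback to an ordinary allocation:

	/* hypothetical follow-up: place the ring near its interrupt's node */
	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);	/* any-node fallback */
		if (!ring) {
			en_err(priv, "Failed to allocate RX ring structure\n");
			return -ENOMEM;
		}
	}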
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 3e2d5047cdb3..d203f11b9edf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -44,12 +44,19 @@ static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
 
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv,
-		      struct mlx4_en_cq *cq,
+		      struct mlx4_en_cq **pcq,
 		      int entries, int ring, enum cq_type mode)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq;
 	int err;
 
+	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+	if (!cq) {
+		en_err(priv, "Failed to allocate CQ structure\n");
+		return -ENOMEM;
+	}
+
 	cq->size = entries;
 	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
 
@@ -60,14 +67,22 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
 				cq->buf_size, 2 * PAGE_SIZE);
 	if (err)
-		return err;
+		goto err_cq;
 
 	err = mlx4_en_map_buffer(&cq->wqres.buf);
 	if (err)
-		mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	else
-		cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+		goto err_res;
+
+	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
+	*pcq = cq;
 
+	return 0;
+
+err_res:
+	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+err_cq:
+	kfree(cq);
+	*pcq = NULL;
 	return err;
 }
 
@@ -117,12 +132,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 		struct mlx4_en_cq *rx_cq;
 
 		cq_idx = cq_idx % priv->rx_ring_num;
-		rx_cq = &priv->rx_cq[cq_idx];
+		rx_cq = priv->rx_cq[cq_idx];
 		cq->vector = rx_cq->vector;
 	}
 
 	if (!cq->is_tx)
-		cq->size = priv->rx_ring[cq->ring].actual_size;
+		cq->size = priv->rx_ring[cq->ring]->actual_size;
 
 	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
 	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
@@ -146,9 +161,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	return 0;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_cq *cq = *pcq;
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
@@ -157,6 +173,8 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
+	kfree(cq);
+	*pcq = NULL;
 }
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 0c750985f47e..0596f9f85a0e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -51,10 +51,10 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
 	int err = 0;
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_cq[i].moder_cnt = priv->tx_frames;
-		priv->tx_cq[i].moder_time = priv->tx_usecs;
+		priv->tx_cq[i]->moder_cnt = priv->tx_frames;
+		priv->tx_cq[i]->moder_time = priv->tx_usecs;
 		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, &priv->tx_cq[i]);
+			err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
 			if (err)
 				return err;
 		}
@@ -64,11 +64,11 @@ static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
 		return 0;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_cq[i].moder_cnt = priv->rx_frames;
-		priv->rx_cq[i].moder_time = priv->rx_usecs;
+		priv->rx_cq[i]->moder_cnt = priv->rx_frames;
+		priv->rx_cq[i]->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
 		if (priv->port_up) {
-			err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+			err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
 			if (err)
 				return err;
 		}
@@ -274,16 +274,16 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 		}
 	}
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		data[index++] = priv->tx_ring[i].packets;
-		data[index++] = priv->tx_ring[i].bytes;
+		data[index++] = priv->tx_ring[i]->packets;
+		data[index++] = priv->tx_ring[i]->bytes;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		data[index++] = priv->rx_ring[i].packets;
-		data[index++] = priv->rx_ring[i].bytes;
+		data[index++] = priv->rx_ring[i]->packets;
+		data[index++] = priv->rx_ring[i]->bytes;
 #ifdef CONFIG_NET_RX_BUSY_POLL
-		data[index++] = priv->rx_ring[i].yields;
-		data[index++] = priv->rx_ring[i].misses;
-		data[index++] = priv->rx_ring[i].cleaned;
+		data[index++] = priv->rx_ring[i]->yields;
+		data[index++] = priv->rx_ring[i]->misses;
+		data[index++] = priv->rx_ring[i]->cleaned;
 #endif
 	}
 	spin_unlock_bh(&priv->stats_lock);
@@ -510,9 +510,9 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
 	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);
 
-	if (rx_size == (priv->port_up ? priv->rx_ring[0].actual_size :
-					priv->rx_ring[0].size) &&
-	    tx_size == priv->tx_ring[0].size)
+	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
+					priv->rx_ring[0]->size) &&
+	    tx_size == priv->tx_ring[0]->size)
 		return 0;
 
 	mutex_lock(&mdev->state_lock);
@@ -553,8 +553,8 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
 	param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
 	param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
 	param->rx_pending = priv->port_up ?
-		priv->rx_ring[0].actual_size : priv->rx_ring[0].size;
-	param->tx_pending = priv->tx_ring[0].size;
+		priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
+	param->tx_pending = priv->tx_ring[0]->size;
 }
 
 static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index cd61e26f434d..f430788cc4fe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -75,7 +75,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
 	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
 	struct net_device *dev = cq->dev;
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 	int done;
 
 	if (!priv->port_up)
@@ -355,8 +355,7 @@ err:
 	return ret;
 }
 
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring)
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_filter *filter, *tmp;
 	LIST_HEAD(del_list);
@@ -1242,7 +1241,7 @@ static void mlx4_en_netpoll(struct net_device *dev)
 	int i;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		spin_lock_irqsave(&cq->lock, flags);
 		napi_synchronize(&cq->napi);
 		mlx4_en_process_rx_cq(dev, cq, 0);
@@ -1264,8 +1263,8 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-			i, priv->tx_ring[i].qpn, priv->tx_ring[i].cqn,
-			priv->tx_ring[i].cons, priv->tx_ring[i].prod);
+			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
+			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
 	}
 
 	priv->port_stats.tx_timeout++;
@@ -1305,7 +1304,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 
 	/* Setup cq moderation params */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 		cq->moder_cnt = priv->rx_frames;
 		cq->moder_time = priv->rx_usecs;
 		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
@@ -1314,7 +1313,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	}
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		cq->moder_cnt = priv->tx_frames;
 		cq->moder_time = priv->tx_usecs;
 	}
@@ -1348,8 +1347,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
 		spin_lock_bh(&priv->stats_lock);
-		rx_packets = priv->rx_ring[ring].packets;
-		rx_bytes = priv->rx_ring[ring].bytes;
+		rx_packets = priv->rx_ring[ring]->packets;
+		rx_bytes = priv->rx_ring[ring]->bytes;
 		spin_unlock_bh(&priv->stats_lock);
 
 		rx_pkt_diff = ((unsigned long) (rx_packets -
@@ -1378,7 +1377,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 
 		if (moder_time != priv->last_moder_time[ring]) {
 			priv->last_moder_time[ring] = moder_time;
-			cq = &priv->rx_cq[ring];
+			cq = priv->rx_cq[ring];
 			cq->moder_time = moder_time;
 			cq->moder_cnt = priv->rx_frames;
 			err = mlx4_en_set_cq_moder(priv, cq);
@@ -1501,7 +1500,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		return err;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		cq = &priv->rx_cq[i];
+		cq = priv->rx_cq[i];
 
 		mlx4_en_cq_init_lock(cq);
 
@@ -1519,7 +1518,7 @@ int mlx4_en_start_port(struct net_device *dev)
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
-		priv->rx_ring[i].cqn = cq->mcq.cqn;
+		priv->rx_ring[i]->cqn = cq->mcq.cqn;
 		++rx_index;
 	}
 
@@ -1545,7 +1544,7 @@ int mlx4_en_start_port(struct net_device *dev)
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
-		cq = &priv->tx_cq[i];
+		cq = priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq, i);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -1561,7 +1560,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
-		tx_ring = &priv->tx_ring[i];
+		tx_ring = priv->tx_ring[i];
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 					       i / priv->num_tx_rings_p_up);
 		if (err) {
@@ -1631,8 +1630,8 @@ int mlx4_en_start_port(struct net_device *dev)
 
 tx_err:
 	while (tx_index--) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
 	}
 	mlx4_en_destroy_drop_qp(priv);
 rss_err:
@@ -1641,9 +1640,9 @@ mac_err:
 	mlx4_en_put_qp(priv);
 cq_err:
 	while (rx_index--)
-		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
 	for (i = 0; i < priv->rx_ring_num; i++)
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 
 	return err; /* need to close devices */
 }
@@ -1739,13 +1738,13 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
-		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
+		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
 	}
 	msleep(10);
 
 	for (i = 0; i < priv->tx_ring_num; i++)
-		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
 
 	/* Free RSS qps */
 	mlx4_en_release_rss_steer(priv);
@@ -1757,7 +1756,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 	/* Free RX Rings */
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		struct mlx4_en_cq *cq = &priv->rx_cq[i];
+		struct mlx4_en_cq *cq = priv->rx_cq[i];
 
 		local_bh_disable();
 		while (!mlx4_en_cq_lock_napi(cq)) {
@@ -1768,7 +1767,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
 		while (test_bit(NAPI_STATE_SCHED, &cq->napi.state))
 			msleep(1);
-		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
 	}
 }
@@ -1806,15 +1805,15 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_ring[i].bytes = 0;
-		priv->tx_ring[i].packets = 0;
-		priv->tx_ring[i].tx_csum = 0;
+		priv->tx_ring[i]->bytes = 0;
+		priv->tx_ring[i]->packets = 0;
+		priv->tx_ring[i]->tx_csum = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		priv->rx_ring[i].bytes = 0;
-		priv->rx_ring[i].packets = 0;
-		priv->rx_ring[i].csum_ok = 0;
-		priv->rx_ring[i].csum_none = 0;
+		priv->rx_ring[i]->bytes = 0;
+		priv->rx_ring[i]->packets = 0;
+		priv->rx_ring[i]->csum_ok = 0;
+		priv->rx_ring[i]->csum_none = 0;
 	}
 }
 
@@ -1871,17 +1870,17 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 #endif
 
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring[i].tx_info)
+		if (priv->tx_ring && priv->tx_ring[i])
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq[i].buf)
+		if (priv->tx_cq && priv->tx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		if (priv->rx_ring[i].rx_info)
+		if (priv->rx_ring[i])
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
 				priv->prof->rx_ring_size, priv->stride);
-		if (priv->rx_cq[i].buf)
+		if (priv->rx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
 
@@ -1937,6 +1936,20 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	for (i = 0; i < priv->rx_ring_num; i++) {
+		if (priv->rx_ring[i])
+			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+						prof->rx_ring_size,
+						priv->stride);
+		if (priv->rx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+	}
+	for (i = 0; i < priv->tx_ring_num; i++) {
+		if (priv->tx_ring[i])
+			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+		if (priv->tx_cq[i])
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	}
 	return -ENOMEM;
 }
 
@@ -2230,13 +2243,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
 	priv->tx_ring_num = prof->tx_ring_num;
 
-	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
+	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
 				GFP_KERNEL);
 	if (!priv->tx_ring) {
 		err = -ENOMEM;
 		goto out;
 	}
-	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
+	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
 			      GFP_KERNEL);
 	if (!priv->tx_cq) {
 		err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 5f8535e408a3..dae1a1f4ae55 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -140,18 +140,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	priv->port_stats.rx_chksum_good = 0;
 	priv->port_stats.rx_chksum_none = 0;
 	for (i = 0; i < priv->rx_ring_num; i++) {
-		stats->rx_packets += priv->rx_ring[i].packets;
-		stats->rx_bytes += priv->rx_ring[i].bytes;
-		priv->port_stats.rx_chksum_good += priv->rx_ring[i].csum_ok;
-		priv->port_stats.rx_chksum_none += priv->rx_ring[i].csum_none;
+		stats->rx_packets += priv->rx_ring[i]->packets;
+		stats->rx_bytes += priv->rx_ring[i]->bytes;
+		priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok;
+		priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none;
 	}
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
 	priv->port_stats.tx_chksum_offload = 0;
 	for (i = 0; i < priv->tx_ring_num; i++) {
-		stats->tx_packets += priv->tx_ring[i].packets;
-		stats->tx_bytes += priv->tx_ring[i].bytes;
-		priv->port_stats.tx_chksum_offload += priv->tx_ring[i].tx_csum;
+		stats->tx_packets += priv->tx_ring[i]->packets;
+		stats->tx_bytes += priv->tx_ring[i]->bytes;
+		priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
 	}
 
 	stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index afe2efa69c86..1c45f88776c5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -264,7 +264,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 
 	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
 		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-			ring = &priv->rx_ring[ring_ind];
+			ring = priv->rx_ring[ring_ind];
 
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size,
@@ -289,7 +289,7 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 
 reduce_rings:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 		while (ring->actual_size > new_size) {
 			ring->actual_size--;
 			ring->prod--;
@@ -319,12 +319,20 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 }
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			   struct mlx4_en_rx_ring **pring,
+			   u32 size, u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring;
 	int err = -ENOMEM;
 	int tmp;
 
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring) {
+		en_err(priv, "Failed to allocate RX ring structure\n");
+		return -ENOMEM;
+	}
+
 	ring->prod = 0;
 	ring->cons = 0;
 	ring->size = size;
@@ -336,8 +344,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
 					sizeof(struct mlx4_en_rx_alloc));
 	ring->rx_info = vmalloc(tmp);
-	if (!ring->rx_info)
-		return -ENOMEM;
+	if (!ring->rx_info) {
+		err = -ENOMEM;
+		goto err_ring;
+	}
 
 	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
@@ -345,7 +355,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
 				ring->buf_size, 2 * PAGE_SIZE);
 	if (err)
-		goto err_ring;
+		goto err_info;
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
@@ -356,13 +366,18 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
 
+	*pring = ring;
 	return 0;
 
 err_hwq:
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
-err_ring:
+err_info:
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+err_ring:
+	kfree(ring);
+	*pring = NULL;
+
 	return err;
 }
 
@@ -376,12 +391,12 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 					DS_SIZE * priv->num_frags);
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 
 		ring->prod = 0;
 		ring->cons = 0;
 		ring->actual_size = 0;
-		ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
 
 		ring->stride = stride;
 		if (ring->stride <= TXBB_SIZE)
@@ -412,7 +427,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		goto err_buffers;
 
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
-		ring = &priv->rx_ring[ring_ind];
+		ring = priv->rx_ring[ring_ind];
 
 		ring->size_mask = ring->actual_size - 1;
 		mlx4_en_update_rx_prod_db(ring);
@@ -422,30 +437,34 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 
 err_buffers:
 	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
-		mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);
 
 	ring_ind = priv->rx_ring_num - 1;
 err_allocator:
 	while (ring_ind >= 0) {
-		if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
-			priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
-		mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
+			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
+		mlx4_en_destroy_allocator(priv, priv->rx_ring[ring_ind]);
 		ring_ind--;
 	}
 	return err;
 }
 
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+			     struct mlx4_en_rx_ring **pring,
+			     u32 size, u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_rx_ring *ring = *pring;
 
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
 	vfree(ring->rx_info);
 	ring->rx_info = NULL;
+	kfree(ring);
+	*pring = NULL;
 #ifdef CONFIG_RFS_ACCEL
-	mlx4_en_cleanup_filters(priv, ring);
+	mlx4_en_cleanup_filters(priv);
 #endif
 }
 
@@ -592,7 +611,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_cqe *cqe;
-	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+	struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
 	struct mlx4_en_rx_alloc *frags;
 	struct mlx4_en_rx_desc *rx_desc;
 	struct sk_buff *skb;
@@ -991,7 +1010,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		qpn = rss_map->base_qpn + i;
-		err = mlx4_en_config_rss_qp(priv, qpn, &priv->rx_ring[i],
+		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
 					    &rss_map->state[i],
 					    &rss_map->qps[i]);
 		if (err)
@@ -1008,7 +1027,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
-				priv->rx_ring[0].cqn, -1, &context);
+				priv->rx_ring[0]->cqn, -1, &context);
 
 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
 		rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2448f0d669e6..40626690e8a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -156,7 +156,7 @@ retry_tx:
 	 * since we turned the carrier off */
 	msleep(200);
 	for (i = 0; i < priv->tx_ring_num && carrier_ok; i++) {
-		tx_ring = &priv->tx_ring[i];
+		tx_ring = priv->tx_ring[i];
 		if (tx_ring->prod != (tx_ring->cons + tx_ring->last_nr_txbb))
 			goto retry_tx;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 0698c82d6ff1..d4e4cf30a720 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -54,13 +54,20 @@ module_param_named(inline_thold, inline_thold, int, 0444);
 MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
+			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
 			   u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *ring;
 	int tmp;
 	int err;
 
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (!ring) {
+		en_err(priv, "Failed allocating TX ring\n");
+		return -ENOMEM;
+	}
+
 	ring->size = size;
 	ring->size_mask = size - 1;
 	ring->stride = stride;
@@ -69,8 +76,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
-	if (!ring->tx_info)
-		return -ENOMEM;
+	if (!ring->tx_info) {
+		err = -ENOMEM;
+		goto err_ring;
+	}
 
 	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
@@ -78,7 +87,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
 	if (!ring->bounce_buf) {
 		err = -ENOMEM;
-		goto err_tx;
+		goto err_info;
 	}
 	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
 
@@ -120,6 +129,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
 	ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
 
+	*pring = ring;
 	return 0;
 
 err_map:
@@ -129,16 +139,20 @@ err_hwq_res:
 err_bounce:
 	kfree(ring->bounce_buf);
 	ring->bounce_buf = NULL;
-err_tx:
+err_info:
 	vfree(ring->tx_info);
 	ring->tx_info = NULL;
+err_ring:
+	kfree(ring);
+	*pring = NULL;
 	return err;
 }
 
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_tx_ring *ring)
+			     struct mlx4_en_tx_ring **pring)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring *ring = *pring;
 	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
 	if (ring->bf_enabled)
@@ -151,6 +165,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 	ring->bounce_buf = NULL;
 	vfree(ring->tx_info);
 	ring->tx_info = NULL;
+	kfree(ring);
+	*pring = NULL;
 }
 
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
@@ -330,7 +346,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_cq *mcq = &cq->mcq;
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+	struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring];
 	struct mlx4_cqe *cqe;
 	u16 index;
 	u16 new_index, ring_index, stamp_index;
@@ -622,7 +638,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	tx_ind = skb->queue_mapping;
-	ring = &priv->tx_ring[tx_ind];
+	ring = priv->tx_ring[tx_ind];
 	if (vlan_tx_tag_present(skb))
 		vlan_tag = vlan_tx_tag_get(skb);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index bf06e3610d27..b2547ae07dfa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -530,10 +530,10 @@ struct mlx4_en_priv {
 	u16 num_frags;
 	u16 log_rx_info;
 
-	struct mlx4_en_tx_ring *tx_ring;
-	struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
-	struct mlx4_en_cq *tx_cq;
-	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+	struct mlx4_en_tx_ring **tx_ring;
+	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
+	struct mlx4_en_cq **tx_cq;
+	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
 	struct mlx4_qp drop_qp;
 	struct work_struct rx_mode_task;
 	struct work_struct watchdog_task;
@@ -626,7 +626,7 @@ static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
 	if ((cq->state & MLX4_CQ_LOCKED)) {
 		struct net_device *dev = cq->dev;
 		struct mlx4_en_priv *priv = netdev_priv(dev);
-		struct mlx4_en_rx_ring *rx_ring = &priv->rx_ring[cq->ring];
+		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
 
 		cq->state |= MLX4_EN_CQ_STATE_POLL_YIELD;
 		rc = false;
@@ -704,9 +704,9 @@ void mlx4_en_stop_port(struct net_device *dev, int detach);
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
 
-int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
 		      int entries, int ring, enum cq_type mode);
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 			int cq_idx);
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -717,9 +717,11 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
-int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+			   struct mlx4_en_tx_ring **pring,
 			   int qpn, u32 size, u16 stride);
-void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+			     struct mlx4_en_tx_ring **pring);
 int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring,
 			     int cq, int user_prio);
@@ -727,10 +729,10 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring *ring);
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_rx_ring *ring,
+			   struct mlx4_en_rx_ring **pring,
 			   u32 size, u16 stride);
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *ring,
+			     struct mlx4_en_rx_ring **pring,
 			     u32 size, u16 stride);
 int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -768,8 +770,7 @@ extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
 int mlx4_en_setup_tc(struct net_device *dev, u8 up);
 
 #ifdef CONFIG_RFS_ACCEL
-void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
-			     struct mlx4_en_rx_ring *rx_ring);
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
 #endif
 
 #define MLX4_EN_NUM_SELF_TEST	5