-rw-r--r--  drivers/net/Kconfig             |    9
-rw-r--r--  drivers/net/mlx4/Makefile       |    5
-rw-r--r--  drivers/net/mlx4/en_cq.c        |  146
-rw-r--r--  drivers/net/mlx4/en_main.c      |  254
-rw-r--r--  drivers/net/mlx4/en_netdev.c    | 1088
-rw-r--r--  drivers/net/mlx4/en_params.c    |  480
-rw-r--r--  drivers/net/mlx4/en_port.c      |  261
-rw-r--r--  drivers/net/mlx4/en_port.h      |  570
-rw-r--r--  drivers/net/mlx4/en_resources.c |   96
-rw-r--r--  drivers/net/mlx4/en_rx.c        | 1080
-rw-r--r--  drivers/net/mlx4/en_tx.c        |  820
-rw-r--r--  drivers/net/mlx4/mlx4_en.h      |  561
12 files changed, 5370 insertions(+), 0 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4a11296a9514..69e2c10cb9f4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2465,6 +2465,15 @@ config PASEMI_MAC
2465	  This driver supports the on-chip 1/10Gbit Ethernet controller on
2466	  PA Semi's PWRficient line of chips.
2467
2468config MLX4_EN
2469 tristate "Mellanox Technologies 10Gbit Ethernet support"
2470 depends on PCI && INET
2471 select MLX4_CORE
2472 select INET_LRO
2473 help
2474 This driver supports Mellanox Technologies ConnectX Ethernet
2475 devices.
2476
2477config MLX4_CORE
2478	tristate
2479	depends on PCI
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 9f493666e27b..a7a97bf998f8 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -2,3 +2,8 @@ obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
2
3mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
4		mr.o pd.o port.o profile.o qp.o reset.o srq.o
5
6obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 000000000000..1368a8010af4
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
@@ -0,0 +1,146 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/cq.h>
35#include <linux/mlx4/qp.h>
36#include <linux/mlx4/cmd.h>
37
38#include "mlx4_en.h"
39
40static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
41{
42 return;
43}
44
45
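/* Allocate the CQ buffer and doorbell resources; an RX CQ holds one CQE per
 * entry while a TX CQ is collapsed to a single CQE. */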
46int mlx4_en_create_cq(struct mlx4_en_priv *priv,
47 struct mlx4_en_cq *cq,
48 int entries, int ring, enum cq_type mode)
49{
50 struct mlx4_en_dev *mdev = priv->mdev;
51 int err;
52
53 cq->size = entries;
54 if (mode == RX)
55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
56 else
57 cq->buf_size = sizeof(struct mlx4_cqe);
58
59 cq->ring = ring;
60 cq->is_tx = mode;
61 spin_lock_init(&cq->lock);
62
63 err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
64 cq->buf_size, 2 * PAGE_SIZE);
65 if (err)
66 return err;
67
68 err = mlx4_en_map_buffer(&cq->wqres.buf);
69 if (err)
70 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
71
72 return err;
73}
74
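/* Create the HW CQ, wire up its doorbells and completion handlers: TX CQs
 * are drained from a timer, RX CQs through NAPI polling. */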
75int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
76{
77 struct mlx4_en_dev *mdev = priv->mdev;
78 int err;
79
80 cq->dev = mdev->pndev[priv->port];
81 cq->mcq.set_ci_db = cq->wqres.db.db;
82 cq->mcq.arm_db = cq->wqres.db.db + 1;
83 *cq->mcq.set_ci_db = 0;
84 *cq->mcq.arm_db = 0;
85 cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
86 memset(cq->buf, 0, cq->buf_size);
87
88 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
89 cq->wqres.db.dma, &cq->mcq, cq->is_tx);
90 if (err)
91 return err;
92
93 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
94 cq->mcq.event = mlx4_en_cq_event;
95
96 if (cq->is_tx) {
97 init_timer(&cq->timer);
98 cq->timer.function = mlx4_en_poll_tx_cq;
99 cq->timer.data = (unsigned long) cq;
100 } else {
101 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
102 napi_enable(&cq->napi);
103 }
104
105 return 0;
106}
107
108void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
109{
110 struct mlx4_en_dev *mdev = priv->mdev;
111
112 mlx4_en_unmap_buffer(&cq->wqres.buf);
113 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
114 cq->buf_size = 0;
115 cq->buf = NULL;
116}
117
118void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
119{
120 struct mlx4_en_dev *mdev = priv->mdev;
121
122 if (cq->is_tx)
123 del_timer(&cq->timer);
124 else
125 napi_disable(&cq->napi);
126
127 mlx4_cq_free(mdev->dev, &cq->mcq);
128}
129
130/* Set rx cq moderation parameters */
131int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
132{
133 return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
134 cq->moder_cnt, cq->moder_time);
135}
136
137int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
138{
139 cq->armed = 1;
140 mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
141 &priv->mdev->uar_lock);
142
143 return 0;
144}
145
146
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
new file mode 100644
index 000000000000..1b0eebf84f76
--- /dev/null
+++ b/drivers/net/mlx4/en_main.c
@@ -0,0 +1,254 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/cpumask.h>
35#include <linux/module.h>
36#include <linux/delay.h>
37#include <linux/netdevice.h>
38#include <linux/cpumask.h>
39
40#include <linux/mlx4/driver.h>
41#include <linux/mlx4/device.h>
42#include <linux/mlx4/cmd.h>
43
44#include "mlx4_en.h"
45
46MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
47MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
48MODULE_LICENSE("Dual BSD/GPL");
49MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
50
51static const char mlx4_en_version[] =
52 DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
53 DRV_VERSION " (" DRV_RELDATE ")\n";
54
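/* Async event callback registered with mlx4_core: link changes are handed
 * off to a workqueue task, other events are only logged here. */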
55static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
56 enum mlx4_dev_event event, int port)
57{
58 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
59 struct mlx4_en_priv *priv;
60
61 if (!mdev->pndev[port])
62 return;
63
64 priv = netdev_priv(mdev->pndev[port]);
65 switch (event) {
66 case MLX4_DEV_EVENT_PORT_UP:
67 case MLX4_DEV_EVENT_PORT_DOWN:
68 /* To prevent races, we poll the link state in a separate
69 task rather than changing it here */
70 priv->link_state = event;
71 queue_work(mdev->workqueue, &priv->linkstate_task);
72 break;
73
74 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
75 mlx4_err(mdev, "Internal error detected, restarting device\n");
76 break;
77
78 default:
79 mlx4_warn(mdev, "Unhandled event: %d\n", event);
80 }
81}
82
83static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
84{
85 struct mlx4_en_dev *mdev = endev_ptr;
86 int i;
87
88 mutex_lock(&mdev->state_lock);
89 mdev->device_up = false;
90 mutex_unlock(&mdev->state_lock);
91
92 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
93 if (mdev->pndev[i])
94 mlx4_en_destroy_netdev(mdev->pndev[i]);
95
96 flush_workqueue(mdev->workqueue);
97 destroy_workqueue(mdev->workqueue);
98 mlx4_mr_free(dev, &mdev->mr);
99 mlx4_uar_free(dev, &mdev->priv_uar);
100 mlx4_pd_free(dev, mdev->priv_pdn);
101 kfree(mdev);
102}
103
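/* Per-device probe: allocate PD, UAR, MR and a private workqueue, build the
 * profile from the module parameters and create one netdev per Ethernet port. */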
104static void *mlx4_en_add(struct mlx4_dev *dev)
105{
106 static int mlx4_en_version_printed;
107 struct mlx4_en_dev *mdev;
108 int i;
109 int err;
110
111 if (!mlx4_en_version_printed) {
112 printk(KERN_INFO "%s", mlx4_en_version);
113 mlx4_en_version_printed++;
114 }
115
116 mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
117 if (!mdev) {
118 dev_err(&dev->pdev->dev, "Device struct alloc failed, "
119 "aborting.\n");
120 err = -ENOMEM;
121 goto err_free_res;
122 }
123
124 if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
125 goto err_free_dev;
126
127 if (mlx4_uar_alloc(dev, &mdev->priv_uar))
128 goto err_pd;
129
130 mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
131 if (!mdev->uar_map)
132 goto err_uar;
133 spin_lock_init(&mdev->uar_lock);
134
135 mdev->dev = dev;
136 mdev->dma_device = &(dev->pdev->dev);
137 mdev->pdev = dev->pdev;
138 mdev->device_up = false;
139
140 mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
141 if (!mdev->LSO_support)
142 mlx4_warn(mdev, "LSO not supported, please upgrade to later "
143 "FW version to enable LSO\n");
144
145 if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
146 MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
147 0, 0, &mdev->mr)) {
148 mlx4_err(mdev, "Failed allocating memory region\n");
149 goto err_uar;
150 }
151 if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
152 mlx4_err(mdev, "Failed enabling memory region\n");
153 goto err_mr;
154 }
155
156 /* Build device profile according to supplied module parameters */
157 err = mlx4_en_get_profile(mdev);
158 if (err) {
159 mlx4_err(mdev, "Bad module parameters, aborting.\n");
160 goto err_mr;
161 }
162
163	/* Configure which ports to start according to module parameters */
164 mdev->port_cnt = 0;
165 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
166 mdev->port_cnt++;
167
168	/* If we did not receive an explicit number of Rx rings, default to
169	 * a single Rx ring per port */
170 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
171 mlx4_info(mdev, "Using %d tx rings for port:%d\n",
172 mdev->profile.prof[i].tx_ring_num, i);
173 if (!mdev->profile.prof[i].rx_ring_num) {
174 mdev->profile.prof[i].rx_ring_num = 1;
175 mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
176 1, i);
177 } else
178 mlx4_info(mdev, "Using %d rx rings for port:%d\n",
179 mdev->profile.prof[i].rx_ring_num, i);
180 }
181
182 /* Create our own workqueue for reset/multicast tasks
183 * Note: we cannot use the shared workqueue because of deadlocks caused
184 * by the rtnl lock */
185 mdev->workqueue = create_singlethread_workqueue("mlx4_en");
186 if (!mdev->workqueue) {
187 err = -ENOMEM;
188 goto err_close_nic;
189 }
190
191 /* At this stage all non-port specific tasks are complete:
192 * mark the card state as up */
193 mutex_init(&mdev->state_lock);
194 mdev->device_up = true;
195
196 /* Setup ports */
197
198 /* Create a netdev for each port */
199 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
200 mlx4_info(mdev, "Activating port:%d\n", i);
201 if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
202 mdev->pndev[i] = NULL;
203 goto err_free_netdev;
204 }
205 }
206 return mdev;
207
208
209err_free_netdev:
210 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
211 if (mdev->pndev[i])
212 mlx4_en_destroy_netdev(mdev->pndev[i]);
213 }
214
215 mutex_lock(&mdev->state_lock);
216 mdev->device_up = false;
217 mutex_unlock(&mdev->state_lock);
218 flush_workqueue(mdev->workqueue);
219
220 /* Stop event queue before we drop down to release shared SW state */
221
222err_close_nic:
223 destroy_workqueue(mdev->workqueue);
224err_mr:
225 mlx4_mr_free(dev, &mdev->mr);
226err_uar:
227 mlx4_uar_free(dev, &mdev->priv_uar);
228err_pd:
229 mlx4_pd_free(dev, mdev->priv_pdn);
230err_free_dev:
231 kfree(mdev);
232err_free_res:
233 return NULL;
234}
235
236static struct mlx4_interface mlx4_en_interface = {
237 .add = mlx4_en_add,
238 .remove = mlx4_en_remove,
239 .event = mlx4_en_event,
240};
241
242static int __init mlx4_en_init(void)
243{
244 return mlx4_register_interface(&mlx4_en_interface);
245}
246
247static void __exit mlx4_en_cleanup(void)
248{
249 mlx4_unregister_interface(&mlx4_en_interface);
250}
251
252module_init(mlx4_en_init);
253module_exit(mlx4_en_cleanup);
254
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
new file mode 100644
index 000000000000..a339afbeed38
--- /dev/null
+++ b/drivers/net/mlx4/en_netdev.c
@@ -0,0 +1,1088 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/etherdevice.h>
35#include <linux/tcp.h>
36#include <linux/if_vlan.h>
37#include <linux/delay.h>
38
39#include <linux/mlx4/driver.h>
40#include <linux/mlx4/device.h>
41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/cq.h>
43
44#include "mlx4_en.h"
45#include "en_port.h"
46
47
48static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
49{
50 struct mlx4_en_priv *priv = netdev_priv(dev);
51 struct mlx4_en_dev *mdev = priv->mdev;
52 int err;
53
54 mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
55 priv->vlgrp = grp;
56
57 mutex_lock(&mdev->state_lock);
58 if (mdev->device_up && priv->port_up) {
59 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
60 if (err)
61 mlx4_err(mdev, "Failed configuring VLAN filter\n");
62 }
63 mutex_unlock(&mdev->state_lock);
64}
65
66static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
67{
68 struct mlx4_en_priv *priv = netdev_priv(dev);
69 struct mlx4_en_dev *mdev = priv->mdev;
70 int err;
71
72 if (!priv->vlgrp)
73 return;
74
75 mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
76 vid, vlan_group_get_device(priv->vlgrp, vid));
77
78 /* Add VID to port VLAN filter */
79 mutex_lock(&mdev->state_lock);
80 if (mdev->device_up && priv->port_up) {
81 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
82 if (err)
83 mlx4_err(mdev, "Failed configuring VLAN filter\n");
84 }
85 mutex_unlock(&mdev->state_lock);
86}
87
88static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
89{
90 struct mlx4_en_priv *priv = netdev_priv(dev);
91 struct mlx4_en_dev *mdev = priv->mdev;
92 int err;
93
94 if (!priv->vlgrp)
95 return;
96
97 mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
98 "entry:%p)\n", vid, priv->vlgrp,
99 vlan_group_get_device(priv->vlgrp, vid));
100 vlan_group_set_device(priv->vlgrp, vid, NULL);
101
102 /* Remove VID from port VLAN filter */
103 mutex_lock(&mdev->state_lock);
104 if (mdev->device_up && priv->port_up) {
105 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
106 if (err)
107 mlx4_err(mdev, "Failed configuring VLAN filter\n");
108 }
109 mutex_unlock(&mdev->state_lock);
110}
111
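/* Pack a 6-byte Ethernet address into a u64, most significant byte first,
 * as passed to mlx4_register_mac() and the multicast filter commands. */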
112static u64 mlx4_en_mac_to_u64(u8 *addr)
113{
114 u64 mac = 0;
115 int i;
116
117 for (i = 0; i < ETH_ALEN; i++) {
118 mac <<= 8;
119 mac |= addr[i];
120 }
121 return mac;
122}
123
124static int mlx4_en_set_mac(struct net_device *dev, void *addr)
125{
126 struct mlx4_en_priv *priv = netdev_priv(dev);
127 struct mlx4_en_dev *mdev = priv->mdev;
128 struct sockaddr *saddr = addr;
129
130 if (!is_valid_ether_addr(saddr->sa_data))
131 return -EADDRNOTAVAIL;
132
133 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
134 priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
135 queue_work(mdev->workqueue, &priv->mac_task);
136 return 0;
137}
138
139static void mlx4_en_do_set_mac(struct work_struct *work)
140{
141 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
142 mac_task);
143 struct mlx4_en_dev *mdev = priv->mdev;
144 int err = 0;
145
146 mutex_lock(&mdev->state_lock);
147 if (priv->port_up) {
148 /* Remove old MAC and insert the new one */
149 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
150 err = mlx4_register_mac(mdev->dev, priv->port,
151 priv->mac, &priv->mac_index);
152 if (err)
153 mlx4_err(mdev, "Failed changing HW MAC address\n");
154 } else
155 mlx4_dbg(HW, priv, "Port is down, exiting...\n");
156
157 mutex_unlock(&mdev->state_lock);
158}
159
160static void mlx4_en_clear_list(struct net_device *dev)
161{
162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct dev_mc_list *plist = priv->mc_list;
164 struct dev_mc_list *next;
165
166 while (plist) {
167 next = plist->next;
168 kfree(plist);
169 plist = next;
170 }
171 priv->mc_list = NULL;
172}
173
174static void mlx4_en_cache_mclist(struct net_device *dev)
175{
176 struct mlx4_en_priv *priv = netdev_priv(dev);
177 struct mlx4_en_dev *mdev = priv->mdev;
178 struct dev_mc_list *mclist;
179 struct dev_mc_list *tmp;
180 struct dev_mc_list *plist = NULL;
181
182 for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
183 tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
184 if (!tmp) {
185 mlx4_err(mdev, "failed to allocate multicast list\n");
186 mlx4_en_clear_list(dev);
187 return;
188 }
189 memcpy(tmp, mclist, sizeof(struct dev_mc_list));
190 tmp->next = NULL;
191 if (plist)
192 plist->next = tmp;
193 else
194 priv->mc_list = tmp;
195 plist = tmp;
196 }
197}
198
199
200static void mlx4_en_set_multicast(struct net_device *dev)
201{
202 struct mlx4_en_priv *priv = netdev_priv(dev);
203
204 if (!priv->port_up)
205 return;
206
207 queue_work(priv->mdev->workqueue, &priv->mcast_task);
208}
209
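/* Workqueue task that applies the promiscuous/allmulti flags and reprograms
 * the port multicast filter from a cached copy of dev->mc_list. */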
210static void mlx4_en_do_set_multicast(struct work_struct *work)
211{
212 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
213 mcast_task);
214 struct mlx4_en_dev *mdev = priv->mdev;
215 struct net_device *dev = priv->dev;
216 struct dev_mc_list *mclist;
217 u64 mcast_addr = 0;
218 int err;
219
220 mutex_lock(&mdev->state_lock);
221 if (!mdev->device_up) {
222 mlx4_dbg(HW, priv, "Card is not up, ignoring "
223 "multicast change.\n");
224 goto out;
225 }
226 if (!priv->port_up) {
227 mlx4_dbg(HW, priv, "Port is down, ignoring "
228 "multicast change.\n");
229 goto out;
230 }
231
232 /*
233	 * Promiscuous mode: disable all filters
234 */
235
236 if (dev->flags & IFF_PROMISC) {
237 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
238 if (netif_msg_rx_status(priv))
239 mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
240 priv->port);
241 priv->flags |= MLX4_EN_FLAG_PROMISC;
242
243			/* Enable promiscuous mode */
244 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
245 priv->base_qpn, 1);
246 if (err)
247 mlx4_err(mdev, "Failed enabling "
248					 "promiscuous mode\n");
249
250 /* Disable port multicast filter (unconditionally) */
251 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
252 0, MLX4_MCAST_DISABLE);
253 if (err)
254 mlx4_err(mdev, "Failed disabling "
255 "multicast filter\n");
256
257 /* Disable port VLAN filter */
258 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
259 if (err)
260 mlx4_err(mdev, "Failed disabling "
261 "VLAN filter\n");
262 }
263 goto out;
264 }
265
266 /*
267	 * Not in promiscuous mode
268 */
269
270 if (priv->flags & MLX4_EN_FLAG_PROMISC) {
271 if (netif_msg_rx_status(priv))
272 mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
273 priv->port);
274 priv->flags &= ~MLX4_EN_FLAG_PROMISC;
275
276		/* Disable promiscuous mode */
277 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
278 priv->base_qpn, 0);
279 if (err)
280			mlx4_err(mdev, "Failed disabling promiscuous mode\n");
281
282 /* Enable port VLAN filter */
283 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
284 if (err)
285 mlx4_err(mdev, "Failed enabling VLAN filter\n");
286 }
287
288 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
289 if (dev->flags & IFF_ALLMULTI) {
290 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
291 0, MLX4_MCAST_DISABLE);
292 if (err)
293 mlx4_err(mdev, "Failed disabling multicast filter\n");
294 } else {
295 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
296 0, MLX4_MCAST_DISABLE);
297 if (err)
298 mlx4_err(mdev, "Failed disabling multicast filter\n");
299
300 /* Flush mcast filter and init it with broadcast address */
301 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
302 1, MLX4_MCAST_CONFIG);
303
304 /* Update multicast list - we cache all addresses so they won't
305		 * change while HW is updated holding the command semaphore */
306 netif_tx_lock_bh(dev);
307 mlx4_en_cache_mclist(dev);
308 netif_tx_unlock_bh(dev);
309 for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
310 mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
311 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
312 mcast_addr, 0, MLX4_MCAST_CONFIG);
313 }
314 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
315 0, MLX4_MCAST_ENABLE);
316 if (err)
317 mlx4_err(mdev, "Failed enabling multicast filter\n");
318
319 mlx4_en_clear_list(dev);
320 }
321out:
322 mutex_unlock(&mdev->state_lock);
323}
324
325#ifdef CONFIG_NET_POLL_CONTROLLER
326static void mlx4_en_netpoll(struct net_device *dev)
327{
328 struct mlx4_en_priv *priv = netdev_priv(dev);
329 struct mlx4_en_cq *cq;
330 unsigned long flags;
331 int i;
332
333 for (i = 0; i < priv->rx_ring_num; i++) {
334 cq = &priv->rx_cq[i];
335 spin_lock_irqsave(&cq->lock, flags);
336 napi_synchronize(&cq->napi);
337 mlx4_en_process_rx_cq(dev, cq, 0);
338 spin_unlock_irqrestore(&cq->lock, flags);
339 }
340}
341#endif
342
343static void mlx4_en_tx_timeout(struct net_device *dev)
344{
345 struct mlx4_en_priv *priv = netdev_priv(dev);
346 struct mlx4_en_dev *mdev = priv->mdev;
347
348 if (netif_msg_timer(priv))
349 mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
350
351 if (netif_carrier_ok(dev)) {
352 priv->port_stats.tx_timeout++;
353 mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
354 queue_work(mdev->workqueue, &priv->watchdog_task);
355 }
356}
357
358
359static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
360{
361 struct mlx4_en_priv *priv = netdev_priv(dev);
362
363 spin_lock_bh(&priv->stats_lock);
364 memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
365 spin_unlock_bh(&priv->stats_lock);
366
367 return &priv->ret_stats;
368}
369
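/* Derive the default RX/TX coalescing parameters from the device profile;
 * AUTO_CONF values fall back to an MTU-based frame count and a fixed time. */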
370static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
371{
372 struct mlx4_en_dev *mdev = priv->mdev;
373 struct mlx4_en_cq *cq;
374 int i;
375
376 /* If we haven't received a specific coalescing setting
377	 * (module param), we set the moderation parameters as follows:
378	 * - moder_cnt is set to the number of MTU-sized packets needed to
379	 *   satisfy our coalescing target.
380 * - moder_time is set to a fixed value.
381 */
382 priv->rx_frames = (mdev->profile.rx_moder_cnt ==
383 MLX4_EN_AUTO_CONF) ?
384 MLX4_EN_RX_COAL_TARGET /
385 priv->dev->mtu + 1 :
386 mdev->profile.rx_moder_cnt;
387 priv->rx_usecs = (mdev->profile.rx_moder_time ==
388 MLX4_EN_AUTO_CONF) ?
389 MLX4_EN_RX_COAL_TIME :
390 mdev->profile.rx_moder_time;
391	mlx4_dbg(INTR, priv, "Default coalescing params for mtu:%d - "
392 "rx_frames:%d rx_usecs:%d\n",
393 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
394
395 /* Setup cq moderation params */
396 for (i = 0; i < priv->rx_ring_num; i++) {
397 cq = &priv->rx_cq[i];
398 cq->moder_cnt = priv->rx_frames;
399 cq->moder_time = priv->rx_usecs;
400 }
401
402 for (i = 0; i < priv->tx_ring_num; i++) {
403 cq = &priv->tx_cq[i];
404 cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
405 cq->moder_time = MLX4_EN_TX_COAL_TIME;
406 }
407
408 /* Reset auto-moderation params */
409 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
410 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
411 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
412 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
413 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
414 priv->adaptive_rx_coal = mdev->profile.auto_moder;
415 priv->last_moder_time = MLX4_EN_AUTO_CONF;
416 priv->last_moder_jiffies = 0;
417 priv->last_moder_packets = 0;
418 priv->last_moder_tx_packets = 0;
419 priv->last_moder_bytes = 0;
420}
421
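/* Adaptive interrupt moderation: sample the packet rate once per interval
 * and scale the RX moderation time between rx_usecs_low and rx_usecs_high. */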
422static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
423{
424 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
425 struct mlx4_en_dev *mdev = priv->mdev;
426 struct mlx4_en_cq *cq;
427 unsigned long packets;
428 unsigned long rate;
429 unsigned long avg_pkt_size;
430 unsigned long rx_packets;
431 unsigned long rx_bytes;
432 unsigned long tx_packets;
433 unsigned long tx_pkt_diff;
434 unsigned long rx_pkt_diff;
435 int moder_time;
436 int i, err;
437
438 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
439 return;
440
441 spin_lock_bh(&priv->stats_lock);
442 rx_packets = priv->stats.rx_packets;
443 rx_bytes = priv->stats.rx_bytes;
444 tx_packets = priv->stats.tx_packets;
445 spin_unlock_bh(&priv->stats_lock);
446
447 if (!priv->last_moder_jiffies || !period)
448 goto out;
449
450 tx_pkt_diff = ((unsigned long) (tx_packets -
451 priv->last_moder_tx_packets));
452 rx_pkt_diff = ((unsigned long) (rx_packets -
453 priv->last_moder_packets));
454 packets = max(tx_pkt_diff, rx_pkt_diff);
455 rate = packets * HZ / period;
456 avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
457 priv->last_moder_bytes)) / packets : 0;
458
459	/* Apply auto-moderation only when the packet rate exceeds a rate at
460	 * which it matters */
461 if (rate > MLX4_EN_RX_RATE_THRESH) {
462 /* If tx and rx packet rates are not balanced, assume that
463 * traffic is mainly BW bound and apply maximum moderation.
464 * Otherwise, moderate according to packet rate */
465 if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
466 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
467 moder_time = priv->rx_usecs_high;
468 } else {
469 if (rate < priv->pkt_rate_low)
470 moder_time = priv->rx_usecs_low;
471 else if (rate > priv->pkt_rate_high)
472 moder_time = priv->rx_usecs_high;
473 else
474 moder_time = (rate - priv->pkt_rate_low) *
475 (priv->rx_usecs_high - priv->rx_usecs_low) /
476 (priv->pkt_rate_high - priv->pkt_rate_low) +
477 priv->rx_usecs_low;
478 }
479 } else {
480 /* When packet rate is low, use default moderation rather than
481 * 0 to prevent interrupt storms if traffic suddenly increases */
482 moder_time = priv->rx_usecs;
483 }
484
485 mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
486 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
487
488 mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
489 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
490 priv->last_moder_time, moder_time, period, packets,
491 avg_pkt_size, rate);
492
493 if (moder_time != priv->last_moder_time) {
494 priv->last_moder_time = moder_time;
495 for (i = 0; i < priv->rx_ring_num; i++) {
496 cq = &priv->rx_cq[i];
497 cq->moder_time = moder_time;
498 err = mlx4_en_set_cq_moder(priv, cq);
499 if (err) {
500 mlx4_err(mdev, "Failed modifying moderation for cq:%d "
501 "on port:%d\n", i, priv->port);
502 break;
503 }
504 }
505 }
506
507out:
508 priv->last_moder_packets = rx_packets;
509 priv->last_moder_tx_packets = tx_packets;
510 priv->last_moder_bytes = rx_bytes;
511 priv->last_moder_jiffies = jiffies;
512}
513
514static void mlx4_en_do_get_stats(struct work_struct *work)
515{
516 struct delayed_work *delay = container_of(work, struct delayed_work, work);
517 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
518 stats_task);
519 struct mlx4_en_dev *mdev = priv->mdev;
520 int err;
521
522 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
523 if (err)
524 mlx4_dbg(HW, priv, "Could not update stats for "
525 "port:%d\n", priv->port);
526
527 mutex_lock(&mdev->state_lock);
528 if (mdev->device_up) {
529 if (priv->port_up)
530 mlx4_en_auto_moderation(priv);
531
532 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
533 }
534 mutex_unlock(&mdev->state_lock);
535}
536
537static void mlx4_en_linkstate(struct work_struct *work)
538{
539 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
540 linkstate_task);
541 struct mlx4_en_dev *mdev = priv->mdev;
542 int linkstate = priv->link_state;
543
544 mutex_lock(&mdev->state_lock);
545	/* If the observable port state changed, set the carrier state and
546	 * report it to the system log */
547 if (priv->last_link_state != linkstate) {
548 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
549 if (netif_msg_link(priv))
550 mlx4_info(mdev, "Port %d - link down\n", priv->port);
551 netif_carrier_off(priv->dev);
552 } else {
553 if (netif_msg_link(priv))
554 mlx4_info(mdev, "Port %d - link up\n", priv->port);
555 netif_carrier_on(priv->dev);
556 }
557 }
558 priv->last_link_state = linkstate;
559 mutex_unlock(&mdev->state_lock);
560}
561
562
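/* Bring the port up: activate RX/TX CQs and rings, configure RSS steering,
 * program the port settings and MAC, then issue INIT_PORT. */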
563static int mlx4_en_start_port(struct net_device *dev)
564{
565 struct mlx4_en_priv *priv = netdev_priv(dev);
566 struct mlx4_en_dev *mdev = priv->mdev;
567 struct mlx4_en_cq *cq;
568 struct mlx4_en_tx_ring *tx_ring;
569 struct mlx4_en_rx_ring *rx_ring;
570 int rx_index = 0;
571 int tx_index = 0;
572 u16 stride;
573 int err = 0;
574 int i;
575 int j;
576
577 if (priv->port_up) {
578 mlx4_dbg(DRV, priv, "start port called while port already up\n");
579 return 0;
580 }
581
582 /* Calculate Rx buf size */
583 dev->mtu = min(dev->mtu, priv->max_mtu);
584 mlx4_en_calc_rx_buf(dev);
585 mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
586 stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
587 DS_SIZE * priv->num_frags);
588 /* Configure rx cq's and rings */
589 for (i = 0; i < priv->rx_ring_num; i++) {
590 cq = &priv->rx_cq[i];
591 rx_ring = &priv->rx_ring[i];
592
593 err = mlx4_en_activate_cq(priv, cq);
594 if (err) {
595 mlx4_err(mdev, "Failed activating Rx CQ\n");
596 goto rx_err;
597 }
598 for (j = 0; j < cq->size; j++)
599 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
600 err = mlx4_en_set_cq_moder(priv, cq);
601 if (err) {
602 mlx4_err(mdev, "Failed setting cq moderation parameters");
603 mlx4_en_deactivate_cq(priv, cq);
604 goto cq_err;
605 }
606 mlx4_en_arm_cq(priv, cq);
607
608 ++rx_index;
609 }
610
611 err = mlx4_en_activate_rx_rings(priv);
612 if (err) {
613 mlx4_err(mdev, "Failed to activate RX rings\n");
614 goto cq_err;
615 }
616
617 err = mlx4_en_config_rss_steer(priv);
618 if (err) {
619 mlx4_err(mdev, "Failed configuring rss steering\n");
620 goto rx_err;
621 }
622
623 /* Configure tx cq's and rings */
624 for (i = 0; i < priv->tx_ring_num; i++) {
625 /* Configure cq */
626 cq = &priv->tx_cq[i];
627 err = mlx4_en_activate_cq(priv, cq);
628 if (err) {
629 mlx4_err(mdev, "Failed allocating Tx CQ\n");
630 goto tx_err;
631 }
632 err = mlx4_en_set_cq_moder(priv, cq);
633 if (err) {
634 mlx4_err(mdev, "Failed setting cq moderation parameters");
635 mlx4_en_deactivate_cq(priv, cq);
636 goto tx_err;
637 }
638 mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
639 cq->buf->wqe_index = cpu_to_be16(0xffff);
640
641 /* Configure ring */
642 tx_ring = &priv->tx_ring[i];
643 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
644 priv->rx_ring[0].srq.srqn);
645 if (err) {
646 mlx4_err(mdev, "Failed allocating Tx ring\n");
647 mlx4_en_deactivate_cq(priv, cq);
648 goto tx_err;
649 }
650 /* Set initial ownership of all Tx TXBBs to SW (1) */
651 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
652 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
653 ++tx_index;
654 }
655
656 /* Configure port */
657 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
658 priv->rx_skb_size + ETH_FCS_LEN,
659 mdev->profile.tx_pause,
660 mdev->profile.tx_ppp,
661 mdev->profile.rx_pause,
662 mdev->profile.rx_ppp);
663 if (err) {
664 mlx4_err(mdev, "Failed setting port general configurations"
665 " for port %d, with error %d\n", priv->port, err);
666 goto tx_err;
667 }
668 /* Set default qp number */
669 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
670 if (err) {
671 mlx4_err(mdev, "Failed setting default qp numbers\n");
672 goto tx_err;
673 }
674 /* Set port mac number */
675 mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
676 err = mlx4_register_mac(mdev->dev, priv->port,
677 priv->mac, &priv->mac_index);
678 if (err) {
679 mlx4_err(mdev, "Failed setting port mac\n");
680 goto tx_err;
681 }
682
683 /* Init port */
684 mlx4_dbg(HW, priv, "Initializing port\n");
685 err = mlx4_INIT_PORT(mdev->dev, priv->port);
686 if (err) {
687 mlx4_err(mdev, "Failed Initializing port\n");
688 goto mac_err;
689 }
690
691 /* Schedule multicast task to populate multicast list */
692 queue_work(mdev->workqueue, &priv->mcast_task);
693
694 priv->port_up = true;
695 netif_start_queue(dev);
696 return 0;
697
698mac_err:
699 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
700tx_err:
701 while (tx_index--) {
702 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
703 mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
704 }
705
706 mlx4_en_release_rss_steer(priv);
707rx_err:
708 for (i = 0; i < priv->rx_ring_num; i++)
709		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
710cq_err:
711 while (rx_index--)
712 mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
713
714 return err; /* need to close devices */
715}
716
717
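/* Bring the port down: stop the TX queue, issue CLOSE_PORT, unregister the
 * MAC and release the TX/RX rings, RSS QPs and their CQs. */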
718static void mlx4_en_stop_port(struct net_device *dev)
719{
720 struct mlx4_en_priv *priv = netdev_priv(dev);
721 struct mlx4_en_dev *mdev = priv->mdev;
722 int i;
723
724 if (!priv->port_up) {
725 mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
726 priv->port);
727 return;
728 }
729 netif_stop_queue(dev);
730
731 /* Synchronize with tx routine */
732 netif_tx_lock_bh(dev);
733 priv->port_up = false;
734 netif_tx_unlock_bh(dev);
735
736 /* close port*/
737 mlx4_CLOSE_PORT(mdev->dev, priv->port);
738
739 /* Unregister Mac address for the port */
740 mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
741
742 /* Free TX Rings */
743 for (i = 0; i < priv->tx_ring_num; i++) {
744 mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
745 mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
746 }
747 msleep(10);
748
749 for (i = 0; i < priv->tx_ring_num; i++)
750 mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
751
752 /* Free RSS qps */
753 mlx4_en_release_rss_steer(priv);
754
755 /* Free RX Rings */
756 for (i = 0; i < priv->rx_ring_num; i++) {
757 mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
758 while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
759 msleep(1);
760 mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
761 }
762}
763
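/* Watchdog task: restart the port after a TX timeout. */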
764static void mlx4_en_restart(struct work_struct *work)
765{
766 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
767 watchdog_task);
768 struct mlx4_en_dev *mdev = priv->mdev;
769 struct net_device *dev = priv->dev;
770
771 mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
772 mlx4_en_stop_port(dev);
773 if (mlx4_en_start_port(dev))
774 mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
775}
776
777
778static int mlx4_en_open(struct net_device *dev)
779{
780 struct mlx4_en_priv *priv = netdev_priv(dev);
781 struct mlx4_en_dev *mdev = priv->mdev;
782 int i;
783 int err = 0;
784
785 mutex_lock(&mdev->state_lock);
786
787 if (!mdev->device_up) {
788 mlx4_err(mdev, "Cannot open - device down/disabled\n");
789 err = -EBUSY;
790 goto out;
791 }
792
793 /* Reset HW statistics and performance counters */
794 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
795 mlx4_dbg(HW, priv, "Failed dumping statistics\n");
796
797 memset(&priv->stats, 0, sizeof(priv->stats));
798 memset(&priv->pstats, 0, sizeof(priv->pstats));
799
800 for (i = 0; i < priv->tx_ring_num; i++) {
801 priv->tx_ring[i].bytes = 0;
802 priv->tx_ring[i].packets = 0;
803 }
804 for (i = 0; i < priv->rx_ring_num; i++) {
805 priv->rx_ring[i].bytes = 0;
806 priv->rx_ring[i].packets = 0;
807 }
808
809 mlx4_en_set_default_moderation(priv);
810 err = mlx4_en_start_port(dev);
811 if (err)
812 mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
813
814out:
815 mutex_unlock(&mdev->state_lock);
816 return err;
817}
818
819
820static int mlx4_en_close(struct net_device *dev)
821{
822 struct mlx4_en_priv *priv = netdev_priv(dev);
823 struct mlx4_en_dev *mdev = priv->mdev;
824
825 if (netif_msg_ifdown(priv))
826 mlx4_info(mdev, "Close called for port:%d\n", priv->port);
827
828 mutex_lock(&mdev->state_lock);
829
830 mlx4_en_stop_port(dev);
831 netif_carrier_off(dev);
832
833 mutex_unlock(&mdev->state_lock);
834 return 0;
835}
836
837static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
838{
839 int i;
840
841 for (i = 0; i < priv->tx_ring_num; i++) {
842 if (priv->tx_ring[i].tx_info)
843 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
844 if (priv->tx_cq[i].buf)
845 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
846 }
847
848 for (i = 0; i < priv->rx_ring_num; i++) {
849 if (priv->rx_ring[i].rx_info)
850 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
851 if (priv->rx_cq[i].buf)
852 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
853 }
854}
855
856static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
857{
858 struct mlx4_en_dev *mdev = priv->mdev;
859 struct mlx4_en_port_profile *prof = priv->prof;
860 int i;
861
862 /* Create tx Rings */
863 for (i = 0; i < priv->tx_ring_num; i++) {
864 if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
865 prof->tx_ring_size, i, TX))
866 goto err;
867
868 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
869 prof->tx_ring_size, TXBB_SIZE))
870 goto err;
871 }
872
873 /* Create rx Rings */
874 for (i = 0; i < priv->rx_ring_num; i++) {
875 if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
876 prof->rx_ring_size, i, RX))
877 goto err;
878
879 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
880 prof->rx_ring_size, priv->stride))
881 goto err;
882 }
883
884 return 0;
885
886err:
887 mlx4_err(mdev, "Failed to allocate NIC resources\n");
888 return -ENOMEM;
889}
890
891
892void mlx4_en_destroy_netdev(struct net_device *dev)
893{
894 struct mlx4_en_priv *priv = netdev_priv(dev);
895 struct mlx4_en_dev *mdev = priv->mdev;
896
897 mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
898
899 /* Unregister device - this will close the port if it was up */
900 if (priv->registered)
901 unregister_netdev(dev);
902
903 if (priv->allocated)
904 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
905
906 cancel_delayed_work(&priv->stats_task);
907 cancel_delayed_work(&priv->refill_task);
908 /* flush any pending task for this netdev */
909 flush_workqueue(mdev->workqueue);
910
911 /* Detach the netdev so tasks would not attempt to access it */
912 mutex_lock(&mdev->state_lock);
913 mdev->pndev[priv->port] = NULL;
914 mutex_unlock(&mdev->state_lock);
915
916 mlx4_en_free_resources(priv);
917 free_netdev(dev);
918}
919
920static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
921{
922 struct mlx4_en_priv *priv = netdev_priv(dev);
923 struct mlx4_en_dev *mdev = priv->mdev;
924 int err = 0;
925
926 mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
927 dev->mtu, new_mtu);
928
929 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
930 mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
931 return -EPERM;
932 }
933 dev->mtu = new_mtu;
934
935 if (netif_running(dev)) {
936 mutex_lock(&mdev->state_lock);
937 if (!mdev->device_up) {
938 /* NIC is probably restarting - let watchdog task reset
939 * the port */
940 mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
941 } else {
942 mlx4_en_stop_port(dev);
943 mlx4_en_set_default_moderation(priv);
944 err = mlx4_en_start_port(dev);
945 if (err) {
946 mlx4_err(mdev, "Failed restarting port:%d\n",
947 priv->port);
948 queue_work(mdev->workqueue, &priv->watchdog_task);
949 }
950 }
951 mutex_unlock(&mdev->state_lock);
952 }
953 return 0;
954}
955
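/* Create, initialize and register the net_device for one physical port. */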
956int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
957 struct mlx4_en_port_profile *prof)
958{
959 struct net_device *dev;
960 struct mlx4_en_priv *priv;
961 int i;
962 int err;
963
964 dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
965 if (dev == NULL) {
966 mlx4_err(mdev, "Net device allocation failed\n");
967 return -ENOMEM;
968 }
969
970 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
971
972 /*
973 * Initialize driver private data
974 */
975
976 priv = netdev_priv(dev);
977 memset(priv, 0, sizeof(struct mlx4_en_priv));
978 priv->dev = dev;
979 priv->mdev = mdev;
980 priv->prof = prof;
981 priv->port = port;
982 priv->port_up = false;
983 priv->rx_csum = 1;
984 priv->flags = prof->flags;
985 priv->tx_ring_num = prof->tx_ring_num;
986 priv->rx_ring_num = prof->rx_ring_num;
987 priv->mc_list = NULL;
988 priv->mac_index = -1;
989 priv->msg_enable = MLX4_EN_MSG_LEVEL;
990 spin_lock_init(&priv->stats_lock);
991 INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
992 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
993 INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
994 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
995 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
996 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
997
998 /* Query for default mac and max mtu */
999 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
1000 priv->mac = mdev->dev->caps.def_mac[priv->port];
1001 if (ILLEGAL_MAC(priv->mac)) {
1002		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
1003 priv->port, priv->mac);
1004 err = -EINVAL;
1005 goto out;
1006 }
1007
1008 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
1009 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
1010 err = mlx4_en_alloc_resources(priv);
1011 if (err)
1012 goto out;
1013
1014 /* Populate Rx default RSS mappings */
1015 mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
1016 RSS_FACTOR, priv->rx_ring_num);
1017 /* Allocate page for receive rings */
1018 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
1019 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
1020 if (err) {
1021 mlx4_err(mdev, "Failed to allocate page for rx qps\n");
1022 goto out;
1023 }
1024 priv->allocated = 1;
1025
1026 /* Populate Tx priority mappings */
1027 mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
1028
1029 /*
1030 * Initialize netdev entry points
1031 */
1032
1033 dev->open = &mlx4_en_open;
1034 dev->stop = &mlx4_en_close;
1035 dev->hard_start_xmit = &mlx4_en_xmit;
1036 dev->get_stats = &mlx4_en_get_stats;
1037 dev->set_multicast_list = &mlx4_en_set_multicast;
1038 dev->set_mac_address = &mlx4_en_set_mac;
1039 dev->change_mtu = &mlx4_en_change_mtu;
1040 dev->tx_timeout = &mlx4_en_tx_timeout;
1041 dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1042 dev->vlan_rx_register = mlx4_en_vlan_rx_register;
1043 dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
1044 dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
1045#ifdef CONFIG_NET_POLL_CONTROLLER
1046 dev->poll_controller = mlx4_en_netpoll;
1047#endif
1048 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1049
1050	/* Set default MAC */
1051 dev->addr_len = ETH_ALEN;
1052 for (i = 0; i < ETH_ALEN; i++)
1053 dev->dev_addr[ETH_ALEN - 1 - i] =
1054 (u8) (priv->mac >> (8 * i));
1055
1056 /*
1057 * Set driver features
1058 */
1059 dev->features |= NETIF_F_SG;
1060 dev->features |= NETIF_F_HW_CSUM;
1061 dev->features |= NETIF_F_HIGHDMA;
1062 dev->features |= NETIF_F_HW_VLAN_TX |
1063 NETIF_F_HW_VLAN_RX |
1064 NETIF_F_HW_VLAN_FILTER;
1065 if (mdev->profile.num_lro)
1066 dev->features |= NETIF_F_LRO;
1067 if (mdev->LSO_support) {
1068 dev->features |= NETIF_F_TSO;
1069 dev->features |= NETIF_F_TSO6;
1070 }
1071
1072 mdev->pndev[port] = dev;
1073
1074 netif_carrier_off(dev);
1075 err = register_netdev(dev);
1076 if (err) {
1077 mlx4_err(mdev, "Netdev registration failed\n");
1078 goto out;
1079 }
1080 priv->registered = 1;
1081 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1082 return 0;
1083
1084out:
1085 mlx4_en_destroy_netdev(dev);
1086 return err;
1087}
1088
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
new file mode 100644
index 000000000000..c2e69b1bcd0a
--- /dev/null
+++ b/drivers/net/mlx4/en_params.c
@@ -0,0 +1,480 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/kernel.h>
35#include <linux/ethtool.h>
36#include <linux/netdevice.h>
37
38#include "mlx4_en.h"
39#include "en_port.h"
40
41#define MLX4_EN_PARM_INT(X, def_val, desc) \
42 static unsigned int X = def_val;\
43 module_param(X , uint, 0444); \
44 MODULE_PARM_DESC(X, desc);
45
46
47/*
48 * Device scope module parameters
49 */
50
51
52/* Use an XOR rather than a Toeplitz hash function for RSS */
53MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
54
55/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
56MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
57
58/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
59MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
60 "Number of LRO sessions per ring or disabled (0)");
61
62/* Priority pausing */
63MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
64 "Pause policy on TX: 0 never generate pause frames "
65 "1 generate pause frames according to RX buffer threshold");
66MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
67 "Pause policy on RX: 0 ignore received pause frames "
68 "1 respect received pause frames");
69MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
70 " Per priority bit mask");
71MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
72 " Per priority bit mask");
73
74/* Interrupt moderation tuning */
75MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
76 "Max coalesced descriptors for Rx interrupt moderation");
77MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
78 "Timeout following last packet for Rx interrupt moderation");
79MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
80
81MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number of Rx rings for port 1 (0 = #cores)");
82MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number of Rx rings for port 2 (0 = #cores)");
83
84MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
85MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
86MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
87MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
88
89
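/* Validate the module parameters and translate them into the per-device and
 * per-port profile used when the netdevs are created. */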
90int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
91{
92 struct mlx4_en_profile *params = &mdev->profile;
93
94 params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
95 params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
96 params->auto_moder = auto_moder;
97 params->rss_xor = (rss_xor != 0);
98 params->rss_mask = rss_mask & 0x1f;
99 params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
100 params->rx_pause = pprx;
101 params->rx_ppp = pfcrx;
102 params->tx_pause = pptx;
103 params->tx_ppp = pfctx;
104 if (params->rx_ppp || params->tx_ppp) {
105 params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
106 params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
107 } else {
108 params->prof[1].tx_ring_num = 1;
109 params->prof[2].tx_ring_num = 1;
110 }
111 params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
112 params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
113
114 if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
115 tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
116 params->prof[1].tx_ring_size =
117 (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
118 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
119
120 if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
121 tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
122 params->prof[2].tx_ring_size =
123 (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
124 MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
125
126 if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
127 rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
128 params->prof[1].rx_ring_size =
129 (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
130 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
131
132 if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
133 rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
134 params->prof[2].rx_ring_size =
135 (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
136 MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
137 return 0;
138}
139
140
141/*
142 * Ethtool support
143 */
144
145static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
146{
147 int i;
148
149 priv->port_stats.lro_aggregated = 0;
150 priv->port_stats.lro_flushed = 0;
151 priv->port_stats.lro_no_desc = 0;
152
153 for (i = 0; i < priv->rx_ring_num; i++) {
154 priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
155 priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
156 priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
157 }
158}
159
160static void
161mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
162{
163 struct mlx4_en_priv *priv = netdev_priv(dev);
164 struct mlx4_en_dev *mdev = priv->mdev;
165
166 sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
167 strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
168 sprintf(drvinfo->fw_version, "%d.%d.%d",
169 (u16) (mdev->dev->caps.fw_ver >> 32),
170 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
171 (u16) (mdev->dev->caps.fw_ver & 0xffff));
172 strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
173 drvinfo->n_stats = 0;
174 drvinfo->regdump_len = 0;
175 drvinfo->eedump_len = 0;
176}
177
178static u32 mlx4_en_get_tso(struct net_device *dev)
179{
180 return (dev->features & NETIF_F_TSO) != 0;
181}
182
183static int mlx4_en_set_tso(struct net_device *dev, u32 data)
184{
185 struct mlx4_en_priv *priv = netdev_priv(dev);
186
187 if (data) {
188 if (!priv->mdev->LSO_support)
189 return -EPERM;
190 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
191 } else
192 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
193 return 0;
194}
195
196static u32 mlx4_en_get_rx_csum(struct net_device *dev)
197{
198 struct mlx4_en_priv *priv = netdev_priv(dev);
199 return priv->rx_csum;
200}
201
202static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
203{
204 struct mlx4_en_priv *priv = netdev_priv(dev);
205 priv->rx_csum = (data != 0);
206 return 0;
207}
208
209static const char main_strings[][ETH_GSTRING_LEN] = {
210 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
211 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
212 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
213 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
214 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
215 "tx_heartbeat_errors", "tx_window_errors",
216
217 /* port statistics */
218 "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
219 "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
220 "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
221
222 /* packet statistics */
223 "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
224 "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
225 "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
226 "tx_prio_6", "tx_prio_7",
227};
228#define NUM_MAIN_STATS 21
229#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
230
231static u32 mlx4_en_get_msglevel(struct net_device *dev)
232{
233 return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
234}
235
236static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
237{
238 ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
239}
240
241static void mlx4_en_get_wol(struct net_device *netdev,
242 struct ethtool_wolinfo *wol)
243{
244 wol->supported = 0;
245 wol->wolopts = 0;
246
247 return;
248}
249
250static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
251{
252 struct mlx4_en_priv *priv = netdev_priv(dev);
253
254 if (sset != ETH_SS_STATS)
255 return -EOPNOTSUPP;
256
257 return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
258}
259
260static void mlx4_en_get_ethtool_stats(struct net_device *dev,
261 struct ethtool_stats *stats, uint64_t *data)
262{
263 struct mlx4_en_priv *priv = netdev_priv(dev);
264 int index = 0;
265 int i;
266
267 spin_lock_bh(&priv->stats_lock);
268
269 mlx4_en_update_lro_stats(priv);
270
271 for (i = 0; i < NUM_MAIN_STATS; i++)
272 data[index++] = ((unsigned long *) &priv->stats)[i];
273 for (i = 0; i < NUM_PORT_STATS; i++)
274 data[index++] = ((unsigned long *) &priv->port_stats)[i];
275 for (i = 0; i < priv->tx_ring_num; i++) {
276 data[index++] = priv->tx_ring[i].packets;
277 data[index++] = priv->tx_ring[i].bytes;
278 }
279 for (i = 0; i < priv->rx_ring_num; i++) {
280 data[index++] = priv->rx_ring[i].packets;
281 data[index++] = priv->rx_ring[i].bytes;
282 }
283 for (i = 0; i < NUM_PKT_STATS; i++)
284 data[index++] = ((unsigned long *) &priv->pkstats)[i];
285 spin_unlock_bh(&priv->stats_lock);
286
287}
288
289static void mlx4_en_get_strings(struct net_device *dev,
290 uint32_t stringset, uint8_t *data)
291{
292 struct mlx4_en_priv *priv = netdev_priv(dev);
293 int index = 0;
294 int i;
295
296 if (stringset != ETH_SS_STATS)
297 return;
298
299 /* Add main counters */
300 for (i = 0; i < NUM_MAIN_STATS; i++)
301 strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
302 for (i = 0; i < NUM_PORT_STATS; i++)
303 strcpy(data + (index++) * ETH_GSTRING_LEN,
304 main_strings[i + NUM_MAIN_STATS]);
305 for (i = 0; i < priv->tx_ring_num; i++) {
306 sprintf(data + (index++) * ETH_GSTRING_LEN,
307 "tx%d_packets", i);
308 sprintf(data + (index++) * ETH_GSTRING_LEN,
309 "tx%d_bytes", i);
310 }
311 for (i = 0; i < priv->rx_ring_num; i++) {
312 sprintf(data + (index++) * ETH_GSTRING_LEN,
313 "rx%d_packets", i);
314 sprintf(data + (index++) * ETH_GSTRING_LEN,
315 "rx%d_bytes", i);
316 }
317 for (i = 0; i < NUM_PKT_STATS; i++)
318 strcpy(data + (index++) * ETH_GSTRING_LEN,
319 main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
320}
321
322static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
323{
324 cmd->autoneg = AUTONEG_DISABLE;
325 cmd->supported = SUPPORTED_10000baseT_Full;
326	cmd->advertising = ADVERTISED_10000baseT_Full;
327 if (netif_carrier_ok(dev)) {
328 cmd->speed = SPEED_10000;
329 cmd->duplex = DUPLEX_FULL;
330 } else {
331 cmd->speed = -1;
332 cmd->duplex = -1;
333 }
334 return 0;
335}
336
337static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
338{
339 if ((cmd->autoneg == AUTONEG_ENABLE) ||
340 (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
341 return -EINVAL;
342
343 /* Nothing to change */
344 return 0;
345}
346
347static int mlx4_en_get_coalesce(struct net_device *dev,
348 struct ethtool_coalesce *coal)
349{
350 struct mlx4_en_priv *priv = netdev_priv(dev);
351
352 coal->tx_coalesce_usecs = 0;
353 coal->tx_max_coalesced_frames = 0;
354 coal->rx_coalesce_usecs = priv->rx_usecs;
355 coal->rx_max_coalesced_frames = priv->rx_frames;
356
357 coal->pkt_rate_low = priv->pkt_rate_low;
358 coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
359 coal->pkt_rate_high = priv->pkt_rate_high;
360 coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
361 coal->rate_sample_interval = priv->sample_interval;
362 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
363 return 0;
364}
365
366static int mlx4_en_set_coalesce(struct net_device *dev,
367 struct ethtool_coalesce *coal)
368{
369 struct mlx4_en_priv *priv = netdev_priv(dev);
370 int err, i;
371
372 priv->rx_frames = (coal->rx_max_coalesced_frames ==
373 MLX4_EN_AUTO_CONF) ?
374 MLX4_EN_RX_COAL_TARGET /
375 priv->dev->mtu + 1 :
376 coal->rx_max_coalesced_frames;
377 priv->rx_usecs = (coal->rx_coalesce_usecs ==
378 MLX4_EN_AUTO_CONF) ?
379 MLX4_EN_RX_COAL_TIME :
380 coal->rx_coalesce_usecs;
381
382 /* Set adaptive coalescing params */
383 priv->pkt_rate_low = coal->pkt_rate_low;
384 priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
385 priv->pkt_rate_high = coal->pkt_rate_high;
386 priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
387 priv->sample_interval = coal->rate_sample_interval;
388 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
389 priv->last_moder_time = MLX4_EN_AUTO_CONF;
390 if (priv->adaptive_rx_coal)
391 return 0;
392
393 for (i = 0; i < priv->rx_ring_num; i++) {
394 priv->rx_cq[i].moder_cnt = priv->rx_frames;
395 priv->rx_cq[i].moder_time = priv->rx_usecs;
396 err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
397 if (err)
398 return err;
399 }
400 return 0;
401}
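
A side note on the auto-configuration path above: when userspace passes MLX4_EN_AUTO_CONF, the frame count is derived from the MTU and the time value falls back to a fixed default. A minimal standalone sketch of that arithmetic follows; the constant values are placeholders, since the real MLX4_EN_* definitions live in mlx4_en.h and are not part of this hunk.

#include <stdio.h>
#include <stdint.h>

/* Placeholder values -- the real constants are defined in mlx4_en.h. */
#define MLX4_EN_AUTO_CONF	0xffff
#define MLX4_EN_RX_COAL_TARGET	0x20000		/* assumed byte budget per interrupt */
#define MLX4_EN_RX_COAL_TIME	0x10		/* assumed usec default */

/* Mirrors the rx_frames/rx_usecs selection in mlx4_en_set_coalesce(). */
static void pick_rx_moderation(uint32_t req_frames, uint32_t req_usecs, int mtu,
			       uint32_t *rx_frames, uint32_t *rx_usecs)
{
	*rx_frames = (req_frames == MLX4_EN_AUTO_CONF) ?
			MLX4_EN_RX_COAL_TARGET / mtu + 1 : req_frames;
	*rx_usecs = (req_usecs == MLX4_EN_AUTO_CONF) ?
			MLX4_EN_RX_COAL_TIME : req_usecs;
}

int main(void)
{
	uint32_t frames, usecs;

	pick_rx_moderation(MLX4_EN_AUTO_CONF, MLX4_EN_AUTO_CONF, 1500, &frames, &usecs);
	printf("auto: %u frames, %u usecs\n", (unsigned)frames, (unsigned)usecs);
	return 0;
}
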
402
403static int mlx4_en_set_pauseparam(struct net_device *dev,
404 struct ethtool_pauseparam *pause)
405{
406 struct mlx4_en_priv *priv = netdev_priv(dev);
407 struct mlx4_en_dev *mdev = priv->mdev;
408 int err;
409
410 mdev->profile.tx_pause = pause->tx_pause != 0;
411 mdev->profile.rx_pause = pause->rx_pause != 0;
412 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
413 priv->rx_skb_size + ETH_FCS_LEN,
414 mdev->profile.tx_pause,
415 mdev->profile.tx_ppp,
416 mdev->profile.rx_pause,
417 mdev->profile.rx_ppp);
418 if (err)
419		mlx4_err(mdev, "Failed setting pause params\n");
420
421 return err;
422}
423
424static void mlx4_en_get_pauseparam(struct net_device *dev,
425 struct ethtool_pauseparam *pause)
426{
427 struct mlx4_en_priv *priv = netdev_priv(dev);
428 struct mlx4_en_dev *mdev = priv->mdev;
429
430 pause->tx_pause = mdev->profile.tx_pause;
431 pause->rx_pause = mdev->profile.rx_pause;
432}
433
434static void mlx4_en_get_ringparam(struct net_device *dev,
435 struct ethtool_ringparam *param)
436{
437 struct mlx4_en_priv *priv = netdev_priv(dev);
438 struct mlx4_en_dev *mdev = priv->mdev;
439
440 memset(param, 0, sizeof(*param));
441 param->rx_max_pending = mdev->dev->caps.max_rq_sg;
442 param->tx_max_pending = mdev->dev->caps.max_sq_sg;
443 param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
444 param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
445}
446
447const struct ethtool_ops mlx4_en_ethtool_ops = {
448 .get_drvinfo = mlx4_en_get_drvinfo,
449 .get_settings = mlx4_en_get_settings,
450 .set_settings = mlx4_en_set_settings,
451#ifdef NETIF_F_TSO
452 .get_tso = mlx4_en_get_tso,
453 .set_tso = mlx4_en_set_tso,
454#endif
455 .get_sg = ethtool_op_get_sg,
456 .set_sg = ethtool_op_set_sg,
457 .get_link = ethtool_op_get_link,
458 .get_rx_csum = mlx4_en_get_rx_csum,
459 .set_rx_csum = mlx4_en_set_rx_csum,
460 .get_tx_csum = ethtool_op_get_tx_csum,
461 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
462 .get_strings = mlx4_en_get_strings,
463 .get_sset_count = mlx4_en_get_sset_count,
464 .get_ethtool_stats = mlx4_en_get_ethtool_stats,
465 .get_wol = mlx4_en_get_wol,
466 .get_msglevel = mlx4_en_get_msglevel,
467 .set_msglevel = mlx4_en_set_msglevel,
468 .get_coalesce = mlx4_en_get_coalesce,
469 .set_coalesce = mlx4_en_set_coalesce,
470 .get_pauseparam = mlx4_en_get_pauseparam,
471 .set_pauseparam = mlx4_en_set_pauseparam,
472 .get_ringparam = mlx4_en_get_ringparam,
473 .get_flags = ethtool_op_get_flags,
474 .set_flags = ethtool_op_set_flags,
475};
476
477
478
479
480
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
new file mode 100644
index 000000000000..c5a4c0389752
--- /dev/null
+++ b/drivers/net/mlx4/en_port.c
@@ -0,0 +1,261 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34
35#include <linux/if_vlan.h>
36
37#include <linux/mlx4/device.h>
38#include <linux/mlx4/cmd.h>
39
40#include "en_port.h"
41#include "mlx4_en.h"
42
43
44int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
45 u64 mac, u64 clear, u8 mode)
46{
47 return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
48 MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
49}
50
51int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
52{
53 struct mlx4_cmd_mailbox *mailbox;
54 struct mlx4_set_vlan_fltr_mbox *filter;
55 int i;
56 int j;
57 int index = 0;
58 u32 entry;
59 int err = 0;
60
61 mailbox = mlx4_alloc_cmd_mailbox(dev);
62 if (IS_ERR(mailbox))
63 return PTR_ERR(mailbox);
64
65 filter = mailbox->buf;
66 if (grp) {
67 memset(filter, 0, sizeof *filter);
68 for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
69 entry = 0;
70 for (j = 0; j < 32; j++)
71 if (vlan_group_get_device(grp, index++))
72 entry |= 1 << j;
73 filter->entry[i] = cpu_to_be32(entry);
74 }
75 } else {
76 /* When no vlans are configured we block all vlans */
77 memset(filter, 0, sizeof(*filter));
78 }
79 err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
80 MLX4_CMD_TIME_CLASS_B);
81 mlx4_free_cmd_mailbox(dev, mailbox);
82 return err;
83}
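
The mailbox filled above is a 4096-bit VLAN bitmap packed into 128 big-endian 32-bit words, written in reverse order: entry[VLAN_FLTR_SIZE - 1] covers VLAN ids 0-31. A small standalone sketch of that index arithmetic (vlan_fltr_pos is a hypothetical helper, not part of the driver):

#include <stdio.h>

#define VLAN_FLTR_SIZE 128

/* Hypothetical helper: which mailbox word and bit a VLAN id lands in,
 * following the reversed fill order used by mlx4_SET_VLAN_FLTR(). */
static void vlan_fltr_pos(unsigned int vid, unsigned int *entry, unsigned int *bit)
{
	*entry = VLAN_FLTR_SIZE - 1 - vid / 32;	/* entry[127] holds vids 0..31 */
	*bit = vid % 32;
}

int main(void)
{
	unsigned int entry, bit;

	vlan_fltr_pos(100, &entry, &bit);
	printf("vid 100 -> entry %u, bit %u\n", entry, bit);	/* entry 124, bit 4 */
	return 0;
}
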
84
85
86int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
87 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
88{
89 struct mlx4_cmd_mailbox *mailbox;
90 struct mlx4_set_port_general_context *context;
91 int err;
92 u32 in_mod;
93
94 mailbox = mlx4_alloc_cmd_mailbox(dev);
95 if (IS_ERR(mailbox))
96 return PTR_ERR(mailbox);
97 context = mailbox->buf;
98 memset(context, 0, sizeof *context);
99
100 context->flags = SET_PORT_GEN_ALL_VALID;
101 context->mtu = cpu_to_be16(mtu);
102 context->pptx = (pptx * (!pfctx)) << 7;
103 context->pfctx = pfctx;
104 context->pprx = (pprx * (!pfcrx)) << 7;
105 context->pfcrx = pfcrx;
106
107 in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
108 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
109 MLX4_CMD_TIME_CLASS_B);
110
111 mlx4_free_cmd_mailbox(dev, mailbox);
112 return err;
113}
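
A detail worth calling out in the general port context above: bit 7 of pptx/pprx enables global pause, and the (pptx * (!pfctx)) << 7 expression suppresses it whenever any per-priority flow-control (PFC) bit is set, so the two modes are mutually exclusive. A minimal sketch of that encoding:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the pptx/pfctx (and pprx/pfcrx) encoding in mlx4_SET_PORT_general():
 * global pause (bit 7) is only set when the PFC mask is zero. */
static uint8_t encode_pause(uint8_t global_pause, uint8_t pfc_mask)
{
	return (uint8_t)((global_pause * (!pfc_mask)) << 7);
}

int main(void)
{
	printf("pause on, no PFC   : 0x%02x\n", (unsigned)encode_pause(1, 0x00));	/* 0x80 */
	printf("pause on, PFC 0x03 : 0x%02x\n", (unsigned)encode_pause(1, 0x03));	/* 0x00 */
	return 0;
}
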
114
115int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
116 u8 promisc)
117{
118 struct mlx4_cmd_mailbox *mailbox;
119 struct mlx4_set_port_rqp_calc_context *context;
120 int err;
121 u32 in_mod;
122
123 mailbox = mlx4_alloc_cmd_mailbox(dev);
124 if (IS_ERR(mailbox))
125 return PTR_ERR(mailbox);
126 context = mailbox->buf;
127 memset(context, 0, sizeof *context);
128
129 context->base_qpn = cpu_to_be32(base_qpn);
130 context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
131 context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
132 context->intra_no_vlan = 0;
133 context->no_vlan = MLX4_NO_VLAN_IDX;
134 context->intra_vlan_miss = 0;
135 context->vlan_miss = MLX4_VLAN_MISS_IDX;
136
137 in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
138 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
139 MLX4_CMD_TIME_CLASS_B);
140
141 mlx4_free_cmd_mailbox(dev, mailbox);
142 return err;
143}
144
145
146int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
147{
148 struct mlx4_en_stat_out_mbox *mlx4_en_stats;
149 struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
150 struct net_device_stats *stats = &priv->stats;
151 struct mlx4_cmd_mailbox *mailbox;
152 u64 in_mod = reset << 8 | port;
153 int err;
154
155 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
156 if (IS_ERR(mailbox))
157 return PTR_ERR(mailbox);
158 memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
159 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
160 MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
161 if (err)
162 goto out;
163
164 mlx4_en_stats = mailbox->buf;
165
166 spin_lock_bh(&priv->stats_lock);
167
168 stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
169 be32_to_cpu(mlx4_en_stats->RDROP);
170 stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
171 be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
172 be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
173 be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
174 be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
175 be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
176 be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
177 be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
178 be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
179 be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
180 stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
181 be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
182 be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
183 be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
184 be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
185 be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
186 be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
187 be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
188 be64_to_cpu(mlx4_en_stats->ROCT_novlan);
189
190 stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
191 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
192 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
193 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
194 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
195 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
196 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
197 be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
198 be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
199 be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
200
201 stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
202 be32_to_cpu(mlx4_en_stats->RdropLength) +
203 be32_to_cpu(mlx4_en_stats->RJBBR) +
204 be32_to_cpu(mlx4_en_stats->RCRC) +
205 be32_to_cpu(mlx4_en_stats->RRUNT);
206 stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
207 stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
208 be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
209 be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
210 be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
211 be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
212 be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
213 be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
214 be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
215 be64_to_cpu(mlx4_en_stats->MCAST_novlan);
216 stats->collisions = 0;
217 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
218 stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
219 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
220 stats->rx_frame_errors = 0;
221 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
222 stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
223 stats->tx_aborted_errors = 0;
224 stats->tx_carrier_errors = 0;
225 stats->tx_fifo_errors = 0;
226 stats->tx_heartbeat_errors = 0;
227 stats->tx_window_errors = 0;
228
229 priv->pkstats.broadcast =
230 be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
231 be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
232 be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
233 be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
234 be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
235 be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
236 be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
237 be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
238 be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
239 priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
240 priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
241 priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
242 priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
243 priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
244 priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
245 priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
246 priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
247 priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
248 priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
249 priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
250 priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
251 priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
252 priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
253 priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
254 priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
255 spin_unlock_bh(&priv->stats_lock);
256
257out:
258 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
259 return err;
260}
261
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
new file mode 100644
index 000000000000..e6477f12beb5
--- /dev/null
+++ b/drivers/net/mlx4/en_port.h
@@ -0,0 +1,570 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef _MLX4_EN_PORT_H_
35#define _MLX4_EN_PORT_H_
36
37
38#define SET_PORT_GEN_ALL_VALID 0x7
39#define SET_PORT_PROMISC_SHIFT 31
40
41enum {
42 MLX4_CMD_SET_VLAN_FLTR = 0x47,
43 MLX4_CMD_SET_MCAST_FLTR = 0x48,
44 MLX4_CMD_DUMP_ETH_STATS = 0x49,
45};
46
47struct mlx4_set_port_general_context {
48 u8 reserved[3];
49 u8 flags;
50 u16 reserved2;
51 __be16 mtu;
52 u8 pptx;
53 u8 pfctx;
54 u16 reserved3;
55 u8 pprx;
56 u8 pfcrx;
57 u16 reserved4;
58};
59
60struct mlx4_set_port_rqp_calc_context {
61 __be32 base_qpn;
62 __be32 flags;
63 u8 reserved[3];
64 u8 mac_miss;
65 u8 intra_no_vlan;
66 u8 no_vlan;
67 u8 intra_vlan_miss;
68 u8 vlan_miss;
69 u8 reserved2[3];
70 u8 no_vlan_prio;
71 __be32 promisc;
72 __be32 mcast;
73};
74
75#define VLAN_FLTR_SIZE 128
76struct mlx4_set_vlan_fltr_mbox {
77 __be32 entry[VLAN_FLTR_SIZE];
78};
79
80
81enum {
82 MLX4_MCAST_CONFIG = 0,
83 MLX4_MCAST_DISABLE = 1,
84 MLX4_MCAST_ENABLE = 2,
85};
86
87
88struct mlx4_en_stat_out_mbox {
89 /* Received frames with a length of 64 octets */
90 __be64 R64_prio_0;
91 __be64 R64_prio_1;
92 __be64 R64_prio_2;
93 __be64 R64_prio_3;
94 __be64 R64_prio_4;
95 __be64 R64_prio_5;
96 __be64 R64_prio_6;
97 __be64 R64_prio_7;
98 __be64 R64_novlan;
99	/* Received frames with a length of 65 to 127 octets */
100 __be64 R127_prio_0;
101 __be64 R127_prio_1;
102 __be64 R127_prio_2;
103 __be64 R127_prio_3;
104 __be64 R127_prio_4;
105 __be64 R127_prio_5;
106 __be64 R127_prio_6;
107 __be64 R127_prio_7;
108 __be64 R127_novlan;
109	/* Received frames with a length of 128 to 255 octets */
110 __be64 R255_prio_0;
111 __be64 R255_prio_1;
112 __be64 R255_prio_2;
113 __be64 R255_prio_3;
114 __be64 R255_prio_4;
115 __be64 R255_prio_5;
116 __be64 R255_prio_6;
117 __be64 R255_prio_7;
118 __be64 R255_novlan;
119	/* Received frames with a length of 256 to 511 octets */
120 __be64 R511_prio_0;
121 __be64 R511_prio_1;
122 __be64 R511_prio_2;
123 __be64 R511_prio_3;
124 __be64 R511_prio_4;
125 __be64 R511_prio_5;
126 __be64 R511_prio_6;
127 __be64 R511_prio_7;
128 __be64 R511_novlan;
129	/* Received frames with a length of 512 to 1023 octets */
130 __be64 R1023_prio_0;
131 __be64 R1023_prio_1;
132 __be64 R1023_prio_2;
133 __be64 R1023_prio_3;
134 __be64 R1023_prio_4;
135 __be64 R1023_prio_5;
136 __be64 R1023_prio_6;
137 __be64 R1023_prio_7;
138 __be64 R1023_novlan;
139	/* Received frames with a length of 1024 to 1518 octets */
140 __be64 R1518_prio_0;
141 __be64 R1518_prio_1;
142 __be64 R1518_prio_2;
143 __be64 R1518_prio_3;
144 __be64 R1518_prio_4;
145 __be64 R1518_prio_5;
146 __be64 R1518_prio_6;
147 __be64 R1518_prio_7;
148 __be64 R1518_novlan;
149	/* Received frames with a length of 1519 to 1522 octets */
150 __be64 R1522_prio_0;
151 __be64 R1522_prio_1;
152 __be64 R1522_prio_2;
153 __be64 R1522_prio_3;
154 __be64 R1522_prio_4;
155 __be64 R1522_prio_5;
156 __be64 R1522_prio_6;
157 __be64 R1522_prio_7;
158 __be64 R1522_novlan;
159	/* Received frames with a length of 1523 to 1548 octets */
160 __be64 R1548_prio_0;
161 __be64 R1548_prio_1;
162 __be64 R1548_prio_2;
163 __be64 R1548_prio_3;
164 __be64 R1548_prio_4;
165 __be64 R1548_prio_5;
166 __be64 R1548_prio_6;
167 __be64 R1548_prio_7;
168 __be64 R1548_novlan;
169	/* Received frames with a length of 1549 octets up to the MTU */
170 __be64 R2MTU_prio_0;
171 __be64 R2MTU_prio_1;
172 __be64 R2MTU_prio_2;
173 __be64 R2MTU_prio_3;
174 __be64 R2MTU_prio_4;
175 __be64 R2MTU_prio_5;
176 __be64 R2MTU_prio_6;
177 __be64 R2MTU_prio_7;
178 __be64 R2MTU_novlan;
179	/* Received frames with a length greater than the MTU and a good CRC */
180 __be64 RGIANT_prio_0;
181 __be64 RGIANT_prio_1;
182 __be64 RGIANT_prio_2;
183 __be64 RGIANT_prio_3;
184 __be64 RGIANT_prio_4;
185 __be64 RGIANT_prio_5;
186 __be64 RGIANT_prio_6;
187 __be64 RGIANT_prio_7;
188 __be64 RGIANT_novlan;
189 /* Received broadcast frames with good CRC */
190 __be64 RBCAST_prio_0;
191 __be64 RBCAST_prio_1;
192 __be64 RBCAST_prio_2;
193 __be64 RBCAST_prio_3;
194 __be64 RBCAST_prio_4;
195 __be64 RBCAST_prio_5;
196 __be64 RBCAST_prio_6;
197 __be64 RBCAST_prio_7;
198 __be64 RBCAST_novlan;
199 /* Received multicast frames with good CRC */
200 __be64 MCAST_prio_0;
201 __be64 MCAST_prio_1;
202 __be64 MCAST_prio_2;
203 __be64 MCAST_prio_3;
204 __be64 MCAST_prio_4;
205 __be64 MCAST_prio_5;
206 __be64 MCAST_prio_6;
207 __be64 MCAST_prio_7;
208 __be64 MCAST_novlan;
209	/* Received unicast frames that are neither short nor GIANT, with a good CRC */
210 __be64 RTOTG_prio_0;
211 __be64 RTOTG_prio_1;
212 __be64 RTOTG_prio_2;
213 __be64 RTOTG_prio_3;
214 __be64 RTOTG_prio_4;
215 __be64 RTOTG_prio_5;
216 __be64 RTOTG_prio_6;
217 __be64 RTOTG_prio_7;
218 __be64 RTOTG_novlan;
219
220 /* Count of total octets of received frames, includes framing characters */
221 __be64 RTTLOCT_prio_0;
222 /* Count of total octets of received frames, not including framing
223 characters */
224 __be64 RTTLOCT_NOFRM_prio_0;
225 /* Count of Total number of octets received
226 (only for frames without errors) */
227 __be64 ROCT_prio_0;
228
229 __be64 RTTLOCT_prio_1;
230 __be64 RTTLOCT_NOFRM_prio_1;
231 __be64 ROCT_prio_1;
232
233 __be64 RTTLOCT_prio_2;
234 __be64 RTTLOCT_NOFRM_prio_2;
235 __be64 ROCT_prio_2;
236
237 __be64 RTTLOCT_prio_3;
238 __be64 RTTLOCT_NOFRM_prio_3;
239 __be64 ROCT_prio_3;
240
241 __be64 RTTLOCT_prio_4;
242 __be64 RTTLOCT_NOFRM_prio_4;
243 __be64 ROCT_prio_4;
244
245 __be64 RTTLOCT_prio_5;
246 __be64 RTTLOCT_NOFRM_prio_5;
247 __be64 ROCT_prio_5;
248
249 __be64 RTTLOCT_prio_6;
250 __be64 RTTLOCT_NOFRM_prio_6;
251 __be64 ROCT_prio_6;
252
253 __be64 RTTLOCT_prio_7;
254 __be64 RTTLOCT_NOFRM_prio_7;
255 __be64 ROCT_prio_7;
256
257 __be64 RTTLOCT_novlan;
258 __be64 RTTLOCT_NOFRM_novlan;
259 __be64 ROCT_novlan;
260
261 /* Count of Total received frames including bad frames */
262 __be64 RTOT_prio_0;
263 /* Count of Total number of received frames with 802.1Q encapsulation */
264 __be64 R1Q_prio_0;
265 __be64 reserved1;
266
267 __be64 RTOT_prio_1;
268 __be64 R1Q_prio_1;
269 __be64 reserved2;
270
271 __be64 RTOT_prio_2;
272 __be64 R1Q_prio_2;
273 __be64 reserved3;
274
275 __be64 RTOT_prio_3;
276 __be64 R1Q_prio_3;
277 __be64 reserved4;
278
279 __be64 RTOT_prio_4;
280 __be64 R1Q_prio_4;
281 __be64 reserved5;
282
283 __be64 RTOT_prio_5;
284 __be64 R1Q_prio_5;
285 __be64 reserved6;
286
287 __be64 RTOT_prio_6;
288 __be64 R1Q_prio_6;
289 __be64 reserved7;
290
291 __be64 RTOT_prio_7;
292 __be64 R1Q_prio_7;
293 __be64 reserved8;
294
295 __be64 RTOT_novlan;
296 __be64 R1Q_novlan;
297 __be64 reserved9;
298
299 /* Total number of Successfully Received Control Frames */
300 __be64 RCNTL;
301 __be64 reserved10;
302 __be64 reserved11;
303 __be64 reserved12;
304 /* Count of received frames with a length/type field value between 46
305	   (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames),
306 inclusive */
307 __be64 RInRangeLengthErr;
308 /* Count of received frames with length/type field between 1501 and 1535
309 decimal, inclusive */
310 __be64 ROutRangeLengthErr;
311 /* Count of received frames that are longer than max allowed size for
312 802.3 frames (1518/1522) */
313 __be64 RFrmTooLong;
314 /* Count frames received with PCS error */
315 __be64 PCS;
316
317 /* Transmit frames with a length of 64 octets */
318 __be64 T64_prio_0;
319 __be64 T64_prio_1;
320 __be64 T64_prio_2;
321 __be64 T64_prio_3;
322 __be64 T64_prio_4;
323 __be64 T64_prio_5;
324 __be64 T64_prio_6;
325 __be64 T64_prio_7;
326 __be64 T64_novlan;
327 __be64 T64_loopbk;
328 /* Transmit frames with a length of 65 to 127 octets. */
329 __be64 T127_prio_0;
330 __be64 T127_prio_1;
331 __be64 T127_prio_2;
332 __be64 T127_prio_3;
333 __be64 T127_prio_4;
334 __be64 T127_prio_5;
335 __be64 T127_prio_6;
336 __be64 T127_prio_7;
337 __be64 T127_novlan;
338 __be64 T127_loopbk;
339 /* Transmit frames with a length of 128 to 255 octets */
340 __be64 T255_prio_0;
341 __be64 T255_prio_1;
342 __be64 T255_prio_2;
343 __be64 T255_prio_3;
344 __be64 T255_prio_4;
345 __be64 T255_prio_5;
346 __be64 T255_prio_6;
347 __be64 T255_prio_7;
348 __be64 T255_novlan;
349 __be64 T255_loopbk;
350 /* Transmit frames with a length of 256 to 511 octets */
351 __be64 T511_prio_0;
352 __be64 T511_prio_1;
353 __be64 T511_prio_2;
354 __be64 T511_prio_3;
355 __be64 T511_prio_4;
356 __be64 T511_prio_5;
357 __be64 T511_prio_6;
358 __be64 T511_prio_7;
359 __be64 T511_novlan;
360 __be64 T511_loopbk;
361 /* Transmit frames with a length of 512 to 1023 octets */
362 __be64 T1023_prio_0;
363 __be64 T1023_prio_1;
364 __be64 T1023_prio_2;
365 __be64 T1023_prio_3;
366 __be64 T1023_prio_4;
367 __be64 T1023_prio_5;
368 __be64 T1023_prio_6;
369 __be64 T1023_prio_7;
370 __be64 T1023_novlan;
371 __be64 T1023_loopbk;
372 /* Transmit frames with a length of 1024 to 1518 octets */
373 __be64 T1518_prio_0;
374 __be64 T1518_prio_1;
375 __be64 T1518_prio_2;
376 __be64 T1518_prio_3;
377 __be64 T1518_prio_4;
378 __be64 T1518_prio_5;
379 __be64 T1518_prio_6;
380 __be64 T1518_prio_7;
381 __be64 T1518_novlan;
382 __be64 T1518_loopbk;
383 /* Counts transmit frames with a length of 1519 to 1522 bytes */
384 __be64 T1522_prio_0;
385 __be64 T1522_prio_1;
386 __be64 T1522_prio_2;
387 __be64 T1522_prio_3;
388 __be64 T1522_prio_4;
389 __be64 T1522_prio_5;
390 __be64 T1522_prio_6;
391 __be64 T1522_prio_7;
392 __be64 T1522_novlan;
393 __be64 T1522_loopbk;
394 /* Transmit frames with a length of 1523 to 1548 octets */
395 __be64 T1548_prio_0;
396 __be64 T1548_prio_1;
397 __be64 T1548_prio_2;
398 __be64 T1548_prio_3;
399 __be64 T1548_prio_4;
400 __be64 T1548_prio_5;
401 __be64 T1548_prio_6;
402 __be64 T1548_prio_7;
403 __be64 T1548_novlan;
404 __be64 T1548_loopbk;
405 /* Counts transmit frames with a length of 1549 to MTU bytes */
406 __be64 T2MTU_prio_0;
407 __be64 T2MTU_prio_1;
408 __be64 T2MTU_prio_2;
409 __be64 T2MTU_prio_3;
410 __be64 T2MTU_prio_4;
411 __be64 T2MTU_prio_5;
412 __be64 T2MTU_prio_6;
413 __be64 T2MTU_prio_7;
414 __be64 T2MTU_novlan;
415 __be64 T2MTU_loopbk;
416 /* Transmit frames with a length greater than MTU octets and a good CRC. */
417 __be64 TGIANT_prio_0;
418 __be64 TGIANT_prio_1;
419 __be64 TGIANT_prio_2;
420 __be64 TGIANT_prio_3;
421 __be64 TGIANT_prio_4;
422 __be64 TGIANT_prio_5;
423 __be64 TGIANT_prio_6;
424 __be64 TGIANT_prio_7;
425 __be64 TGIANT_novlan;
426 __be64 TGIANT_loopbk;
427 /* Transmit broadcast frames with a good CRC */
428 __be64 TBCAST_prio_0;
429 __be64 TBCAST_prio_1;
430 __be64 TBCAST_prio_2;
431 __be64 TBCAST_prio_3;
432 __be64 TBCAST_prio_4;
433 __be64 TBCAST_prio_5;
434 __be64 TBCAST_prio_6;
435 __be64 TBCAST_prio_7;
436 __be64 TBCAST_novlan;
437 __be64 TBCAST_loopbk;
438 /* Transmit multicast frames with a good CRC */
439 __be64 TMCAST_prio_0;
440 __be64 TMCAST_prio_1;
441 __be64 TMCAST_prio_2;
442 __be64 TMCAST_prio_3;
443 __be64 TMCAST_prio_4;
444 __be64 TMCAST_prio_5;
445 __be64 TMCAST_prio_6;
446 __be64 TMCAST_prio_7;
447 __be64 TMCAST_novlan;
448 __be64 TMCAST_loopbk;
449 /* Transmit good frames that are neither broadcast nor multicast */
450 __be64 TTOTG_prio_0;
451 __be64 TTOTG_prio_1;
452 __be64 TTOTG_prio_2;
453 __be64 TTOTG_prio_3;
454 __be64 TTOTG_prio_4;
455 __be64 TTOTG_prio_5;
456 __be64 TTOTG_prio_6;
457 __be64 TTOTG_prio_7;
458 __be64 TTOTG_novlan;
459 __be64 TTOTG_loopbk;
460
461 /* total octets of transmitted frames, including framing characters */
462 __be64 TTTLOCT_prio_0;
463 /* total octets of transmitted frames, not including framing characters */
464 __be64 TTTLOCT_NOFRM_prio_0;
465 /* ifOutOctets */
466 __be64 TOCT_prio_0;
467
468 __be64 TTTLOCT_prio_1;
469 __be64 TTTLOCT_NOFRM_prio_1;
470 __be64 TOCT_prio_1;
471
472 __be64 TTTLOCT_prio_2;
473 __be64 TTTLOCT_NOFRM_prio_2;
474 __be64 TOCT_prio_2;
475
476 __be64 TTTLOCT_prio_3;
477 __be64 TTTLOCT_NOFRM_prio_3;
478 __be64 TOCT_prio_3;
479
480 __be64 TTTLOCT_prio_4;
481 __be64 TTTLOCT_NOFRM_prio_4;
482 __be64 TOCT_prio_4;
483
484 __be64 TTTLOCT_prio_5;
485 __be64 TTTLOCT_NOFRM_prio_5;
486 __be64 TOCT_prio_5;
487
488 __be64 TTTLOCT_prio_6;
489 __be64 TTTLOCT_NOFRM_prio_6;
490 __be64 TOCT_prio_6;
491
492 __be64 TTTLOCT_prio_7;
493 __be64 TTTLOCT_NOFRM_prio_7;
494 __be64 TOCT_prio_7;
495
496 __be64 TTTLOCT_novlan;
497 __be64 TTTLOCT_NOFRM_novlan;
498 __be64 TOCT_novlan;
499
500 __be64 TTTLOCT_loopbk;
501 __be64 TTTLOCT_NOFRM_loopbk;
502 __be64 TOCT_loopbk;
503
504 /* Total frames transmitted with a good CRC that are not aborted */
505 __be64 TTOT_prio_0;
506 /* Total number of frames transmitted with 802.1Q encapsulation */
507 __be64 T1Q_prio_0;
508 __be64 reserved13;
509
510 __be64 TTOT_prio_1;
511 __be64 T1Q_prio_1;
512 __be64 reserved14;
513
514 __be64 TTOT_prio_2;
515 __be64 T1Q_prio_2;
516 __be64 reserved15;
517
518 __be64 TTOT_prio_3;
519 __be64 T1Q_prio_3;
520 __be64 reserved16;
521
522 __be64 TTOT_prio_4;
523 __be64 T1Q_prio_4;
524 __be64 reserved17;
525
526 __be64 TTOT_prio_5;
527 __be64 T1Q_prio_5;
528 __be64 reserved18;
529
530 __be64 TTOT_prio_6;
531 __be64 T1Q_prio_6;
532 __be64 reserved19;
533
534 __be64 TTOT_prio_7;
535 __be64 T1Q_prio_7;
536 __be64 reserved20;
537
538 __be64 TTOT_novlan;
539 __be64 T1Q_novlan;
540 __be64 reserved21;
541
542 __be64 TTOT_loopbk;
543 __be64 T1Q_loopbk;
544 __be64 reserved22;
545
546 /* Received frames with a length greater than MTU octets and a bad CRC */
547 __be32 RJBBR;
548 /* Received frames with a bad CRC that are not runts, jabbers,
549 or alignment errors */
550 __be32 RCRC;
551 /* Received frames with SFD with a length of less than 64 octets and a
552 bad CRC */
553 __be32 RRUNT;
554 /* Received frames with a length less than 64 octets and a good CRC */
555 __be32 RSHORT;
556 /* Total Number of Received Packets Dropped */
557 __be32 RDROP;
558 /* Drop due to overflow */
559 __be32 RdropOvflw;
560	/* Drop due to length error */
561 __be32 RdropLength;
562 /* Total of good frames. Does not include frames received with
563 frame-too-long, FCS, or length errors */
564 __be32 RTOTFRMS;
565 /* Total dropped Xmited packets */
566 __be32 TDROP;
567};
568
569
570#endif
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
new file mode 100644
index 000000000000..a0545209e507
--- /dev/null
+++ b/drivers/net/mlx4/en_resources.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/vmalloc.h>
35#include <linux/mlx4/qp.h>
36
37#include "mlx4_en.h"
38
39void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
40 int is_tx, int rss, int qpn, int cqn, int srqn,
41 struct mlx4_qp_context *context)
42{
43 struct mlx4_en_dev *mdev = priv->mdev;
44
45 memset(context, 0, sizeof *context);
46 context->flags = cpu_to_be32(7 << 16 | rss << 13);
47 context->pd = cpu_to_be32(mdev->priv_pdn);
48 context->mtu_msgmax = 0xff;
49 context->rq_size_stride = 0;
50 if (is_tx)
51 context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
52 else
53 context->sq_size_stride = 1;
54 context->usr_page = cpu_to_be32(mdev->priv_uar.index);
55 context->local_qpn = cpu_to_be32(qpn);
56 context->pri_path.ackto = 1 & 0x07;
57 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
58 context->pri_path.counter_index = 0xff;
59 context->cqn_send = cpu_to_be32(cqn);
60 context->cqn_recv = cpu_to_be32(cqn);
61 context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
62 if (!rss)
63 context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
64}
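
For a TX queue, the sq_size_stride byte built above packs log2 of the ring size into the high bits and (log2(stride) - 4) into the low bits. A quick standalone sanity check of that encoding for a hypothetical 1024-entry ring with a 64-byte stride:

#include <stdio.h>
#include <stdint.h>

/* Simple ilog2() stand-in for the sketch (inputs are powers of two). */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Mirrors the is_tx branch of mlx4_en_fill_qp_context(). */
static uint8_t sq_size_stride(uint32_t size, uint32_t stride)
{
	return (uint8_t)(ilog2_u32(size) << 3 | (ilog2_u32(stride) - 4));
}

int main(void)
{
	/* Hypothetical example: 1024 entries, 64-byte stride -> (10 << 3) | 2 */
	printf("sq_size_stride = 0x%02x\n", (unsigned)sq_size_stride(1024, 64));	/* 0x52 */
	return 0;
}
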
65
66
67int mlx4_en_map_buffer(struct mlx4_buf *buf)
68{
69 struct page **pages;
70 int i;
71
72 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
73 return 0;
74
75 pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
76 if (!pages)
77 return -ENOMEM;
78
79 for (i = 0; i < buf->nbufs; ++i)
80 pages[i] = virt_to_page(buf->page_list[i].buf);
81
82 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
83 kfree(pages);
84 if (!buf->direct.buf)
85 return -ENOMEM;
86
87 return 0;
88}
89
90void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
91{
92 if (BITS_PER_LONG == 64 || buf->nbufs == 1)
93 return;
94
95 vunmap(buf->direct.buf);
96}
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
new file mode 100644
index 000000000000..6232227f56c3
--- /dev/null
+++ b/drivers/net/mlx4/en_rx.c
@@ -0,0 +1,1080 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/mlx4/cq.h>
35#include <linux/mlx4/qp.h>
36#include <linux/skbuff.h>
37#include <linux/if_ether.h>
38#include <linux/if_vlan.h>
39#include <linux/vmalloc.h>
40
41#include "mlx4_en.h"
42
43static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
44{
45 int offset = n << ring->srq.wqe_shift;
46 return ring->buf + offset;
47}
48
49static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
50{
51 return;
52}
53
54static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
55 void **ip_hdr, void **tcpudp_hdr,
56 u64 *hdr_flags, void *priv)
57{
58 *mac_hdr = page_address(frags->page) + frags->page_offset;
59 *ip_hdr = *mac_hdr + ETH_HLEN;
60 *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
61 *hdr_flags = LRO_IPV4 | LRO_TCP;
62
63 return 0;
64}
65
66static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
67 struct mlx4_en_rx_desc *rx_desc,
68 struct skb_frag_struct *skb_frags,
69 struct mlx4_en_rx_alloc *ring_alloc,
70 int i)
71{
72 struct mlx4_en_dev *mdev = priv->mdev;
73 struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
74 struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
75 struct page *page;
76 dma_addr_t dma;
77
78 if (page_alloc->offset == frag_info->last_offset) {
79 /* Allocate new page */
80 page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
81 if (!page)
82 return -ENOMEM;
83
84 skb_frags[i].page = page_alloc->page;
85 skb_frags[i].page_offset = page_alloc->offset;
86 page_alloc->page = page;
87 page_alloc->offset = frag_info->frag_align;
88 } else {
89 page = page_alloc->page;
90 get_page(page);
91
92 skb_frags[i].page = page;
93 skb_frags[i].page_offset = page_alloc->offset;
94 page_alloc->offset += frag_info->frag_stride;
95 }
96 dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
97 skb_frags[i].page_offset, frag_info->frag_size,
98 PCI_DMA_FROMDEVICE);
99 rx_desc->data[i].addr = cpu_to_be64(dma);
100 return 0;
101}
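
The allocator above carves receive fragments out of a higher-order page: most calls take an extra page reference and advance the offset by frag_stride, and once the last offset has been handed out a fresh page is swapped in. A simplified user-space sketch of that bookkeeping; the chunk size, stride and alignment are assumed values standing in for the frag_info fields defined in mlx4_en.h, and page reference counting/freeing is omitted:

#include <stdio.h>
#include <stdlib.h>

/* Assumed geometry -- the real values come from priv->frag_info[]. */
#define PAGE_CHUNK	65536		/* stand-in for an MLX4_EN_ALLOC_ORDER allocation */
#define FRAG_STRIDE	2048
#define FRAG_ALIGN	0
#define LAST_OFFSET	(PAGE_CHUNK - FRAG_STRIDE)

struct page_alloc {
	void *page;
	unsigned int offset;
};

/* Returns the next fragment, replacing the backing chunk once the final
 * offset has been handed out (mirrors the flow of mlx4_en_alloc_frag()). */
static void *alloc_frag(struct page_alloc *pa)
{
	void *frag = (char *)pa->page + pa->offset;

	if (pa->offset == LAST_OFFSET) {
		void *fresh = malloc(PAGE_CHUNK);	/* new backing chunk */
		if (!fresh)
			return NULL;
		pa->page = fresh;
		pa->offset = FRAG_ALIGN;
	} else {
		pa->offset += FRAG_STRIDE;		/* stay on the current chunk */
	}
	return frag;
}

int main(void)
{
	struct page_alloc pa = { malloc(PAGE_CHUNK), FRAG_ALIGN };
	int i;

	if (!pa.page)
		return 1;
	for (i = 0; i < 40; i++)
		if (!alloc_frag(&pa))
			return 1;
	printf("next offset: %u\n", pa.offset);		/* 16384 with these values */
	return 0;
}
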
102
103static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
104 struct mlx4_en_rx_ring *ring)
105{
106 struct mlx4_en_rx_alloc *page_alloc;
107 int i;
108
109 for (i = 0; i < priv->num_frags; i++) {
110 page_alloc = &ring->page_alloc[i];
111 page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
112 MLX4_EN_ALLOC_ORDER);
113 if (!page_alloc->page)
114 goto out;
115
116 page_alloc->offset = priv->frag_info[i].frag_align;
117 mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
118 i, page_alloc->page);
119 }
120 return 0;
121
122out:
123 while (i--) {
124 page_alloc = &ring->page_alloc[i];
125 put_page(page_alloc->page);
126 page_alloc->page = NULL;
127 }
128 return -ENOMEM;
129}
130
131static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
132 struct mlx4_en_rx_ring *ring)
133{
134 struct mlx4_en_rx_alloc *page_alloc;
135 int i;
136
137 for (i = 0; i < priv->num_frags; i++) {
138 page_alloc = &ring->page_alloc[i];
139 mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
140 i, page_count(page_alloc->page));
141
142 put_page(page_alloc->page);
143 page_alloc->page = NULL;
144 }
145}
146
147
148static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
149 struct mlx4_en_rx_ring *ring, int index)
150{
151 struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
152 struct skb_frag_struct *skb_frags = ring->rx_info +
153 (index << priv->log_rx_info);
154 int possible_frags;
155 int i;
156
157 /* Pre-link descriptor */
158 rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
159
160 /* Set size and memtype fields */
161 for (i = 0; i < priv->num_frags; i++) {
162 skb_frags[i].size = priv->frag_info[i].frag_size;
163 rx_desc->data[i].byte_count =
164 cpu_to_be32(priv->frag_info[i].frag_size);
165 rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
166 }
167
168 /* If the number of used fragments does not fill up the ring stride,
169 * remaining (unused) fragments must be padded with null address/size
170 * and a special memory key */
171 possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
172 for (i = priv->num_frags; i < possible_frags; i++) {
173 rx_desc->data[i].byte_count = 0;
174 rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
175 rx_desc->data[i].addr = 0;
176 }
177}
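
The padding loop above depends on how many data segments fit in one ring stride beyond the descriptor's next-segment header: possible_frags = (stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE. A tiny sketch of that arithmetic; the 16-byte sizes below are assumptions about the WQE layout, not values taken from this patch:

#include <stdio.h>

/* Assumed sizes: a hardware data segment (DS_SIZE) and the descriptor
 * header are each taken to be 16 bytes for this illustration. */
#define DS_SIZE_ASSUMED		16
#define RX_DESC_HDR_ASSUMED	16

int main(void)
{
	int stride = 128;	/* hypothetical ring stride */
	int num_frags = 3;	/* hypothetical fragments actually in use */
	int possible_frags = (stride - RX_DESC_HDR_ASSUMED) / DS_SIZE_ASSUMED;
	int i;

	/* Segments beyond num_frags would get the MLX4_EN_MEMTYPE_PAD lkey. */
	for (i = num_frags; i < possible_frags; i++)
		printf("data[%d]: padded\n", i);
	printf("possible_frags = %d\n", possible_frags);	/* 7 */
	return 0;
}
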
178
179
180static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
181 struct mlx4_en_rx_ring *ring, int index)
182{
183 struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
184 struct skb_frag_struct *skb_frags = ring->rx_info +
185 (index << priv->log_rx_info);
186 int i;
187
188 for (i = 0; i < priv->num_frags; i++)
189 if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
190 goto err;
191
192 return 0;
193
194err:
195 while (i--)
196 put_page(skb_frags[i].page);
197 return -ENOMEM;
198}
199
200static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
201{
202 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
203}
204
205static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
206{
207 struct mlx4_en_dev *mdev = priv->mdev;
208 struct mlx4_en_rx_ring *ring;
209 int ring_ind;
210 int buf_ind;
211
212 for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
213 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
214 ring = &priv->rx_ring[ring_ind];
215
216 if (mlx4_en_prepare_rx_desc(priv, ring,
217 ring->actual_size)) {
218 if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
219 mlx4_err(mdev, "Failed to allocate "
220 "enough rx buffers\n");
221 return -ENOMEM;
222 } else {
223 if (netif_msg_rx_err(priv))
224 mlx4_warn(mdev,
225 "Only %d buffers allocated\n",
226 ring->actual_size);
227 goto out;
228 }
229 }
230 ring->actual_size++;
231 ring->prod++;
232 }
233 }
234out:
235 return 0;
236}
237
238static int mlx4_en_fill_rx_buf(struct net_device *dev,
239 struct mlx4_en_rx_ring *ring)
240{
241 struct mlx4_en_priv *priv = netdev_priv(dev);
242 int num = 0;
243 int err;
244
245 while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
246 err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
247 ring->size_mask);
248 if (err) {
249 if (netif_msg_rx_err(priv))
250 mlx4_warn(priv->mdev,
251 "Failed preparing rx descriptor\n");
252 priv->port_stats.rx_alloc_failed++;
253 break;
254 }
255 ++num;
256 ++ring->prod;
257 }
258 if ((u32) (ring->prod - ring->cons) == ring->size)
259 ring->full = 1;
260
261 return num;
262}
263
264static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
265 struct mlx4_en_rx_ring *ring)
266{
267 struct mlx4_en_dev *mdev = priv->mdev;
268 struct skb_frag_struct *skb_frags;
269 struct mlx4_en_rx_desc *rx_desc;
270 dma_addr_t dma;
271 int index;
272 int nr;
273
274 mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
275 ring->cons, ring->prod);
276
277 /* Unmap and free Rx buffers */
278 BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
279 while (ring->cons != ring->prod) {
280 index = ring->cons & ring->size_mask;
281 rx_desc = ring->buf + (index << ring->log_stride);
282 skb_frags = ring->rx_info + (index << priv->log_rx_info);
283 mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
284
285 for (nr = 0; nr < priv->num_frags; nr++) {
286 mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
287 dma = be64_to_cpu(rx_desc->data[nr].addr);
288
289			mlx4_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
290 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
291 PCI_DMA_FROMDEVICE);
292 put_page(skb_frags[nr].page);
293 }
294 ++ring->cons;
295 }
296}
297
298
299void mlx4_en_rx_refill(struct work_struct *work)
300{
301 struct delayed_work *delay = container_of(work, struct delayed_work, work);
302 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
303 refill_task);
304 struct mlx4_en_dev *mdev = priv->mdev;
305 struct net_device *dev = priv->dev;
306 struct mlx4_en_rx_ring *ring;
307 int need_refill = 0;
308 int i;
309
310 mutex_lock(&mdev->state_lock);
311 if (!mdev->device_up || !priv->port_up)
312 goto out;
313
314 /* We only get here if there are no receive buffers, so we can't race
315 * with Rx interrupts while filling buffers */
316 for (i = 0; i < priv->rx_ring_num; i++) {
317 ring = &priv->rx_ring[i];
318 if (ring->need_refill) {
319 if (mlx4_en_fill_rx_buf(dev, ring)) {
320 ring->need_refill = 0;
321 mlx4_en_update_rx_prod_db(ring);
322 } else
323 need_refill = 1;
324 }
325 }
326 if (need_refill)
327 queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
328
329out:
330 mutex_unlock(&mdev->state_lock);
331}
332
333
334int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
335 struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
336{
337 struct mlx4_en_dev *mdev = priv->mdev;
338 int err;
339 int tmp;
340
341 /* Sanity check SRQ size before proceeding */
342 if (size >= mdev->dev->caps.max_srq_wqes)
343 return -EINVAL;
344
345 ring->prod = 0;
346 ring->cons = 0;
347 ring->size = size;
348 ring->size_mask = size - 1;
349 ring->stride = stride;
350 ring->log_stride = ffs(ring->stride) - 1;
351 ring->buf_size = ring->size * ring->stride;
352
353 tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
354 sizeof(struct skb_frag_struct));
355 ring->rx_info = vmalloc(tmp);
356 if (!ring->rx_info) {
357 mlx4_err(mdev, "Failed allocating rx_info ring\n");
358 return -ENOMEM;
359 }
360 mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
361 ring->rx_info, tmp);
362
363 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
364 ring->buf_size, 2 * PAGE_SIZE);
365 if (err)
366 goto err_ring;
367
368 err = mlx4_en_map_buffer(&ring->wqres.buf);
369 if (err) {
370 mlx4_err(mdev, "Failed to map RX buffer\n");
371 goto err_hwq;
372 }
373 ring->buf = ring->wqres.buf.direct.buf;
374
375	/* Configure LRO manager */
376 memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
377 ring->lro.dev = priv->dev;
378 ring->lro.features = LRO_F_NAPI;
379 ring->lro.frag_align_pad = NET_IP_ALIGN;
380 ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
381 ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
382 ring->lro.max_desc = mdev->profile.num_lro;
383 ring->lro.max_aggr = MAX_SKB_FRAGS;
384 ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
385 sizeof(struct net_lro_desc),
386 GFP_KERNEL);
387 if (!ring->lro.lro_arr) {
388 mlx4_err(mdev, "Failed to allocate lro array\n");
389 goto err_map;
390 }
391 ring->lro.get_frag_header = mlx4_en_get_frag_header;
392
393 return 0;
394
395err_map:
396 mlx4_en_unmap_buffer(&ring->wqres.buf);
397err_hwq:
398 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
399err_ring:
400 vfree(ring->rx_info);
401 ring->rx_info = NULL;
402 return err;
403}
404
405int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
406{
407 struct mlx4_en_dev *mdev = priv->mdev;
408 struct mlx4_wqe_srq_next_seg *next;
409 struct mlx4_en_rx_ring *ring;
410 int i;
411 int ring_ind;
412 int err;
413 int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
414 DS_SIZE * priv->num_frags);
415 int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
416
417 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
418 ring = &priv->rx_ring[ring_ind];
419
420 ring->prod = 0;
421 ring->cons = 0;
422 ring->actual_size = 0;
423 ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
424
425 ring->stride = stride;
426 ring->log_stride = ffs(ring->stride) - 1;
427 ring->buf_size = ring->size * ring->stride;
428
429 memset(ring->buf, 0, ring->buf_size);
430 mlx4_en_update_rx_prod_db(ring);
431
432		/* Initialize all descriptors */
433 for (i = 0; i < ring->size; i++)
434 mlx4_en_init_rx_desc(priv, ring, i);
435
436 /* Initialize page allocators */
437 err = mlx4_en_init_allocator(priv, ring);
438 if (err) {
439 mlx4_err(mdev, "Failed initializing ring allocator\n");
440 goto err_allocator;
441 }
442
443 /* Fill Rx buffers */
444 ring->full = 0;
445 }
446 if (mlx4_en_fill_rx_buffers(priv))
447 goto err_buffers;
448
449 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
450 ring = &priv->rx_ring[ring_ind];
451
452 mlx4_en_update_rx_prod_db(ring);
453
454 /* Configure SRQ representing the ring */
455 ring->srq.max = ring->size;
456 ring->srq.max_gs = max_gs;
457 ring->srq.wqe_shift = ilog2(ring->stride);
458
459 for (i = 0; i < ring->srq.max; ++i) {
460 next = get_wqe(ring, i);
461 next->next_wqe_index =
462 cpu_to_be16((i + 1) & (ring->srq.max - 1));
463 }
464
465 err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
466 ring->wqres.db.dma, &ring->srq);
467		if (err) {
468 mlx4_err(mdev, "Failed to allocate srq\n");
469 goto err_srq;
470 }
471 ring->srq.event = mlx4_en_srq_event;
472 }
473
474 return 0;
475
476err_srq:
477 while (ring_ind >= 0) {
478 ring = &priv->rx_ring[ring_ind];
479 mlx4_srq_free(mdev->dev, &ring->srq);
480 ring_ind--;
481 }
482
483err_buffers:
484 for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
485 mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
486
487 ring_ind = priv->rx_ring_num - 1;
488err_allocator:
489 while (ring_ind >= 0) {
490 mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
491 ring_ind--;
492 }
493 return err;
494}
495
496void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
497 struct mlx4_en_rx_ring *ring)
498{
499 struct mlx4_en_dev *mdev = priv->mdev;
500
501 kfree(ring->lro.lro_arr);
502 mlx4_en_unmap_buffer(&ring->wqres.buf);
503 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
504 vfree(ring->rx_info);
505 ring->rx_info = NULL;
506}
507
508void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
509 struct mlx4_en_rx_ring *ring)
510{
511 struct mlx4_en_dev *mdev = priv->mdev;
512
513 mlx4_srq_free(mdev->dev, &ring->srq);
514 mlx4_en_free_rx_buf(priv, ring);
515 mlx4_en_destroy_allocator(priv, ring);
516}
517
518
519/* Unmap a completed descriptor and free unused pages */
520static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
521 struct mlx4_en_rx_desc *rx_desc,
522 struct skb_frag_struct *skb_frags,
523 struct skb_frag_struct *skb_frags_rx,
524 struct mlx4_en_rx_alloc *page_alloc,
525 int length)
526{
527 struct mlx4_en_dev *mdev = priv->mdev;
528 struct mlx4_en_frag_info *frag_info;
529 int nr;
530 dma_addr_t dma;
531
532	/* Collect used fragments while replacing them in the HW descriptors */
533 for (nr = 0; nr < priv->num_frags; nr++) {
534 frag_info = &priv->frag_info[nr];
535 if (length <= frag_info->frag_prefix_size)
536 break;
537
538 /* Save page reference in skb */
539 skb_frags_rx[nr].page = skb_frags[nr].page;
540 skb_frags_rx[nr].size = skb_frags[nr].size;
541 skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
542 dma = be64_to_cpu(rx_desc->data[nr].addr);
543
544 /* Allocate a replacement page */
545 if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
546 goto fail;
547
548 /* Unmap buffer */
549 pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
550 PCI_DMA_FROMDEVICE);
551 }
552 /* Adjust size of last fragment to match actual length */
553 skb_frags_rx[nr - 1].size = length -
554 priv->frag_info[nr - 1].frag_prefix_size;
555 return nr;
556
557fail:
558 /* Drop all accumulated fragments (which have already been replaced in
559 * the descriptor) of this packet; remaining fragments are reused... */
560 while (nr > 0) {
561 nr--;
562 put_page(skb_frags_rx[nr].page);
563 }
564 return 0;
565}
566
567
568static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
569 struct mlx4_en_rx_desc *rx_desc,
570 struct skb_frag_struct *skb_frags,
571 struct mlx4_en_rx_alloc *page_alloc,
572 unsigned int length)
573{
574 struct mlx4_en_dev *mdev = priv->mdev;
575 struct sk_buff *skb;
576 void *va;
577 int used_frags;
578 dma_addr_t dma;
579
580 skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
581 if (!skb) {
582 mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
583 return NULL;
584 }
585 skb->dev = priv->dev;
586 skb_reserve(skb, NET_IP_ALIGN);
587 skb->len = length;
588 skb->truesize = length + sizeof(struct sk_buff);
589
590 /* Get pointer to first fragment so we could copy the headers into the
591 * (linear part of the) skb */
592 va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
593
594 if (length <= SMALL_PACKET_SIZE) {
595 /* We are copying all relevant data to the skb - temporarily
596 * synch buffers for the copy */
597 dma = be64_to_cpu(rx_desc->data[0].addr);
598 dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
599 length, DMA_FROM_DEVICE);
600 skb_copy_to_linear_data(skb, va, length);
601 dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
602 length, DMA_FROM_DEVICE);
603 skb->tail += length;
604 } else {
605
606 /* Move relevant fragments to skb */
607 used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
608 skb_shinfo(skb)->frags,
609 page_alloc, length);
610 skb_shinfo(skb)->nr_frags = used_frags;
611
612 /* Copy headers into the skb linear buffer */
613 memcpy(skb->data, va, HEADER_COPY_SIZE);
614 skb->tail += HEADER_COPY_SIZE;
615
616 /* Skip headers in first fragment */
617 skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
618
619 /* Adjust size of first fragment */
620 skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
621 skb->data_len = length - HEADER_COPY_SIZE;
622 }
623 return skb;
624}
625
626static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
627 struct mlx4_en_rx_ring *ring,
628 int from, int to, int num)
629{
630 struct skb_frag_struct *skb_frags_from;
631 struct skb_frag_struct *skb_frags_to;
632 struct mlx4_en_rx_desc *rx_desc_from;
633 struct mlx4_en_rx_desc *rx_desc_to;
634 int from_index, to_index;
635 int nr, i;
636
637 for (i = 0; i < num; i++) {
638 from_index = (from + i) & ring->size_mask;
639 to_index = (to + i) & ring->size_mask;
640 skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
641 skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
642 rx_desc_from = ring->buf + (from_index << ring->log_stride);
643 rx_desc_to = ring->buf + (to_index << ring->log_stride);
644
645 for (nr = 0; nr < priv->num_frags; nr++) {
646 skb_frags_to[nr].page = skb_frags_from[nr].page;
647 skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
648 rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
649 }
650 }
651}
652
653
654int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
655{
656 struct mlx4_en_priv *priv = netdev_priv(dev);
657 struct mlx4_en_dev *mdev = priv->mdev;
658 struct mlx4_cqe *cqe;
659 struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
660 struct skb_frag_struct *skb_frags;
661 struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
662 struct mlx4_en_rx_desc *rx_desc;
663 struct sk_buff *skb;
664 int index;
665 int nr;
666 unsigned int length;
667 int polled = 0;
668 int ip_summed;
669
670 if (!priv->port_up)
671 return 0;
672
673 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
674 * descriptor offset can be deduced from the CQE index instead of
675 * reading 'cqe->index' */
676 index = cq->mcq.cons_index & ring->size_mask;
677 cqe = &cq->buf[index];
678
679 /* Process all completed CQEs */
680 while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
681 cq->mcq.cons_index & cq->size)) {
682
683 skb_frags = ring->rx_info + (index << priv->log_rx_info);
684 rx_desc = ring->buf + (index << ring->log_stride);
685
686 /*
687 * make sure we read the CQE after we read the ownership bit
688 */
689 rmb();
690
691 /* Drop packet on bad receive or bad checksum */
692 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
693 MLX4_CQE_OPCODE_ERROR)) {
694 mlx4_err(mdev, "CQE completed in error - vendor "
695				 "syndrome:%d syndrome:%d\n",
696 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
697 ((struct mlx4_err_cqe *) cqe)->syndrome);
698 goto next;
699 }
700 if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
701 mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
702 goto next;
703 }
704
705 /*
706 * Packet is OK - process it.
707 */
708 length = be32_to_cpu(cqe->byte_cnt);
709 ring->bytes += length;
710 ring->packets++;
711
712 if (likely(priv->rx_csum)) {
713 if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
714 (cqe->checksum == cpu_to_be16(0xffff))) {
715 priv->port_stats.rx_chksum_good++;
716 /* This packet is eligible for LRO if it is:
717 * - DIX Ethernet (type interpretation)
718 * - TCP/IP (v4)
719 * - without IP options
720 * - not an IP fragment */
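				/* (These conditions are encoded as a single mask-and-compare on
				 * the CQE status bits by mlx4_en_can_lro() in mlx4_en.h.) */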
721 if (mlx4_en_can_lro(cqe->status) &&
722 dev->features & NETIF_F_LRO) {
723
724 nr = mlx4_en_complete_rx_desc(
725 priv, rx_desc,
726 skb_frags, lro_frags,
727 ring->page_alloc, length);
728 if (!nr)
729 goto next;
730
731 if (priv->vlgrp && (cqe->vlan_my_qpn &
732 cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
733 lro_vlan_hwaccel_receive_frags(
734 &ring->lro, lro_frags,
735 length, length,
736 priv->vlgrp,
737 be16_to_cpu(cqe->sl_vid),
738 NULL, 0);
739 } else
740 lro_receive_frags(&ring->lro,
741 lro_frags,
742 length,
743 length,
744 NULL, 0);
745
746 goto next;
747 }
748
749 /* LRO not possible, complete processing here */
750 ip_summed = CHECKSUM_UNNECESSARY;
751 INC_PERF_COUNTER(priv->pstats.lro_misses);
752 } else {
753 ip_summed = CHECKSUM_NONE;
754 priv->port_stats.rx_chksum_none++;
755 }
756 } else {
757 ip_summed = CHECKSUM_NONE;
758 priv->port_stats.rx_chksum_none++;
759 }
760
761 skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
762 ring->page_alloc, length);
763 if (!skb) {
764 priv->stats.rx_dropped++;
765 goto next;
766 }
767
768 skb->ip_summed = ip_summed;
769 skb->protocol = eth_type_trans(skb, dev);
770
771 /* Push it up the stack */
772 if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
773 MLX4_CQE_VLAN_PRESENT_MASK)) {
774 vlan_hwaccel_receive_skb(skb, priv->vlgrp,
775 be16_to_cpu(cqe->sl_vid));
776 } else
777 netif_receive_skb(skb);
778
779 dev->last_rx = jiffies;
780
781next:
782 ++cq->mcq.cons_index;
783 index = (cq->mcq.cons_index) & ring->size_mask;
784 cqe = &cq->buf[index];
785 if (++polled == budget) {
786 /* We are here because we reached the NAPI budget -
787 * flush only pending LRO sessions */
788 lro_flush_all(&ring->lro);
789 goto out;
790 }
791 }
792
793 /* If CQ is empty flush all LRO sessions unconditionally */
794 lro_flush_all(&ring->lro);
795
796out:
797 AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
798 mlx4_cq_set_ci(&cq->mcq);
799 wmb(); /* ensure HW sees CQ consumer before we post new buffers */
800 ring->cons = cq->mcq.cons_index;
801	ring->prod += polled; /* Polled descriptors were reallocated in place */
802 if (unlikely(!ring->full)) {
803 mlx4_en_copy_desc(priv, ring, ring->cons - polled,
804 ring->prod - polled, polled);
805 mlx4_en_fill_rx_buf(dev, ring);
806 }
807 mlx4_en_update_rx_prod_db(ring);
808 return polled;
809}
810
811
812void mlx4_en_rx_irq(struct mlx4_cq *mcq)
813{
814 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
815 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
816
817 if (priv->port_up)
818 netif_rx_schedule(cq->dev, &cq->napi);
819 else
820 mlx4_en_arm_cq(priv, cq);
821}
822
823/* Rx CQ polling - called by NAPI */
824int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
825{
826 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
827 struct net_device *dev = cq->dev;
828 struct mlx4_en_priv *priv = netdev_priv(dev);
829 int done;
830
831 done = mlx4_en_process_rx_cq(dev, cq, budget);
832
833 /* If we used up all the quota - we're probably not done yet... */
834 if (done == budget)
835 INC_PERF_COUNTER(priv->pstats.napi_quota);
836 else {
837 /* Done for now */
838 netif_rx_complete(dev, napi);
839 mlx4_en_arm_cq(priv, cq);
840 }
841 return done;
842}
843
844
845/* Calculate the last offset position that accommodates a full fragment
846 * (assuming fragment size = stride - align) */
847static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
848{
849 u16 res = MLX4_EN_ALLOC_SIZE % stride;
850 u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
851
852 mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
853 "res:%d offset:%d\n", stride, align, res, offset);
854 return offset;
855}
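/* Editorial example (assuming 4 KiB pages, NET_IP_ALIGN = 2 and 64-byte cache
 * lines): MLX4_EN_ALLOC_SIZE = 16384 and the first fragment has stride = 512
 * and align = 2, so res = 16384 % 512 = 0 and the returned offset is
 * 16384 - 512 - 0 + 2 = 15874, the last page offset that still holds a full,
 * correctly aligned stride. */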
856
857
858static int frag_sizes[] = {
859 FRAG_SZ0,
860 FRAG_SZ1,
861 FRAG_SZ2,
862 FRAG_SZ3
863};
864
865void mlx4_en_calc_rx_buf(struct net_device *dev)
866{
867 struct mlx4_en_priv *priv = netdev_priv(dev);
868 int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
869 int buf_size = 0;
870 int i = 0;
871
872 while (buf_size < eff_mtu) {
873 priv->frag_info[i].frag_size =
874 (eff_mtu > buf_size + frag_sizes[i]) ?
875 frag_sizes[i] : eff_mtu - buf_size;
876 priv->frag_info[i].frag_prefix_size = buf_size;
877 if (!i) {
878 priv->frag_info[i].frag_align = NET_IP_ALIGN;
879 priv->frag_info[i].frag_stride =
880 ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
881 } else {
882 priv->frag_info[i].frag_align = 0;
883 priv->frag_info[i].frag_stride =
884 ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
885 }
886 priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
887 priv, priv->frag_info[i].frag_stride,
888 priv->frag_info[i].frag_align);
889 buf_size += priv->frag_info[i].frag_size;
890 i++;
891 }
892
893 priv->num_frags = i;
894 priv->rx_skb_size = eff_mtu;
895 priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
896
897 mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
898 "num_frags:%d):\n", eff_mtu, priv->num_frags);
899 for (i = 0; i < priv->num_frags; i++) {
900 mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
901 "stride:%d last_offset:%d\n", i,
902 priv->frag_info[i].frag_size,
903 priv->frag_info[i].frag_prefix_size,
904 priv->frag_info[i].frag_align,
905 priv->frag_info[i].frag_stride,
906 priv->frag_info[i].last_offset);
907 }
908}
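/* Editorial example: for a standard 1500-byte MTU (and NET_IP_ALIGN = 2),
 * eff_mtu = 1500 + 14 + 4 + 8 = 1526; fragment 0 covers the first
 * FRAG_SZ0 = 510 bytes and fragment 1 the remaining 1016 bytes, so
 * num_frags = 2.  Only the 9600-byte jumbo case grows to all four
 * fragment sizes. */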
909
910/* RSS related functions */
911
912/* Calculate rss size and map each entry in rss table to rx ring */
913void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
914 struct mlx4_en_rss_map *rss_map,
915 int num_entries, int num_rings)
916{
917 int i;
918
919 rss_map->size = roundup_pow_of_two(num_entries);
920	mlx4_dbg(DRV, priv, "Setting default RSS map of %d entries\n",
921 rss_map->size);
922
923 for (i = 0; i < rss_map->size; i++) {
924 rss_map->map[i] = i % num_rings;
925 mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
926 }
927}
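/* Editorial example: num_entries = 10 with 4 rings gives a table rounded up
 * to 16 entries mapped 0,1,2,3,0,1,... so completions are spread round-robin
 * over the Rx rings. */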
928
929static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
930{
931 return;
932}
933
934
935static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
936 int qpn, int srqn, int cqn,
937 enum mlx4_qp_state *state,
938 struct mlx4_qp *qp)
939{
940 struct mlx4_en_dev *mdev = priv->mdev;
941 struct mlx4_qp_context *context;
942 int err = 0;
943
944 context = kmalloc(sizeof *context , GFP_KERNEL);
945 if (!context) {
946 mlx4_err(mdev, "Failed to allocate qp context\n");
947 return -ENOMEM;
948 }
949
950 err = mlx4_qp_alloc(mdev->dev, qpn, qp);
951 if (err) {
952 mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
953		goto out;
955 }
956 qp->event = mlx4_en_sqp_event;
957
958 memset(context, 0, sizeof *context);
959 mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
960
961 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
962 if (err) {
963 mlx4_qp_remove(mdev->dev, qp);
964 mlx4_qp_free(mdev->dev, qp);
965 }
966out:
967 kfree(context);
968 return err;
969}
970
971/* Allocate rx qp's and configure them according to rss map */
972int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
973{
974 struct mlx4_en_dev *mdev = priv->mdev;
975 struct mlx4_en_rss_map *rss_map = &priv->rss_map;
976 struct mlx4_qp_context context;
977 struct mlx4_en_rss_context *rss_context;
978 void *ptr;
979 int rss_xor = mdev->profile.rss_xor;
980 u8 rss_mask = mdev->profile.rss_mask;
981 int i, srqn, qpn, cqn;
982 int err = 0;
983 int good_qps = 0;
984
985 mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
986 err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
987 rss_map->size, &rss_map->base_qpn);
988 if (err) {
989 mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
990 rss_map->size, priv->port);
991 return err;
992 }
993
994 for (i = 0; i < rss_map->size; i++) {
995 cqn = priv->rx_ring[rss_map->map[i]].cqn;
996 srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
997 qpn = rss_map->base_qpn + i;
998 err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
999 &rss_map->state[i],
1000 &rss_map->qps[i]);
1001 if (err)
1002 goto rss_err;
1003
1004 ++good_qps;
1005 }
1006
1007 /* Configure RSS indirection qp */
1008 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
1009 if (err) {
1010 mlx4_err(mdev, "Failed to reserve range for RSS "
1011 "indirection qp\n");
1012 goto rss_err;
1013 }
1014 err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
1015 if (err) {
1016 mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
1017 goto reserve_err;
1018 }
1019 rss_map->indir_qp.event = mlx4_en_sqp_event;
1020 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
1021 priv->rx_ring[0].cqn, 0, &context);
1022
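	/* Editorial note: struct mlx4_en_rss_context is overlaid at a fixed
	 * offset (0x3c) inside the generic QP context, presumably because the
	 * RSS fields have no named member in struct mlx4_qp_context here; the
	 * top byte of base_qpn also carries log2 of the indirection table size. */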
1023 ptr = ((void *) &context) + 0x3c;
1024 rss_context = (struct mlx4_en_rss_context *) ptr;
1025 rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
1026 (rss_map->base_qpn));
1027 rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
1028 rss_context->hash_fn = rss_xor & 0x3;
1029 rss_context->flags = rss_mask << 2;
1030
1031 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
1032 &rss_map->indir_qp, &rss_map->indir_state);
1033 if (err)
1034 goto indir_err;
1035
1036 return 0;
1037
1038indir_err:
1039 mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
1040 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
1041 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
1042 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
1043reserve_err:
1044 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
1045rss_err:
1046 for (i = 0; i < good_qps; i++) {
1047 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
1048 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
1049 mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
1050 mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
1051 }
1052 mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
1053 return err;
1054}
1055
1056void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
1057{
1058 struct mlx4_en_dev *mdev = priv->mdev;
1059 struct mlx4_en_rss_map *rss_map = &priv->rss_map;
1060 int i;
1061
1062 mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
1063 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
1064 mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
1065 mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
1066 mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
1067
1068 for (i = 0; i < rss_map->size; i++) {
1069 mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
1070 MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
1071 mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
1072 mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
1073 }
1074 mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
1075}
1076
1077
1078
1079
1080
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
new file mode 100644
index 000000000000..8592f8fb8475
--- /dev/null
+++ b/drivers/net/mlx4/en_tx.c
@@ -0,0 +1,820 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <asm/page.h>
35#include <linux/mlx4/cq.h>
36#include <linux/mlx4/qp.h>
37#include <linux/skbuff.h>
38#include <linux/if_vlan.h>
39#include <linux/vmalloc.h>
40
41#include "mlx4_en.h"
42
43enum {
44 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
45};
46
47static int inline_thold __read_mostly = MAX_INLINE;
48
49module_param_named(inline_thold, inline_thold, int, 0444);
50MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
51
52int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
53 struct mlx4_en_tx_ring *ring, u32 size,
54 u16 stride)
55{
56 struct mlx4_en_dev *mdev = priv->mdev;
57 int tmp;
58 int err;
59
60 ring->size = size;
61 ring->size_mask = size - 1;
62 ring->stride = stride;
63
64 inline_thold = min(inline_thold, MAX_INLINE);
65
66 spin_lock_init(&ring->comp_lock);
67
68 tmp = size * sizeof(struct mlx4_en_tx_info);
69 ring->tx_info = vmalloc(tmp);
70 if (!ring->tx_info) {
71 mlx4_err(mdev, "Failed allocating tx_info ring\n");
72 return -ENOMEM;
73 }
74 mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
75 ring->tx_info, tmp);
76
77 ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
78 if (!ring->bounce_buf) {
79 mlx4_err(mdev, "Failed allocating bounce buffer\n");
80 err = -ENOMEM;
81 goto err_tx;
82 }
83 ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
84
85 err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
86 2 * PAGE_SIZE);
87 if (err) {
88 mlx4_err(mdev, "Failed allocating hwq resources\n");
89 goto err_bounce;
90 }
91
92 err = mlx4_en_map_buffer(&ring->wqres.buf);
93 if (err) {
94 mlx4_err(mdev, "Failed to map TX buffer\n");
95 goto err_hwq_res;
96 }
97
98 ring->buf = ring->wqres.buf.direct.buf;
99
100 mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
101 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
102 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
103
104 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
105 if (err) {
106 mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
107 goto err_map;
108 }
109
110 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
111 if (err) {
112 mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
113 goto err_reserve;
114 }
115
116 return 0;
117
118err_reserve:
119 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
120err_map:
121 mlx4_en_unmap_buffer(&ring->wqres.buf);
122err_hwq_res:
123 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
124err_bounce:
125 kfree(ring->bounce_buf);
126 ring->bounce_buf = NULL;
127err_tx:
128 vfree(ring->tx_info);
129 ring->tx_info = NULL;
130 return err;
131}
132
133void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
134 struct mlx4_en_tx_ring *ring)
135{
136 struct mlx4_en_dev *mdev = priv->mdev;
137 mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
138
139 mlx4_qp_remove(mdev->dev, &ring->qp);
140 mlx4_qp_free(mdev->dev, &ring->qp);
141 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
142 mlx4_en_unmap_buffer(&ring->wqres.buf);
143 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
144 kfree(ring->bounce_buf);
145 ring->bounce_buf = NULL;
146 vfree(ring->tx_info);
147 ring->tx_info = NULL;
148}
149
150int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
151 struct mlx4_en_tx_ring *ring,
152 int cq, int srqn)
153{
154 struct mlx4_en_dev *mdev = priv->mdev;
155 int err;
156
157 ring->cqn = cq;
158 ring->prod = 0;
159 ring->cons = 0xffffffff;
160 ring->last_nr_txbb = 1;
161 ring->poll_cnt = 0;
162 ring->blocked = 0;
163 memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
164 memset(ring->buf, 0, ring->buf_size);
165
166 ring->qp_state = MLX4_QP_STATE_RST;
167 ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
168
169 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
170 ring->cqn, srqn, &ring->context);
171
172 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
173 &ring->qp, &ring->qp_state);
174
175 return err;
176}
177
178void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
179 struct mlx4_en_tx_ring *ring)
180{
181 struct mlx4_en_dev *mdev = priv->mdev;
182
183 mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
184 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
185}
186
187
188static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
189 struct mlx4_en_tx_ring *ring,
190 int index, u8 owner)
191{
192 struct mlx4_en_dev *mdev = priv->mdev;
193 struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
194 struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
195 struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
196 struct sk_buff *skb = tx_info->skb;
197 struct skb_frag_struct *frag;
198 void *end = ring->buf + ring->buf_size;
199 int frags = skb_shinfo(skb)->nr_frags;
200 int i;
201 __be32 *ptr = (__be32 *)tx_desc;
202 __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
203
204 /* Optimize the common case when there are no wraparounds */
205 if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
206 if (tx_info->linear) {
207 pci_unmap_single(mdev->pdev,
208 (dma_addr_t) be64_to_cpu(data->addr),
209 be32_to_cpu(data->byte_count),
210 PCI_DMA_TODEVICE);
211 ++data;
212 }
213
214 for (i = 0; i < frags; i++) {
215 frag = &skb_shinfo(skb)->frags[i];
216 pci_unmap_page(mdev->pdev,
217 (dma_addr_t) be64_to_cpu(data[i].addr),
218 frag->size, PCI_DMA_TODEVICE);
219 }
220 /* Stamp the freed descriptor */
221 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
222 *ptr = stamp;
223 ptr += STAMP_DWORDS;
224 }
225
226 } else {
227 if ((void *) data >= end) {
228 data = (struct mlx4_wqe_data_seg *)
229 (ring->buf + ((void *) data - end));
230 }
231
232 if (tx_info->linear) {
233 pci_unmap_single(mdev->pdev,
234 (dma_addr_t) be64_to_cpu(data->addr),
235 be32_to_cpu(data->byte_count),
236 PCI_DMA_TODEVICE);
237 ++data;
238 }
239
240 for (i = 0; i < frags; i++) {
241 /* Check for wraparound before unmapping */
242 if ((void *) data >= end)
243 data = (struct mlx4_wqe_data_seg *) ring->buf;
244 frag = &skb_shinfo(skb)->frags[i];
245 pci_unmap_page(mdev->pdev,
246 (dma_addr_t) be64_to_cpu(data->addr),
247 frag->size, PCI_DMA_TODEVICE);
248 }
249 /* Stamp the freed descriptor */
250 for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
251 *ptr = stamp;
252 ptr += STAMP_DWORDS;
253 if ((void *) ptr >= end) {
254 ptr = ring->buf;
255 stamp ^= cpu_to_be32(0x80000000);
256 }
257 }
258
259 }
260 dev_kfree_skb_any(skb);
261 return tx_info->nr_txbb;
262}
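/* Editorial note: besides unmapping, the loops above "stamp" every freed TXBB
 * by writing STAMP_VAL plus the ownership bit for the next pass into its
 * leading dword (the xor with 0x80000000 flips that bit when the stamp wraps
 * past the end of the ring), so a stale descriptor is never mistaken for a
 * freshly posted one. */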
263
264
265int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
266{
267 struct mlx4_en_priv *priv = netdev_priv(dev);
268 int cnt = 0;
269
270 /* Skip last polled descriptor */
271 ring->cons += ring->last_nr_txbb;
272 mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
273 ring->cons, ring->prod);
274
275 if ((u32) (ring->prod - ring->cons) > ring->size) {
276 if (netif_msg_tx_err(priv))
277 mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
278 return 0;
279 }
280
281 while (ring->cons != ring->prod) {
282 ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
283 ring->cons & ring->size_mask,
284 !!(ring->cons & ring->size));
285 ring->cons += ring->last_nr_txbb;
286 cnt++;
287 }
288
289 if (cnt)
290 mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
291
292 return cnt;
293}
294
295void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
296{
297 int block = 8 / ring_num;
298 int extra = 8 - (block * ring_num);
299 int num = 0;
300 u16 ring = 1;
301 int prio;
302
303 if (ring_num == 1) {
304 for (prio = 0; prio < 8; prio++)
305 prio_map[prio] = 0;
306 return;
307 }
308
309 for (prio = 0; prio < 8; prio++) {
310 if (extra && (num == block + 1)) {
311 ring++;
312 num = 0;
313 extra--;
314 } else if (!extra && (num == block)) {
315 ring++;
316 num = 0;
317 }
318 prio_map[prio] = ring;
319 mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
320 num++;
321 }
322}
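/* Editorial example: for ring_num = 3, block = 2 and extra = 2, so priorities
 * 0-2 map to ring 1, 3-5 to ring 2 and 6-7 to ring 3; ring 0 is left for
 * untagged traffic (see get_vlan_info() in en_tx.c). */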
323
324static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
325{
326 struct mlx4_en_priv *priv = netdev_priv(dev);
327 struct mlx4_cq *mcq = &cq->mcq;
328 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
329 struct mlx4_cqe *cqe = cq->buf;
330 u16 index;
331 u16 new_index;
332 u32 txbbs_skipped = 0;
333 u32 cq_last_sav;
334
335 /* index always points to the first TXBB of the last polled descriptor */
336 index = ring->cons & ring->size_mask;
337 new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
338 if (index == new_index)
339 return;
340
341 if (!priv->port_up)
342 return;
343
344 /*
345 * We use a two-stage loop:
346 * - the first samples the HW-updated CQE
347 * - the second frees TXBBs until the last sample
348 * This lets us amortize CQE cache misses, while still polling the CQ
349 * until it is quiescent.
350 */
351 cq_last_sav = mcq->cons_index;
352 do {
353 do {
354 /* Skip over last polled CQE */
355 index = (index + ring->last_nr_txbb) & ring->size_mask;
356 txbbs_skipped += ring->last_nr_txbb;
357
358 /* Poll next CQE */
359 ring->last_nr_txbb = mlx4_en_free_tx_desc(
360 priv, ring, index,
361 !!((ring->cons + txbbs_skipped) &
362 ring->size));
363 ++mcq->cons_index;
364
365 } while (index != new_index);
366
367 new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
368 } while (index != new_index);
369 AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
370 (u32) (mcq->cons_index - cq_last_sav));
371
372 /*
373 * To prevent CQ overflow we first update CQ consumer and only then
374 * the ring consumer.
375 */
376 mlx4_cq_set_ci(mcq);
377 wmb();
378 ring->cons += txbbs_skipped;
379
380 /* Wakeup Tx queue if this ring stopped it */
381 if (unlikely(ring->blocked)) {
382 if (((u32) (ring->prod - ring->cons) <=
383 ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
384
385 /* TODO: support multiqueue netdevs. Currently, we block
386 * when *any* ring is full. Note that:
387 * - 2 Tx rings can unblock at the same time and call
388 * netif_wake_queue(), which is OK since this
389 * operation is idempotent.
390 * - We might wake the queue just after another ring
391 * stopped it. This is no big deal because the next
392 * transmission on that ring would stop the queue.
393 */
394 ring->blocked = 0;
395 netif_wake_queue(dev);
396 priv->port_stats.wake_queue++;
397 }
398 }
399}
400
401void mlx4_en_tx_irq(struct mlx4_cq *mcq)
402{
403 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
404 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
405 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
406
407 spin_lock_irq(&ring->comp_lock);
408 cq->armed = 0;
409 mlx4_en_process_tx_cq(cq->dev, cq);
410 if (ring->blocked)
411 mlx4_en_arm_cq(priv, cq);
412 else
413 mod_timer(&cq->timer, jiffies + 1);
414 spin_unlock_irq(&ring->comp_lock);
415}
416
417
418void mlx4_en_poll_tx_cq(unsigned long data)
419{
420 struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
421 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
422 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
423 u32 inflight;
424
425 INC_PERF_COUNTER(priv->pstats.tx_poll);
426
427 netif_tx_lock(priv->dev);
428 spin_lock_irq(&ring->comp_lock);
429 mlx4_en_process_tx_cq(cq->dev, cq);
430 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
431
432 /* If there are still packets in flight and the timer has not already
433 * been scheduled by the Tx routine then schedule it here to guarantee
434 * completion processing of these packets */
435 if (inflight && priv->port_up)
436 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
437
438 spin_unlock_irq(&ring->comp_lock);
439 netif_tx_unlock(priv->dev);
440}
441
442static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
443 struct mlx4_en_tx_ring *ring,
444 u32 index,
445 unsigned int desc_size)
446{
447 u32 copy = (ring->size - index) * TXBB_SIZE;
448 int i;
449
450 for (i = desc_size - copy - 4; i >= 0; i -= 4) {
451 if ((i & (TXBB_SIZE - 1)) == 0)
452 wmb();
453
454 *((u32 *) (ring->buf + i)) =
455 *((u32 *) (ring->bounce_buf + copy + i));
456 }
457
458 for (i = copy - 4; i >= 4 ; i -= 4) {
459 if ((i & (TXBB_SIZE - 1)) == 0)
460 wmb();
461
462 *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
463 *((u32 *) (ring->bounce_buf + i));
464 }
465
466 /* Return real descriptor location */
467 return ring->buf + index * TXBB_SIZE;
468}
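/* Editorial note: the second loop above starts copying at i = 4, leaving the
 * descriptor's first dword - the owner/opcode word that hands it to hardware -
 * untouched; mlx4_en_xmit() writes that word last, after a wmb(). */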
469
470static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
471{
472 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
473 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
474
475 /* If we don't have a pending timer, set one up to catch our recent
476 post in case the interface becomes idle */
477 if (!timer_pending(&cq->timer))
478 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
479
480	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
481 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
482 mlx4_en_process_tx_cq(priv->dev, cq);
483}
484
485static void *get_frag_ptr(struct sk_buff *skb)
486{
487 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
488 struct page *page = frag->page;
489 void *ptr;
490
491 ptr = page_address(page);
492 if (unlikely(!ptr))
493 return NULL;
494
495 return ptr + frag->page_offset;
496}
497
498static int is_inline(struct sk_buff *skb, void **pfrag)
499{
500 void *ptr;
501
502 if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
503 if (skb_shinfo(skb)->nr_frags == 1) {
504 ptr = get_frag_ptr(skb);
505 if (unlikely(!ptr))
506 return 0;
507
508 if (pfrag)
509 *pfrag = ptr;
510
511 return 1;
512 } else if (unlikely(skb_shinfo(skb)->nr_frags))
513 return 0;
514 else
515 return 1;
516 }
517
518 return 0;
519}
520
521static int inline_size(struct sk_buff *skb)
522{
523 if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
524 <= MLX4_INLINE_ALIGN)
525 return ALIGN(skb->len + CTRL_SIZE +
526 sizeof(struct mlx4_wqe_inline_seg), 16);
527 else
528 return ALIGN(skb->len + CTRL_SIZE + 2 *
529 sizeof(struct mlx4_wqe_inline_seg), 16);
530}
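/* Editorial note: when the inline payload cannot stay below MLX4_INLINE_ALIGN,
 * build_inline_wqe() below splits it into two inline segments, which is why
 * the estimate adds a second struct mlx4_wqe_inline_seg header in that case. */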
531
532static int get_real_size(struct sk_buff *skb, struct net_device *dev,
533 int *lso_header_size)
534{
535 struct mlx4_en_priv *priv = netdev_priv(dev);
536 struct mlx4_en_dev *mdev = priv->mdev;
537 int real_size;
538
539 if (skb_is_gso(skb)) {
540 *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
541 real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
542 ALIGN(*lso_header_size + 4, DS_SIZE);
543 if (unlikely(*lso_header_size != skb_headlen(skb))) {
544 /* We add a segment for the skb linear buffer only if
545 * it contains data */
546 if (*lso_header_size < skb_headlen(skb))
547 real_size += DS_SIZE;
548 else {
549 if (netif_msg_tx_err(priv))
550 mlx4_warn(mdev, "Non-linear headers\n");
551 dev_kfree_skb_any(skb);
552 return 0;
553 }
554 }
555 if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
556 if (netif_msg_tx_err(priv))
557 mlx4_warn(mdev, "LSO header size too big\n");
558 dev_kfree_skb_any(skb);
559 return 0;
560 }
561 } else {
562 *lso_header_size = 0;
563 if (!is_inline(skb, NULL))
564 real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
565 else
566 real_size = inline_size(skb);
567 }
568
569 return real_size;
570}
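/* Editorial example (assuming the usual 16-byte control and data segments):
 * a TSO skb whose 54-byte headers sit entirely in the linear buffer and that
 * carries 3 page fragments needs CTRL_SIZE + 3 * DS_SIZE + ALIGN(54 + 4, DS_SIZE)
 * = 16 + 48 + 64 = 128 bytes, i.e. two TXBBs once mlx4_en_xmit() rounds up
 * to TXBB_SIZE. */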
571
572static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
573 int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
574{
575 struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
576 int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
577
578 if (skb->len <= spc) {
579 inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
580 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
581 if (skb_shinfo(skb)->nr_frags)
582 memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
583 skb_shinfo(skb)->frags[0].size);
584
585 } else {
586 inl->byte_count = cpu_to_be32(1 << 31 | spc);
587 if (skb_headlen(skb) <= spc) {
588 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
589 if (skb_headlen(skb) < spc) {
590 memcpy(((void *)(inl + 1)) + skb_headlen(skb),
591 fragptr, spc - skb_headlen(skb));
592 fragptr += spc - skb_headlen(skb);
593 }
594 inl = (void *) (inl + 1) + spc;
595 memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
596 } else {
597 skb_copy_from_linear_data(skb, inl + 1, spc);
598 inl = (void *) (inl + 1) + spc;
599 skb_copy_from_linear_data_offset(skb, spc, inl + 1,
600 skb_headlen(skb) - spc);
601 if (skb_shinfo(skb)->nr_frags)
602 memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
603 fragptr, skb_shinfo(skb)->frags[0].size);
604 }
605
606 wmb();
607 inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
608 }
609 tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
610 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
611 tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
612}
613
614static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
615 u16 *vlan_tag)
616{
617 int tx_ind;
618
619 /* Obtain VLAN information if present */
620 if (priv->vlgrp && vlan_tx_tag_present(skb)) {
621 *vlan_tag = vlan_tx_tag_get(skb);
622 /* Set the Tx ring to use according to vlan priority */
623 tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
624 } else {
625 *vlan_tag = 0;
626 tx_ind = 0;
627 }
628 return tx_ind;
629}
630
631int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
632{
633 struct mlx4_en_priv *priv = netdev_priv(dev);
634 struct mlx4_en_dev *mdev = priv->mdev;
635 struct mlx4_en_tx_ring *ring;
636 struct mlx4_en_cq *cq;
637 struct mlx4_en_tx_desc *tx_desc;
638 struct mlx4_wqe_data_seg *data;
639 struct skb_frag_struct *frag;
640 struct mlx4_en_tx_info *tx_info;
641 int tx_ind = 0;
642 int nr_txbb;
643 int desc_size;
644 int real_size;
645 dma_addr_t dma;
646 u32 index;
647 __be32 op_own;
648 u16 vlan_tag;
649 int i;
650 int lso_header_size;
651 void *fragptr;
652
653 if (unlikely(!skb->len)) {
654 dev_kfree_skb_any(skb);
655 return NETDEV_TX_OK;
656 }
657 real_size = get_real_size(skb, dev, &lso_header_size);
658 if (unlikely(!real_size))
659 return NETDEV_TX_OK;
660
661	/* Align descriptor to TXBB size */
662 desc_size = ALIGN(real_size, TXBB_SIZE);
663 nr_txbb = desc_size / TXBB_SIZE;
664 if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
665 if (netif_msg_tx_err(priv))
666 mlx4_warn(mdev, "Oversized header or SG list\n");
667 dev_kfree_skb_any(skb);
668 return NETDEV_TX_OK;
669 }
670
671 tx_ind = get_vlan_info(priv, skb, &vlan_tag);
672 ring = &priv->tx_ring[tx_ind];
673
674	/* Check available TXBBs and 2K spare for prefetch */
675 if (unlikely(((int)(ring->prod - ring->cons)) >
676 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
677 /* every full Tx ring stops queue.
678 * TODO: implement multi-queue support (per-queue stop) */
679 netif_stop_queue(dev);
680 ring->blocked = 1;
681 priv->port_stats.queue_stopped++;
682
683 /* Use interrupts to find out when queue opened */
684 cq = &priv->tx_cq[tx_ind];
685 mlx4_en_arm_cq(priv, cq);
686 return NETDEV_TX_BUSY;
687 }
688
689	/* Now that we know which Tx ring to use, check that the port is still up */
690 if (unlikely(!priv->port_up)) {
691 if (netif_msg_tx_err(priv))
692 mlx4_warn(mdev, "xmit: port down!\n");
693 dev_kfree_skb_any(skb);
694 return NETDEV_TX_OK;
695 }
696
697 /* Track current inflight packets for performance analysis */
698 AVG_PERF_COUNTER(priv->pstats.inflight_avg,
699 (u32) (ring->prod - ring->cons - 1));
700
701 /* Packet is good - grab an index and transmit it */
702 index = ring->prod & ring->size_mask;
703
704	/* See if the whole descriptor fits before the end of the ring without
705	 * wrapping; if it wraps, build it in a bounce buffer and copy it into place later. */
706 if (likely(index + nr_txbb <= ring->size))
707 tx_desc = ring->buf + index * TXBB_SIZE;
708 else
709 tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
710
711 /* Save skb in tx_info ring */
712 tx_info = &ring->tx_info[index];
713 tx_info->skb = skb;
714 tx_info->nr_txbb = nr_txbb;
715
716	/* Prepare the ctrl segment, apart from the opcode+ownership word, which
717	 * depends on whether LSO is used */
718 tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
719 tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
720 tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
721 tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
722 MLX4_WQE_CTRL_SOLICITED);
723 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
724 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
725 MLX4_WQE_CTRL_TCP_UDP_CSUM);
726 priv->port_stats.tx_chksum_offload++;
727 }
728
729 /* Handle LSO (TSO) packets */
730 if (lso_header_size) {
731 /* Mark opcode as LSO */
732 op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
733 ((ring->prod & ring->size) ?
734 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
735
736 /* Fill in the LSO prefix */
737 tx_desc->lso.mss_hdr_size = cpu_to_be32(
738 skb_shinfo(skb)->gso_size << 16 | lso_header_size);
739
740 /* Copy headers;
741 * note that we already verified that it is linear */
742 memcpy(tx_desc->lso.header, skb->data, lso_header_size);
743 data = ((void *) &tx_desc->lso +
744 ALIGN(lso_header_size + 4, DS_SIZE));
745
746 priv->port_stats.tso_packets++;
747 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
748 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
749 ring->bytes += skb->len + (i - 1) * lso_header_size;
750 ring->packets += i;
751 } else {
752 /* Normal (Non LSO) packet */
753 op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
754 ((ring->prod & ring->size) ?
755 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
756 data = &tx_desc->data;
757 ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
758 ring->packets++;
759
760 }
761 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
762
763
764	/* valid only for non-inline segments */
765 tx_info->data_offset = (void *) data - (void *) tx_desc;
766
767 tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
768 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
769
770 if (!is_inline(skb, &fragptr)) {
771 /* Map fragments */
772 for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
773 frag = &skb_shinfo(skb)->frags[i];
774 dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
775 frag->size, PCI_DMA_TODEVICE);
776 data->addr = cpu_to_be64(dma);
777 data->lkey = cpu_to_be32(mdev->mr.key);
778 wmb();
779 data->byte_count = cpu_to_be32(frag->size);
780 --data;
781 }
782
783 /* Map linear part */
784 if (tx_info->linear) {
785 dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
786 skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
787 data->addr = cpu_to_be64(dma);
788 data->lkey = cpu_to_be32(mdev->mr.key);
789 wmb();
790 data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
791 }
792 } else
793 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
794
795 ring->prod += nr_txbb;
796
797 /* If we used a bounce buffer then copy descriptor back into place */
798 if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
799 tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
800
801 /* Run destructor before passing skb to HW */
802 if (likely(!skb_shared(skb)))
803 skb_orphan(skb);
804
805	/* Ensure the new descriptor hits memory
806 * before setting ownership of this descriptor to HW */
807 wmb();
808 tx_desc->ctrl.owner_opcode = op_own;
809
810 /* Ring doorbell! */
811 wmb();
812 writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
813 dev->trans_start = jiffies;
814
815 /* Poll CQ here */
816 mlx4_en_xmit_poll(priv, tx_ind);
817
818	return NETDEV_TX_OK;
819}
820
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
new file mode 100644
index 000000000000..11fb17c6e97b
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -0,0 +1,561 @@
1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#ifndef _MLX4_EN_H_
35#define _MLX4_EN_H_
36
37#include <linux/compiler.h>
38#include <linux/list.h>
39#include <linux/mutex.h>
40#include <linux/netdevice.h>
41#include <linux/inet_lro.h>
42
43#include <linux/mlx4/device.h>
44#include <linux/mlx4/qp.h>
45#include <linux/mlx4/cq.h>
46#include <linux/mlx4/srq.h>
47#include <linux/mlx4/doorbell.h>
48
49#include "en_port.h"
50
51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.4.0"
53#define DRV_RELDATE "Sep 2008"
54
55
56#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
57
58#define mlx4_dbg(mlevel, priv, format, arg...) \
59 if (NETIF_MSG_##mlevel & priv->msg_enable) \
60 printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
61 (&priv->mdev->pdev->dev)->bus_id , ## arg)
62
63#define mlx4_err(mdev, format, arg...) \
64 printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
65 (&mdev->pdev->dev)->bus_id , ## arg)
66#define mlx4_info(mdev, format, arg...) \
67 printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
68 (&mdev->pdev->dev)->bus_id , ## arg)
69#define mlx4_warn(mdev, format, arg...) \
70 printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
71 (&mdev->pdev->dev)->bus_id , ## arg)
72
73/*
74 * Device constants
75 */
76
77
78#define MLX4_EN_PAGE_SHIFT 12
79#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
80#define MAX_TX_RINGS 16
81#define MAX_RX_RINGS 16
82#define MAX_RSS_MAP_SIZE 64
83#define RSS_FACTOR 2
84#define TXBB_SIZE 64
85#define HEADROOM (2048 / TXBB_SIZE + 1)
86#define MAX_LSO_HDR_SIZE 92
87#define STAMP_STRIDE 64
88#define STAMP_DWORDS (STAMP_STRIDE / 4)
89#define STAMP_SHIFT 31
90#define STAMP_VAL 0x7fffffff
91#define STATS_DELAY (HZ / 4)
92
93/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
94#define MAX_DESC_SIZE 512
95#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
96
97/*
98 * OS related constants and tunables
99 */
100
101#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
102
103#define MLX4_EN_ALLOC_ORDER 2
104#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
105
106#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
107
108/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
109 * and 4K allocations) */
110enum {
111 FRAG_SZ0 = 512 - NET_IP_ALIGN,
112 FRAG_SZ1 = 1024,
113 FRAG_SZ2 = 4096,
114 FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
115};
116#define MLX4_EN_MAX_RX_FRAGS 4
117
118/* Minimum ring size for our page-allocation scheme to work */
119#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
120#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
121
122#define MLX4_EN_TX_RING_NUM 9
123#define MLX4_EN_DEF_TX_RING_SIZE 1024
124#define MLX4_EN_DEF_RX_RING_SIZE 1024
125
126/* Target number of bytes to coalesce with interrupt moderation */
127#define MLX4_EN_RX_COAL_TARGET 0x20000
128#define MLX4_EN_RX_COAL_TIME 0x10
129
130#define MLX4_EN_TX_COAL_PKTS 5
131#define MLX4_EN_TX_COAL_TIME 0x80
132
133#define MLX4_EN_RX_RATE_LOW 400000
134#define MLX4_EN_RX_COAL_TIME_LOW 0
135#define MLX4_EN_RX_RATE_HIGH 450000
136#define MLX4_EN_RX_COAL_TIME_HIGH 128
137#define MLX4_EN_RX_SIZE_THRESH 1024
138#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
139#define MLX4_EN_SAMPLE_INTERVAL 0
140
141#define MLX4_EN_AUTO_CONF 0xffff
142
143#define MLX4_EN_DEF_RX_PAUSE 1
144#define MLX4_EN_DEF_TX_PAUSE 1
145
146/* Interval between successive polls in the Tx routine when polling is used
147   instead of interrupts (in per-core Tx rings) - should be a power of 2 */
148#define MLX4_EN_TX_POLL_MODER 16
149#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
150
151#define ETH_LLC_SNAP_SIZE 8
152
153#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
154#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
155
156#define MLX4_EN_MIN_MTU 46
157#define ETH_BCAST 0xffffffffffffULL
158
159#ifdef MLX4_EN_PERF_STAT
160/* Number of samples to 'average' */
161#define AVG_SIZE 128
162#define AVG_FACTOR 1024
163#define NUM_PERF_STATS NUM_PERF_COUNTERS
164
165#define INC_PERF_COUNTER(cnt) (++(cnt))
166#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
167#define AVG_PERF_COUNTER(cnt, sample) \
168 ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
169#define GET_PERF_COUNTER(cnt) (cnt)
170#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
171
172#else
173
174#define NUM_PERF_STATS 0
175#define INC_PERF_COUNTER(cnt) do {} while (0)
176#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
177#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
178#define GET_PERF_COUNTER(cnt) (0)
179#define GET_AVG_PERF_COUNTER(cnt) (0)
180#endif /* MLX4_EN_PERF_STAT */
181
182/*
183 * Configurables
184 */
185
186enum cq_type {
187 RX = 0,
188 TX = 1,
189};
190
191
192/*
193 * Useful macros
194 */
195#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
196#define XNOR(x, y) (!(x) == !(y))
197#define ILLEGAL_MAC(addr)	((addr) == 0xffffffffffffULL || (addr) == 0x0)
198
199
200struct mlx4_en_tx_info {
201 struct sk_buff *skb;
202 u32 nr_txbb;
203 u8 linear;
204 u8 data_offset;
205};
206
207
208#define MLX4_EN_BIT_DESC_OWN 0x80000000
209#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
210#define MLX4_EN_MEMTYPE_PAD 0x100
211#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
212
213
214struct mlx4_en_tx_desc {
215 struct mlx4_wqe_ctrl_seg ctrl;
216 union {
217 struct mlx4_wqe_data_seg data; /* at least one data segment */
218 struct mlx4_wqe_lso_seg lso;
219 struct mlx4_wqe_inline_seg inl;
220 };
221};
222
223#define MLX4_EN_USE_SRQ 0x01000000
224
225struct mlx4_en_rx_alloc {
226 struct page *page;
227 u16 offset;
228};
229
230struct mlx4_en_tx_ring {
231 struct mlx4_hwq_resources wqres;
232	u32 size; /* number of TXBBs */
233 u32 size_mask;
234 u16 stride;
235 u16 cqn; /* index of port CQ associated with this ring */
236 u32 prod;
237 u32 cons;
238 u32 buf_size;
239 u32 doorbell_qpn;
240 void *buf;
241 u16 poll_cnt;
242 int blocked;
243 struct mlx4_en_tx_info *tx_info;
244 u8 *bounce_buf;
245 u32 last_nr_txbb;
246 struct mlx4_qp qp;
247 struct mlx4_qp_context context;
248 int qpn;
249 enum mlx4_qp_state qp_state;
250 struct mlx4_srq dummy;
251 unsigned long bytes;
252 unsigned long packets;
253 spinlock_t comp_lock;
254};
255
256struct mlx4_en_rx_desc {
257 struct mlx4_wqe_srq_next_seg next;
258 /* actual number of entries depends on rx ring stride */
259 struct mlx4_wqe_data_seg data[0];
260};
261
262struct mlx4_en_rx_ring {
263 struct mlx4_srq srq;
264 struct mlx4_hwq_resources wqres;
265 struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
266 struct net_lro_mgr lro;
267	u32 size; /* number of Rx descs */
268 u32 actual_size;
269 u32 size_mask;
270 u16 stride;
271 u16 log_stride;
272 u16 cqn; /* index of port CQ associated with this ring */
273 u32 prod;
274 u32 cons;
275 u32 buf_size;
276 int need_refill;
277 int full;
278 void *buf;
279 void *rx_info;
280 unsigned long bytes;
281 unsigned long packets;
282};
283
284
285static inline int mlx4_en_can_lro(__be16 status)
286{
287 return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
288 MLX4_CQE_STATUS_IPV4F |
289 MLX4_CQE_STATUS_IPV6 |
290 MLX4_CQE_STATUS_IPV4OPT |
291 MLX4_CQE_STATUS_TCP |
292 MLX4_CQE_STATUS_UDP |
293 MLX4_CQE_STATUS_IPOK)) ==
294 cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
295 MLX4_CQE_STATUS_IPOK |
296 MLX4_CQE_STATUS_TCP);
297}
298
299struct mlx4_en_cq {
300 struct mlx4_cq mcq;
301 struct mlx4_hwq_resources wqres;
302 int ring;
303 spinlock_t lock;
304 struct net_device *dev;
305 struct napi_struct napi;
306 /* Per-core Tx cq processing support */
307 struct timer_list timer;
308 int size;
309 int buf_size;
310 unsigned vector;
311 enum cq_type is_tx;
312 u16 moder_time;
313 u16 moder_cnt;
314 int armed;
315 struct mlx4_cqe *buf;
316#define MLX4_EN_OPCODE_ERROR 0x1e
317};
318
319struct mlx4_en_port_profile {
320 u32 flags;
321 u32 tx_ring_num;
322 u32 rx_ring_num;
323 u32 tx_ring_size;
324 u32 rx_ring_size;
325};
326
327struct mlx4_en_profile {
328 int rss_xor;
329 int num_lro;
330 u8 rss_mask;
331 u32 active_ports;
332 u32 small_pkt_int;
333 int rx_moder_cnt;
334 int rx_moder_time;
335 int auto_moder;
336 u8 rx_pause;
337 u8 rx_ppp;
338 u8 tx_pause;
339 u8 tx_ppp;
340 u8 no_reset;
341 struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
342};
343
344struct mlx4_en_dev {
345 struct mlx4_dev *dev;
346 struct pci_dev *pdev;
347 struct mutex state_lock;
348 struct net_device *pndev[MLX4_MAX_PORTS + 1];
349 u32 port_cnt;
350 bool device_up;
351 struct mlx4_en_profile profile;
352 u32 LSO_support;
353 struct workqueue_struct *workqueue;
354 struct device *dma_device;
355 void __iomem *uar_map;
356 struct mlx4_uar priv_uar;
357 struct mlx4_mr mr;
358 u32 priv_pdn;
359 spinlock_t uar_lock;
360};
361
362
363struct mlx4_en_rss_map {
364 int size;
365 int base_qpn;
366 u16 map[MAX_RSS_MAP_SIZE];
367 struct mlx4_qp qps[MAX_RSS_MAP_SIZE];
368 enum mlx4_qp_state state[MAX_RSS_MAP_SIZE];
369 struct mlx4_qp indir_qp;
370 enum mlx4_qp_state indir_state;
371};
372
373struct mlx4_en_rss_context {
374 __be32 base_qpn;
375 __be32 default_qpn;
376 u16 reserved;
377 u8 hash_fn;
378 u8 flags;
379 __be32 rss_key[10];
380};
381
382struct mlx4_en_pkt_stats {
383 unsigned long broadcast;
384 unsigned long rx_prio[8];
385 unsigned long tx_prio[8];
386#define NUM_PKT_STATS 17
387};
388
389struct mlx4_en_port_stats {
390 unsigned long lro_aggregated;
391 unsigned long lro_flushed;
392 unsigned long lro_no_desc;
393 unsigned long tso_packets;
394 unsigned long queue_stopped;
395 unsigned long wake_queue;
396 unsigned long tx_timeout;
397 unsigned long rx_alloc_failed;
398 unsigned long rx_chksum_good;
399 unsigned long rx_chksum_none;
400 unsigned long tx_chksum_offload;
401#define NUM_PORT_STATS 11
402};
403
404struct mlx4_en_perf_stats {
405 u32 tx_poll;
406 u64 tx_pktsz_avg;
407 u32 inflight_avg;
408 u16 tx_coal_avg;
409 u16 rx_coal_avg;
410 u32 napi_quota;
411#define NUM_PERF_COUNTERS 6
412};
413
414struct mlx4_en_frag_info {
415 u16 frag_size;
416 u16 frag_prefix_size;
417 u16 frag_stride;
418 u16 frag_align;
419 u16 last_offset;
420
421};
422
423struct mlx4_en_priv {
424 struct mlx4_en_dev *mdev;
425 struct mlx4_en_port_profile *prof;
426 struct net_device *dev;
427 struct vlan_group *vlgrp;
428 struct net_device_stats stats;
429 struct net_device_stats ret_stats;
430 spinlock_t stats_lock;
431
432 unsigned long last_moder_packets;
433 unsigned long last_moder_tx_packets;
434 unsigned long last_moder_bytes;
435 unsigned long last_moder_jiffies;
436 int last_moder_time;
437 u16 rx_usecs;
438 u16 rx_frames;
439 u16 tx_usecs;
440 u16 tx_frames;
441 u32 pkt_rate_low;
442 u16 rx_usecs_low;
443 u32 pkt_rate_high;
444 u16 rx_usecs_high;
445 u16 sample_interval;
446 u16 adaptive_rx_coal;
447 u32 msg_enable;
448
449 struct mlx4_hwq_resources res;
450 int link_state;
451 int last_link_state;
452 bool port_up;
453 int port;
454 int registered;
455 int allocated;
456 int stride;
457 int rx_csum;
458 u64 mac;
459 int mac_index;
460 unsigned max_mtu;
461 int base_qpn;
462
463 struct mlx4_en_rss_map rss_map;
464 u16 tx_prio_map[8];
465 u32 flags;
466#define MLX4_EN_FLAG_PROMISC 0x1
467 u32 tx_ring_num;
468 u32 rx_ring_num;
469 u32 rx_skb_size;
470 struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
471 u16 num_frags;
472 u16 log_rx_info;
473
474 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
475 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
476 struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
477 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
478 struct work_struct mcast_task;
479 struct work_struct mac_task;
480 struct delayed_work refill_task;
481 struct work_struct watchdog_task;
482 struct work_struct linkstate_task;
483 struct delayed_work stats_task;
484 struct mlx4_en_perf_stats pstats;
485 struct mlx4_en_pkt_stats pkstats;
486 struct mlx4_en_port_stats port_stats;
487 struct dev_mc_list *mc_list;
488 struct mlx4_en_stat_out_mbox hw_stats;
489};
490
491
492void mlx4_en_destroy_netdev(struct net_device *dev);
493int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
494 struct mlx4_en_port_profile *prof);
495
496int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
497
498int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
499 int entries, int ring, enum cq_type mode);
500void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
501int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
502void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
503int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
504int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
505
506void mlx4_en_poll_tx_cq(unsigned long data);
507void mlx4_en_tx_irq(struct mlx4_cq *mcq);
508int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
509
510int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
511 u32 size, u16 stride);
512void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
513int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
514 struct mlx4_en_tx_ring *ring,
515 int cq, int srqn);
516void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
517 struct mlx4_en_tx_ring *ring);
518
519int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
520 struct mlx4_en_rx_ring *ring,
521 u32 size, u16 stride);
522void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
523 struct mlx4_en_rx_ring *ring);
524int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
525void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
526 struct mlx4_en_rx_ring *ring);
527int mlx4_en_process_rx_cq(struct net_device *dev,
528 struct mlx4_en_cq *cq,
529 int budget);
530int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
531void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
532 int is_tx, int rss, int qpn, int cqn, int srqn,
533 struct mlx4_qp_context *context);
534int mlx4_en_map_buffer(struct mlx4_buf *buf);
535void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
536
537void mlx4_en_calc_rx_buf(struct net_device *dev);
538void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
539 struct mlx4_en_rss_map *rss_map,
540 int num_entries, int num_rings);
541void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
542int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
543void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
544int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
545void mlx4_en_rx_refill(struct work_struct *work);
546void mlx4_en_rx_irq(struct mlx4_cq *mcq);
547
548int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
549int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
550int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
551 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
552int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
553 u8 promisc);
554
555int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
556
557/*
558 * Globals
559 */
560extern const struct ethtool_ops mlx4_en_ethtool_ops;
561#endif