Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--  drivers/net/mlx4/Makefile                                        |   2
-rw-r--r--  drivers/net/mlx4/en_ethtool.c (renamed from drivers/net/mlx4/en_params.c) |  67
-rw-r--r--  drivers/net/mlx4/en_main.c                                       |  68
-rw-r--r--  drivers/net/mlx4/en_netdev.c                                     | 175
-rw-r--r--  drivers/net/mlx4/en_rx.c                                         |  78
-rw-r--r--  drivers/net/mlx4/en_tx.c                                         | 120
-rw-r--r--  drivers/net/mlx4/eq.c                                            |   4
-rw-r--r--  drivers/net/mlx4/mlx4_en.h                                       |  49
-rw-r--r--  drivers/net/mlx4/mr.c                                            |   7
9 files changed, 256 insertions(+), 314 deletions(-)
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 21040a0d81fe..1fd068e1d930 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -5,5 +5,5 @@ mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
 
 obj-$(CONFIG_MLX4_EN) += mlx4_en.o
 
-mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
+mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
 	en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_ethtool.c
index c1bd040b9e05..091f99052c91 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -38,64 +38,6 @@
 #include "mlx4_en.h"
 #include "en_port.h"
 
-#define MLX4_EN_PARM_INT(X, def_val, desc) \
-	static unsigned int X = def_val;\
-	module_param(X , uint, 0444); \
-	MODULE_PARM_DESC(X, desc);
-
-
-/*
- * Device scope module parameters
- */
-
-
-/* Use a XOR rathern than Toeplitz hash function for RSS */
-MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
-
-/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
-MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
-
-/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
-MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
-		 "Number of LRO sessions per ring or disabled (0)");
-
-/* Priority pausing */
-MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
-		 " Per priority bit mask");
-MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
-		 " Per priority bit mask");
-
-int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
-{
-	struct mlx4_en_profile *params = &mdev->profile;
-	int i;
-
-	params->rss_xor = (rss_xor != 0);
-	params->rss_mask = rss_mask & 0x1f;
-	params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
-	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
-		params->prof[i].rx_pause = 1;
-		params->prof[i].rx_ppp = pfcrx;
-		params->prof[i].tx_pause = 1;
-		params->prof[i].tx_ppp = pfctx;
-		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
-		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
-	}
-	if (pfcrx || pfctx) {
-		params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
-		params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
-	} else {
-		params->prof[1].tx_ring_num = 1;
-		params->prof[2].tx_ring_num = 1;
-	}
-
-	return 0;
-}
-
-
-/*
- * Ethtool support
- */
 
 static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
 {
@@ -326,8 +268,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
 
 	priv->rx_frames = (coal->rx_max_coalesced_frames ==
 			   MLX4_EN_AUTO_CONF) ?
-				MLX4_EN_RX_COAL_TARGET /
-				priv->dev->mtu + 1 :
+				MLX4_EN_RX_COAL_TARGET :
 				coal->rx_max_coalesced_frames;
 	priv->rx_usecs = (coal->rx_coalesce_usecs ==
 			  MLX4_EN_AUTO_CONF) ?
@@ -371,7 +312,7 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 				    priv->prof->rx_pause,
 				    priv->prof->rx_ppp);
 	if (err)
-		mlx4_err(mdev, "Failed setting pause params to\n");
+		en_err(priv, "Failed setting pause params\n");
 
 	return err;
 }
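
Note: en_err()/en_dbg()/en_warn() come from the mlx4_en.h part of this series,
whose hunk is not shown in this view. A minimal sketch of what such
interface-aware printers look like (the names are real, the exact bodies here
are an approximation, not the verbatim header):

	#define en_print(level, priv, format, arg...)			\
		printk(level "%s: %s: " format, DRV_NAME,		\
		       (priv)->dev->name, ## arg)

	#define en_err(priv, format, arg...) \
		en_print(KERN_ERR, priv, format, ## arg)

	#define en_warn(priv, format, arg...) \
		en_print(KERN_WARNING, priv, format, ## arg)

	/* mlevel expands to a NETIF_MSG_* bit, e.g. en_dbg(HW, ...) */
	#define en_dbg(mlevel, priv, format, arg...)			\
		do {							\
			if (NETIF_MSG_##mlevel & (priv)->msg_enable)	\
				en_print(KERN_DEBUG, priv, format, ## arg); \
		} while (0)

The point of the conversion: messages are tagged with the netdev name, so the
"port:%d" suffixes sprinkled through the old mlx4_err()/mlx4_dbg() calls can
be dropped.
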
@@ -421,13 +362,13 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 
 	err = mlx4_en_alloc_resources(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed reallocating port resources\n");
+		en_err(priv, "Failed reallocating port resources\n");
 		goto out;
 	}
 	if (port_up) {
 		err = mlx4_en_start_port(dev);
 		if (err)
-			mlx4_err(mdev, "Failed starting port\n");
+			en_err(priv, "Failed starting port\n");
 	}
 
 out:
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 510633fd57f6..9ed4a158f895 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -51,6 +51,55 @@ static const char mlx4_en_version[] =
 	DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
 
+#define MLX4_EN_PARM_INT(X, def_val, desc) \
+	static unsigned int X = def_val;\
+	module_param(X , uint, 0444); \
+	MODULE_PARM_DESC(X, desc);
+
+
+/*
+ * Device scope module parameters
+ */
+
+
+/* Use a XOR rathern than Toeplitz hash function for RSS */
+MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
+
+/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
+MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
+
+/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
+MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
+		 "Number of LRO sessions per ring or disabled (0)");
+
+/* Priority pausing */
+MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
+		 " Per priority bit mask");
+MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
+		 " Per priority bit mask");
+
+static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+{
+	struct mlx4_en_profile *params = &mdev->profile;
+	int i;
+
+	params->rss_xor = (rss_xor != 0);
+	params->rss_mask = rss_mask & 0x1f;
+	params->num_lro = min_t(int, num_lro , MLX4_EN_MAX_LRO_DESCRIPTORS);
+	for (i = 1; i <= MLX4_MAX_PORTS; i++) {
+		params->prof[i].rx_pause = 1;
+		params->prof[i].rx_ppp = pfcrx;
+		params->prof[i].tx_pause = 1;
+		params->prof[i].tx_ppp = pfctx;
+		params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+		params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
+		params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
+			(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+	}
+
+	return 0;
+}
+
 static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
 			  enum mlx4_dev_event event, int port)
 {
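
Note: the profile now gives every port the same tx_ring_num instead of the
old per-port if/else. Assuming the defaults this series adds to mlx4_en.h
(MLX4_EN_NUM_TX_RINGS = 8 and MLX4_EN_NUM_PPP_RINGS = 8 — values not visible
in this hunk, so treat them as illustrative), the arithmetic is:

	/* pfcrx == 0: only the default rings */
	tx_ring_num = 8 + (!!0) * 8;	/* = 8 */

	/* pfcrx != 0: one extra ring per flow-control priority */
	tx_ring_num = 8 + (!!0xff) * 8;	/* = 16 */
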
@@ -194,28 +243,11 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	/* Create a netdev for each port */
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
 		mlx4_info(mdev, "Activating port:%d\n", i);
-		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
+		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
 			mdev->pndev[i] = NULL;
-			goto err_free_netdev;
-		}
 	}
 	return mdev;
 
-
-err_free_netdev:
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-		if (mdev->pndev[i])
-			mlx4_en_destroy_netdev(mdev->pndev[i]);
-	}
-
-	mutex_lock(&mdev->state_lock);
-	mdev->device_up = false;
-	mutex_unlock(&mdev->state_lock);
-	flush_workqueue(mdev->workqueue);
-
-	/* Stop event queue before we drop down to release shared SW state */
-	destroy_workqueue(mdev->workqueue);
-
 err_mr:
 	mlx4_mr_free(dev, &mdev->mr);
 err_uar:
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 0cd185a2e089..0a7e78ade63f 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -51,14 +51,14 @@ static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
 
-	mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
+	en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
 	priv->vlgrp = grp;
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -72,15 +72,15 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 	if (!priv->vlgrp)
 		return;
 
-	mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
-		 vid, vlan_group_get_device(priv->vlgrp, vid));
+	en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+	       vid, vlan_group_get_device(priv->vlgrp, vid));
 
 	/* Add VID to port VLAN filter */
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -94,9 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	if (!priv->vlgrp)
 		return;
 
-	mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
-		 "entry:%p)\n", vid, priv->vlgrp,
-		 vlan_group_get_device(priv->vlgrp, vid));
+	en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
+	       vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
 	vlan_group_set_device(priv->vlgrp, vid, NULL);
 
 	/* Remove VID from port VLAN filter */
@@ -104,7 +103,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -150,9 +149,10 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 		err = mlx4_register_mac(mdev->dev, priv->port,
 					priv->mac, &priv->mac_index);
 		if (err)
-			mlx4_err(mdev, "Failed changing HW MAC address\n");
+			en_err(priv, "Failed changing HW MAC address\n");
 	} else
-		mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+		en_dbg(HW, priv, "Port is down while "
+				 "registering mac, exiting...\n");
 
 	mutex_unlock(&mdev->state_lock);
 }
@@ -174,7 +174,6 @@ static void mlx4_en_clear_list(struct net_device *dev)
 static void mlx4_en_cache_mclist(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct dev_mc_list *mclist;
 	struct dev_mc_list *tmp;
 	struct dev_mc_list *plist = NULL;
@@ -182,7 +181,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
 	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
 		tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
 		if (!tmp) {
-			mlx4_err(mdev, "failed to allocate multicast list\n");
+			en_err(priv, "failed to allocate multicast list\n");
 			mlx4_en_clear_list(dev);
 			return;
 		}
@@ -219,13 +218,13 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 
 	mutex_lock(&mdev->state_lock);
 	if (!mdev->device_up) {
-		mlx4_dbg(HW, priv, "Card is not up, ignoring "
-				   "multicast change.\n");
+		en_dbg(HW, priv, "Card is not up, "
+				 "ignoring multicast change.\n");
 		goto out;
 	}
 	if (!priv->port_up) {
-		mlx4_dbg(HW, priv, "Port is down, ignoring "
-				   "multicast change.\n");
+		en_dbg(HW, priv, "Port is down, "
+				 "ignoring multicast change.\n");
 		goto out;
 	}
 
@@ -236,29 +235,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	if (dev->flags & IFF_PROMISC) {
 		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
 			if (netif_msg_rx_status(priv))
-				mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
-					  priv->port);
+				en_warn(priv, "Entering promiscuous mode\n");
 			priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 			/* Enable promiscouos mode */
 			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 						     priv->base_qpn, 1);
 			if (err)
-				mlx4_err(mdev, "Failed enabling "
-					 "promiscous mode\n");
+				en_err(priv, "Failed enabling "
+					     "promiscous mode\n");
 
 			/* Disable port multicast filter (unconditionally) */
 			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 						  0, MLX4_MCAST_DISABLE);
 			if (err)
-				mlx4_err(mdev, "Failed disabling "
-					 "multicast filter\n");
+				en_err(priv, "Failed disabling "
+					     "multicast filter\n");
 
 			/* Disable port VLAN filter */
 			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
 			if (err)
-				mlx4_err(mdev, "Failed disabling "
-					 "VLAN filter\n");
+				en_err(priv, "Failed disabling VLAN filter\n");
 		}
 		goto out;
 	}
@@ -269,20 +266,19 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 
 	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
 		if (netif_msg_rx_status(priv))
-			mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
-				  priv->port);
+			en_warn(priv, "Leaving promiscuous mode\n");
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
 		/* Disable promiscouos mode */
 		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 					     priv->base_qpn, 0);
 		if (err)
-			mlx4_err(mdev, "Failed disabling promiscous mode\n");
+			en_err(priv, "Failed disabling promiscous mode\n");
 
 		/* Enable port VLAN filter */
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed enabling VLAN filter\n");
+			en_err(priv, "Failed enabling VLAN filter\n");
 	}
 
 	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
@@ -290,12 +286,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
-			mlx4_err(mdev, "Failed disabling multicast filter\n");
+			en_err(priv, "Failed disabling multicast filter\n");
 	} else {
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
-			mlx4_err(mdev, "Failed disabling multicast filter\n");
+			en_err(priv, "Failed disabling multicast filter\n");
 
 		/* Flush mcast filter and init it with broadcast address */
 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
@@ -314,7 +310,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_ENABLE);
 		if (err)
-			mlx4_err(mdev, "Failed enabling multicast filter\n");
+			en_err(priv, "Failed enabling multicast filter\n");
 
 		mlx4_en_clear_list(dev);
 	}
@@ -346,10 +342,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	if (netif_msg_timer(priv))
-		mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
+		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
 
 	priv->port_stats.tx_timeout++;
-	mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+	en_dbg(DRV, priv, "Scheduling watchdog\n");
 	queue_work(mdev->workqueue, &priv->watchdog_task);
 }
 
@@ -376,10 +372,10 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	 *   satisfy our coelsing target.
 	 * - moder_time is set to a fixed value.
 	 */
-	priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1;
+	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
 	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
-	mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
-			     "rx_frames:%d rx_usecs:%d\n",
-		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+			   "rx_frames:%d rx_usecs:%d\n",
+	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
 
 	/* Setup cq moderation params */
@@ -412,7 +408,6 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 {
 	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq;
 	unsigned long packets;
 	unsigned long rate;
@@ -472,11 +467,11 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 		moder_time = priv->rx_usecs;
 	}
 
-	mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
-		 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+	       tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
 
-	mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
-		 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
-		 priv->last_moder_time, moder_time, period, packets,
-		 avg_pkt_size, rate);
+	en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+	       "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+	       priv->last_moder_time, moder_time, period, packets,
+	       avg_pkt_size, rate);
 
@@ -487,8 +482,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 		cq->moder_time = moder_time;
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed modifying moderation for cq:%d "
-				 "on port:%d\n", i, priv->port);
+			en_err(priv, "Failed modifying moderation for cq:%d\n", i);
 			break;
 		}
 	}
@@ -511,8 +505,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
 
 	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
 	if (err)
-		mlx4_dbg(HW, priv, "Could not update stats for "
-				   "port:%d\n", priv->port);
+		en_dbg(HW, priv, "Could not update stats \n");
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up) {
@@ -536,12 +529,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
 	 * report to system log */
 	if (priv->last_link_state != linkstate) {
 		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-			if (netif_msg_link(priv))
-				mlx4_info(mdev, "Port %d - link down\n", priv->port);
+			en_dbg(LINK, priv, "Link Down\n");
 			netif_carrier_off(priv->dev);
 		} else {
-			if (netif_msg_link(priv))
-				mlx4_info(mdev, "Port %d - link up\n", priv->port);
+			en_dbg(LINK, priv, "Link Up\n");
 			netif_carrier_on(priv->dev);
 		}
 	}
@@ -563,19 +554,19 @@ int mlx4_en_start_port(struct net_device *dev)
 	int j;
 
 	if (priv->port_up) {
-		mlx4_dbg(DRV, priv, "start port called while port already up\n");
+		en_dbg(DRV, priv, "start port called while port already up\n");
 		return 0;
 	}
 
 	/* Calculate Rx buf size */
 	dev->mtu = min(dev->mtu, priv->max_mtu);
 	mlx4_en_calc_rx_buf(dev);
-	mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
 
 	/* Configure rx cq's and rings */
 	err = mlx4_en_activate_rx_rings(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed to activate RX rings\n");
+		en_err(priv, "Failed to activate RX rings\n");
 		return err;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
@@ -583,14 +574,14 @@ int mlx4_en_start_port(struct net_device *dev)
 
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed activating Rx CQ\n");
+			en_err(priv, "Failed activating Rx CQ\n");
 			goto cq_err;
 		}
 		for (j = 0; j < cq->size; j++)
 			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto cq_err;
 		}
@@ -601,7 +592,7 @@ int mlx4_en_start_port(struct net_device *dev)
 
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed configuring rss steering\n");
+		en_err(priv, "Failed configuring rss steering\n");
 		goto cq_err;
 	}
 
@@ -611,16 +602,16 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq = &priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed allocating Tx CQ\n");
+			en_err(priv, "Failed allocating Tx CQ\n");
 			goto tx_err;
 		}
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
-		mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
@@ -628,7 +619,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 					       priv->rx_ring[0].srq.srqn);
 		if (err) {
-			mlx4_err(mdev, "Failed allocating Tx ring\n");
+			en_err(priv, "Failed allocating Tx ring\n");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
@@ -646,30 +637,30 @@ int mlx4_en_start_port(struct net_device *dev)
 				    priv->prof->rx_pause,
 				    priv->prof->rx_ppp);
 	if (err) {
-		mlx4_err(mdev, "Failed setting port general configurations"
-			 " for port %d, with error %d\n", priv->port, err);
+		en_err(priv, "Failed setting port general configurations "
+			     "for port %d, with error %d\n", priv->port, err);
 		goto tx_err;
 	}
 	/* Set default qp number */
 	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
 	if (err) {
-		mlx4_err(mdev, "Failed setting default qp numbers\n");
+		en_err(priv, "Failed setting default qp numbers\n");
 		goto tx_err;
 	}
 	/* Set port mac number */
-	mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
 	err = mlx4_register_mac(mdev->dev, priv->port,
 				priv->mac, &priv->mac_index);
 	if (err) {
-		mlx4_err(mdev, "Failed setting port mac\n");
+		en_err(priv, "Failed setting port mac\n");
 		goto tx_err;
 	}
 
 	/* Init port */
-	mlx4_dbg(HW, priv, "Initializing port\n");
+	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
 	if (err) {
-		mlx4_err(mdev, "Failed Initializing port\n");
+		en_err(priv, "Failed Initializing port\n");
 		goto mac_err;
 	}
 
@@ -706,8 +697,7 @@ void mlx4_en_stop_port(struct net_device *dev)
 	int i;
 
 	if (!priv->port_up) {
-		mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
-			 priv->port);
+		en_dbg(DRV, priv, "stop port called while port already down\n");
 		return;
 	}
 	netif_stop_queue(dev);
@@ -752,13 +742,13 @@ static void mlx4_en_restart(struct work_struct *work)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
 
-	mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		mlx4_en_stop_port(dev);
 		if (mlx4_en_start_port(dev))
-			mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+			en_err(priv, "Failed restarting port %d\n", priv->port);
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -774,14 +764,14 @@ static int mlx4_en_open(struct net_device *dev)
 	mutex_lock(&mdev->state_lock);
 
 	if (!mdev->device_up) {
-		mlx4_err(mdev, "Cannot open - device down/disabled\n");
+		en_err(priv, "Cannot open - device down/disabled\n");
 		err = -EBUSY;
 		goto out;
 	}
 
 	/* Reset HW statistics and performance counters */
 	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
-		mlx4_dbg(HW, priv, "Failed dumping statistics\n");
+		en_dbg(HW, priv, "Failed dumping statistics\n");
 
 	memset(&priv->stats, 0, sizeof(priv->stats));
 	memset(&priv->pstats, 0, sizeof(priv->pstats));
@@ -798,7 +788,7 @@ static int mlx4_en_open(struct net_device *dev)
 	mlx4_en_set_default_moderation(priv);
 	err = mlx4_en_start_port(dev);
 	if (err)
-		mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
+		en_err(priv, "Failed starting port:%d\n", priv->port);
 
 out:
 	mutex_unlock(&mdev->state_lock);
@@ -811,8 +801,7 @@ static int mlx4_en_close(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	if (netif_msg_ifdown(priv))
-		mlx4_info(mdev, "Close called for port:%d\n", priv->port);
+	en_dbg(IFDOWN, priv, "Close port called\n");
 
 	mutex_lock(&mdev->state_lock);
 
@@ -844,7 +833,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
 
@@ -873,7 +861,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 	return 0;
 
 err:
-	mlx4_err(mdev, "Failed to allocate NIC resources\n");
+	en_err(priv, "Failed to allocate NIC resources\n");
 	return -ENOMEM;
 }
 
@@ -883,7 +871,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
 	/* Unregister device - this will close the port if it was up */
 	if (priv->registered)
@@ -912,11 +900,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err = 0;
 
-	mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
-		 dev->mtu, new_mtu);
+	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+	       dev->mtu, new_mtu);
 
 	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
-		mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
+		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
 		return -EPERM;
 	}
 	dev->mtu = new_mtu;
@@ -926,13 +914,13 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	if (!mdev->device_up) {
 		/* NIC is probably restarting - let watchdog task reset
 		 * the port */
-		mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
+		en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 	} else {
 		mlx4_en_stop_port(dev);
 		mlx4_en_set_default_moderation(priv);
 		err = mlx4_en_start_port(dev);
 		if (err) {
-			mlx4_err(mdev, "Failed restarting port:%d\n",
+			en_err(priv, "Failed restarting port:%d\n",
 				 priv->port);
 			queue_work(mdev->workqueue, &priv->watchdog_task);
 		}
@@ -946,6 +934,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_open		= mlx4_en_open,
 	.ndo_stop		= mlx4_en_close,
 	.ndo_start_xmit		= mlx4_en_xmit,
+	.ndo_select_queue	= mlx4_en_select_queue,
 	.ndo_get_stats		= mlx4_en_get_stats,
 	.ndo_set_multicast_list	= mlx4_en_set_multicast,
 	.ndo_set_mac_address	= mlx4_en_set_mac,
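
Note: mlx4_en_select_queue() is added elsewhere in this series (en_tx.c) and
its hunk is not shown above. A sketch consistent with the ring layout this
patch sets up — per-priority rings placed after the MLX4_EN_NUM_TX_RINGS
default rings; the exact body is an assumption:

	u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
	{
		struct mlx4_en_priv *priv = netdev_priv(dev);

		/* With per-priority flow control configured, steer tagged
		 * packets to the ring dedicated to their VLAN priority
		 * (tag bits 15:13). */
		if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb))
			return MLX4_EN_NUM_TX_RINGS + (vlan_tx_tag_get(skb) >> 13);

		/* Otherwise hash the flow across the default rings */
		return skb_tx_hash(dev, skb);
	}
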
@@ -968,7 +957,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	int i;
 	int err;
 
-	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
 	if (dev == NULL) {
 		mlx4_err(mdev, "Net device allocation failed\n");
 		return -ENOMEM;
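
Note: alloc_etherdev_mq(sizeof_priv, queue_count) is the multiqueue-aware
variant of alloc_etherdev(); it allocates the net_device with queue_count TX
queue structures, which is what allows .ndo_select_queue to spread traffic
over the per-port rings. The old single-queue call was equivalent to:

	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), 1);
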
@@ -1006,7 +995,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
 	priv->mac = mdev->dev->caps.def_mac[priv->port];
 	if (ILLEGAL_MAC(priv->mac)) {
-		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
-			 priv->port, priv->mac);
+		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+		       priv->port, priv->mac);
 		err = -EINVAL;
 		goto out;
@@ -1025,19 +1014,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
 				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate page for rx qps\n");
+		en_err(priv, "Failed to allocate page for rx qps\n");
 		goto out;
 	}
 	priv->allocated = 1;
 
-	/* Populate Tx priority mappings */
-	mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
-
 	/*
 	 * Initialize netdev entry points
 	 */
 	dev->netdev_ops = &mlx4_netdev_ops;
 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+	dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;
 
 	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
 
@@ -1051,7 +1038,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	 * Set driver features
 	 */
 	dev->features |= NETIF_F_SG;
+	dev->vlan_features |= NETIF_F_SG;
 	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	dev->features |= NETIF_F_HIGHDMA;
 	dev->features |= NETIF_F_HW_VLAN_TX |
 			 NETIF_F_HW_VLAN_RX |
@@ -1061,6 +1050,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	if (mdev->LSO_support) {
 		dev->features |= NETIF_F_TSO;
 		dev->features |= NETIF_F_TSO6;
+		dev->vlan_features |= NETIF_F_TSO;
+		dev->vlan_features |= NETIF_F_TSO6;
 	}
 
 	mdev->pndev[port] = dev;
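
Note: the offloads have to be mirrored into vlan_features because VLAN
devices derive their own feature set from the lower device's vlan_features
rather than its features; roughly (a simplification of the 8021q setup path,
not verbatim kernel code):

	vlan_dev->features |= real_dev->features & real_dev->vlan_features;

Without these lines, SG/checksum/TSO would silently stay disabled on VLAN
interfaces stacked on mlx4_en ports.
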
@@ -1068,9 +1059,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	netif_carrier_off(dev);
 	err = register_netdev(dev);
 	if (err) {
-		mlx4_err(mdev, "Netdev registration failed\n");
+		en_err(priv, "Netdev registration failed for port %d\n", port);
 		goto out;
 	}
+
+	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+
 	priv->registered = 1;
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	return 0;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 6bfab6e5ba1d..5a14899c1e25 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -114,8 +114,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 			goto out;
 
 		page_alloc->offset = priv->frag_info[i].frag_align;
-		mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
-			 i, page_alloc->page);
+		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+		       i, page_alloc->page);
 	}
 	return 0;
 
@@ -136,8 +136,8 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 
 	for (i = 0; i < priv->num_frags; i++) {
 		page_alloc = &ring->page_alloc[i];
-		mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
-			 i, page_count(page_alloc->page));
+		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+		       i, page_count(page_alloc->page));
 
 		put_page(page_alloc->page);
 		page_alloc->page = NULL;
@@ -214,10 +214,10 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 
 	skb_frags = ring->rx_info + (index << priv->log_rx_info);
 	for (nr = 0; nr < priv->num_frags; nr++) {
-		mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
 
-		mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+		en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
 		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
 				 PCI_DMA_FROMDEVICE);
 		put_page(skb_frags[nr].page);
@@ -226,7 +226,6 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 
 static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
 	int ring_ind;
 	int buf_ind;
@@ -239,14 +238,14 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size)) {
 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-					mlx4_err(mdev, "Failed to allocate "
-						 "enough rx buffers\n");
+					en_err(priv, "Failed to allocate "
+						     "enough rx buffers\n");
 					return -ENOMEM;
 				} else {
 					new_size = rounddown_pow_of_two(ring->actual_size);
-					mlx4_warn(mdev, "Only %d buffers allocated "
-						  "reducing ring size to %d",
-						  ring->actual_size, new_size);
+					en_warn(priv, "Only %d buffers allocated "
+						      "reducing ring size to %d",
+						ring->actual_size, new_size);
 					goto reduce_rings;
 				}
 			}
@@ -282,8 +281,7 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
 				       ring->size_mask);
 		if (err) {
 			if (netif_msg_rx_err(priv))
-				mlx4_warn(priv->mdev,
-					  "Failed preparing rx descriptor\n");
+				en_warn(priv, "Failed preparing rx descriptor\n");
 			priv->port_stats.rx_alloc_failed++;
 			break;
 		}
@@ -301,14 +299,14 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 {
 	int index;
 
-	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
-		 ring->cons, ring->prod);
+	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+	       ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
 	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
 	while (ring->cons != ring->prod) {
 		index = ring->cons & ring->size_mask;
-		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
 		mlx4_en_free_rx_desc(priv, ring, index);
 		++ring->cons;
 	}
@@ -373,10 +371,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 					sizeof(struct skb_frag_struct));
 	ring->rx_info = vmalloc(tmp);
 	if (!ring->rx_info) {
-		mlx4_err(mdev, "Failed allocating rx_info ring\n");
+		en_err(priv, "Failed allocating rx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
 
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
@@ -386,7 +384,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map RX buffer\n");
+		en_err(priv, "Failed to map RX buffer\n");
 		goto err_hwq;
 	}
 	ring->buf = ring->wqres.buf.direct.buf;
@@ -404,7 +402,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 					sizeof(struct net_lro_desc),
 					GFP_KERNEL);
 	if (!ring->lro.lro_arr) {
-		mlx4_err(mdev, "Failed to allocate lro array\n");
+		en_err(priv, "Failed to allocate lro array\n");
 		goto err_map;
 	}
 	ring->lro.get_frag_header = mlx4_en_get_frag_header;
@@ -455,7 +453,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		/* Initialize page allocators */
 		err = mlx4_en_init_allocator(priv, ring);
 		if (err) {
-			mlx4_err(mdev, "Failed initializing ring allocator\n");
+			en_err(priv, "Failed initializing ring allocator\n");
 			ring_ind--;
 			goto err_allocator;
 		}
@@ -486,7 +484,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
 				     ring->wqres.db.dma, &ring->srq);
 		if (err){
-			mlx4_err(mdev, "Failed to allocate srq\n");
+			en_err(priv, "Failed to allocate srq\n");
 			ring_ind--;
 			goto err_srq;
 		}
@@ -601,7 +599,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
 	skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
 	if (!skb) {
-		mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+		en_dbg(RX_ERR, priv, "Failed allocating skb\n");
 		return NULL;
 	}
 	skb->dev = priv->dev;
@@ -680,7 +678,6 @@ static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_cqe *cqe;
 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
 	struct skb_frag_struct *skb_frags;
@@ -717,14 +714,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/* Drop packet on bad receive or bad checksum */
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 						MLX4_CQE_OPCODE_ERROR)) {
-			mlx4_err(mdev, "CQE completed in error - vendor "
-				 "syndrom:%d syndrom:%d\n",
-				 ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
-				 ((struct mlx4_err_cqe *) cqe)->syndrome);
+			en_err(priv, "CQE completed in error - vendor "
+				  "syndrom:%d syndrom:%d\n",
+				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
+				  ((struct mlx4_err_cqe *) cqe)->syndrome);
 			goto next;
 		}
 		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
-			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
 			goto next;
 		}
 
@@ -874,7 +871,7 @@ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16
 	u16 res = MLX4_EN_ALLOC_SIZE % stride;
 	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
 
-	mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+	en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
 		"res:%d offset:%d\n", stride, align, res, offset);
 	return offset;
 }
@@ -919,10 +916,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	priv->rx_skb_size = eff_mtu;
 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
 
-	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
-		 "num_frags:%d):\n", eff_mtu, priv->num_frags);
+	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
-		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
+		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
 				"stride:%d last_offset:%d\n", i,
 				priv->frag_info[i].frag_size,
 				priv->frag_info[i].frag_prefix_size,
@@ -942,12 +939,12 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 	int i;
 
 	rss_map->size = roundup_pow_of_two(num_entries);
-	mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
-		 rss_map->size);
+	en_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
+	       rss_map->size);
 
 	for (i = 0; i < rss_map->size; i++) {
 		rss_map->map[i] = i % num_rings;
-		mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
+		en_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
 	}
 }
 
@@ -962,13 +959,13 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
 
 	context = kmalloc(sizeof *context , GFP_KERNEL);
 	if (!context) {
-		mlx4_err(mdev, "Failed to allocate qp context\n");
+		en_err(priv, "Failed to allocate qp context\n");
 		return -ENOMEM;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+		en_err(priv, "Failed to allocate qp #%x\n", qpn);
 		goto out;
 	}
 	qp->event = mlx4_en_sqp_event;
@@ -1000,12 +997,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	int err = 0;
 	int good_qps = 0;
 
-	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+	en_dbg(DRV, priv, "Configuring rss steering\n");
 	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
 				    rss_map->size, &rss_map->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
-			 rss_map->size, priv->port);
+		en_err(priv, "Failed reserving %d qps\n", rss_map->size);
 		return err;
 	}
 
@@ -1025,13 +1021,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	/* Configure RSS indirection qp */
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed to reserve range for RSS "
-			 "indirection qp\n");
+		en_err(priv, "Failed to reserve range for RSS "
+			     "indirection qp\n");
 		goto rss_err;
 	}
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+		en_err(priv, "Failed to allocate RSS indirection QP\n");
 		goto reserve_err;
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index ac6fc499b280..5dc7466ad035 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -68,15 +68,15 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
 	if (!ring->tx_info) {
-		mlx4_err(mdev, "Failed allocating tx_info ring\n");
+		en_err(priv, "Failed allocating tx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
 	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
 	if (!ring->bounce_buf) {
-		mlx4_err(mdev, "Failed allocating bounce buffer\n");
+		en_err(priv, "Failed allocating bounce buffer\n");
 		err = -ENOMEM;
 		goto err_tx;
 	}
@@ -85,31 +85,31 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 				 2 * PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating hwq resources\n");
+		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
 	}
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map TX buffer\n");
+		en_err(priv, "Failed to map TX buffer\n");
 		goto err_hwq_res;
 	}
 
 	ring->buf = ring->wqres.buf.direct.buf;
 
-	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
-		 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
-		 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
+	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
 
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+		en_err(priv, "Failed reserving qp for tx ring.\n");
 		goto err_map;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
 		goto err_reserve;
 	}
 	ring->qp.event = mlx4_en_sqp_event;
@@ -135,7 +135,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 					     struct mlx4_en_tx_ring *ring)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
@@ -274,12 +274,12 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 
 	/* Skip last polled descriptor */
 	ring->cons += ring->last_nr_txbb;
-	mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
 		 ring->cons, ring->prod);
 
 	if ((u32) (ring->prod - ring->cons) > ring->size) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
+			en_warn(priv, "Tx consumer passed producer!\n");
 		return 0;
 	}
 
@@ -292,39 +292,11 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	}
 
 	if (cnt)
-		mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
 
 	return cnt;
 }
 
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
-{
-	int block = 8 / ring_num;
-	int extra = 8 - (block * ring_num);
-	int num = 0;
-	u16 ring = 1;
-	int prio;
-
-	if (ring_num == 1) {
-		for (prio = 0; prio < 8; prio++)
-			prio_map[prio] = 0;
-		return;
-	}
-
-	for (prio = 0; prio < 8; prio++) {
-		if (extra && (num == block + 1)) {
-			ring++;
-			num = 0;
-			extra--;
-		} else if (!extra && (num == block)) {
-			ring++;
-			num = 0;
-		}
-		prio_map[prio] = ring;
-		mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
-		num++;
-	}
-}
 
 static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 {
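[Editor's note] The deleted mlx4_en_set_prio_map() spread the eight VLAN priorities as evenly as possible over the available rings; it is obsolete now that tagged traffic goes to a fixed block of per-priority rings. A standalone user-space sketch of the distribution it implemented (hypothetical harness, not driver code):

	#include <stdio.h>

	/* Same arithmetic as the removed helper: 8 priorities over rings
	 * 1..ring_num, lower-numbered rings taking one extra priority when
	 * 8 % ring_num != 0; a single ring maps every priority to ring 0. */
	static void set_prio_map(unsigned short *prio_map, unsigned int ring_num)
	{
		unsigned int block = 8 / ring_num;
		unsigned int extra = 8 - block * ring_num;
		unsigned int num = 0, prio;
		unsigned short ring = 1;

		if (ring_num == 1) {
			for (prio = 0; prio < 8; prio++)
				prio_map[prio] = 0;
			return;
		}
		for (prio = 0; prio < 8; prio++) {
			if (extra && num == block + 1) {
				ring++;
				num = 0;
				extra--;
			} else if (!extra && num == block) {
				ring++;
				num = 0;
			}
			prio_map[prio] = ring;
			num++;
		}
	}

	int main(void)
	{
		unsigned short map[8];
		int prio;

		set_prio_map(map, 3);	/* e.g. three TX rings */
		for (prio = 0; prio < 8; prio++)
			printf("prio:%d --> ring:%u\n", prio, map[prio]);
		return 0;
	}

With ring_num = 3 this prints rings 1,1,1,2,2,2,3,3: the first two rings absorb the two leftover priorities.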
@@ -386,18 +358,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 	if (unlikely(ring->blocked)) {
 		if ((u32) (ring->prod - ring->cons) <=
 		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
-
-			/* TODO: support multiqueue netdevs. Currently, we block
-			 * when *any* ring is full. Note that:
-			 * - 2 Tx rings can unblock at the same time and call
-			 *   netif_wake_queue(), which is OK since this
-			 *   operation is idempotent.
-			 * - We might wake the queue just after another ring
-			 *   stopped it. This is no big deal because the next
-			 *   transmission on that ring would stop the queue.
-			 */
 			ring->blocked = 0;
-			netif_wake_queue(dev);
+			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
 			priv->port_stats.wake_queue++;
 		}
 	}
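[Editor's note] The wake-up test above relies on free-running producer/consumer counters: both are u32 and only their difference is used, so the accounting stays correct across wraparound. A minimal user-space demonstration (the constants are illustrative, not the driver's actual values):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* cons has wrapped; prod is logically 10 ahead of it */
		uint32_t prod = 0x00000005, cons = 0xfffffffb;
		uint32_t size = 512, headroom = 4, max_desc_txbbs = 16;

		uint32_t inflight = prod - cons;	/* 10, despite the wrap */
		printf("inflight TXBBs: %u\n", inflight);
		if (inflight <= size - headroom - max_desc_txbbs)
			printf("room available: ring may be unblocked\n");
		return 0;
	}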
@@ -426,7 +388,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 
 	INC_PERF_COUNTER(priv->pstats.tx_poll);
 
-	if (!spin_trylock(&ring->comp_lock)) {
+	if (!spin_trylock_irq(&ring->comp_lock)) {
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 		return;
 	}
@@ -439,7 +401,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 	if (inflight && priv->port_up)
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 
-	spin_unlock(&ring->comp_lock);
+	spin_unlock_irq(&ring->comp_lock);
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -482,9 +444,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock(&ring->comp_lock)) {
+		if (spin_trylock_irq(&ring->comp_lock)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock(&ring->comp_lock);
+			spin_unlock_irq(&ring->comp_lock);
 		}
 }
 
@@ -539,7 +501,6 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			  int *lso_header_size)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	int real_size;
 
 	if (skb_is_gso(skb)) {
@@ -553,14 +514,14 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			real_size += DS_SIZE;
 		else {
 			if (netif_msg_tx_err(priv))
-				mlx4_warn(mdev, "Non-linear headers\n");
+				en_warn(priv, "Non-linear headers\n");
 			dev_kfree_skb_any(skb);
 			return 0;
 		}
 	}
 	if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "LSO header size too big\n");
+			en_warn(priv, "LSO header size too big\n");
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
@@ -617,21 +578,20 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
 	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
 }
 
-static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
-			 u16 *vlan_tag)
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
-	int tx_ind;
+	struct mlx4_en_priv *priv = netdev_priv(dev);
+	u16 vlan_tag = 0;
 
-	/* Obtain VLAN information if present */
-	if (priv->vlgrp && vlan_tx_tag_present(skb)) {
-		*vlan_tag = vlan_tx_tag_get(skb);
-		/* Set the Tx ring to use according to vlan priority */
-		tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
-	} else {
-		*vlan_tag = 0;
-		tx_ind = 0;
+	/* If we support per priority flow control and the packet contains
+	 * a vlan tag, send the packet to the TX ring assigned to that priority
+	 */
+	if (priv->prof->rx_ppp && priv->vlgrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = vlan_tx_tag_get(skb);
+		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
 	}
-	return tx_ind;
+
+	return skb_tx_hash(dev, skb);
 }
 
 int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
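[Editor's note] For reference, the new mlx4_en_select_queue() places tagged traffic on one of the MLX4_EN_NUM_PPP_RINGS per-priority rings located after the MLX4_EN_NUM_TX_RINGS hashed rings, indexed by the VLAN PCP field (the top three bits of the tag). A user-space sketch of that mapping (the names and the hash stand-in are assumptions, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_TX_RINGS	8	/* mirrors MLX4_EN_NUM_TX_RINGS */

	static uint16_t select_queue(uint16_t vlan_tci, int ppp_enabled)
	{
		if (ppp_enabled)
			return NUM_TX_RINGS + (vlan_tci >> 13);	/* PCP: 0..7 */
		return 0;	/* stand-in for skb_tx_hash() */
	}

	int main(void)
	{
		uint16_t tci = (5u << 13) | 100;	/* priority 5, VID 100 */

		printf("queue = %u\n", select_queue(tci, 1));	/* 8 + 5 = 13 */
		return 0;
	}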
@@ -651,7 +611,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t dma;
 	u32 index;
 	__be32 op_own;
-	u16 vlan_tag;
+	u16 vlan_tag = 0;
 	int i;
 	int lso_header_size;
 	void *fragptr;
@@ -669,20 +629,21 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	nr_txbb = desc_size / TXBB_SIZE;
 	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "Oversized header or SG list\n");
+			en_warn(priv, "Oversized header or SG list\n");
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
-	tx_ind = get_vlan_info(priv, skb, &vlan_tag);
+	tx_ind = skb->queue_mapping;
 	ring = &priv->tx_ring[tx_ind];
+	if (priv->vlgrp && vlan_tx_tag_present(skb))
+		vlan_tag = vlan_tx_tag_get(skb);
 
 	/* Check available TXBBs And 2K spare for prefetch */
 	if (unlikely(((int)(ring->prod - ring->cons)) >
 		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
-		/* every full Tx ring stops queue.
-		 * TODO: implement multi-queue support (per-queue stop) */
-		netif_stop_queue(dev);
+		/* every full Tx ring stops queue */
+		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
 		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
 
@@ -695,7 +656,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Now that we know what Tx ring to use */
 	if (unlikely(!priv->port_up)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "xmit: port down!\n");
+			en_warn(priv, "xmit: port down!\n");
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
@@ -819,7 +780,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Ring doorbell! */
 	wmb();
 	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
-	dev->trans_start = jiffies;
 
 	/* Poll CQ here */
 	mlx4_en_xmit_poll(priv, tx_ind);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8830dcb92ec8..dee188761a3c 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -623,8 +623,10 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
 				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
 				     &priv->eq_table.eq[i]);
-		if (err)
+		if (err) {
+			--i;
 			goto err_out_unmap;
+		}
 	}
 
 	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
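[Editor's note] The eq.c change fixes cleanup indexing: when mlx4_create_eq() fails for eq[i], entry i was never created, so the unwind path must start freeing at i - 1. A self-contained illustration of the pattern (create/destroy are stand-ins, not kernel calls):

	#include <stdio.h>

	static int create(int i)   { return i == 3 ? -1 : 0; }	/* fail on the 4th */
	static void destroy(int i) { printf("destroy %d\n", i); }

	int main(void)
	{
		int i, n = 5;

		for (i = 0; i < n; ++i) {
			if (create(i)) {
				--i;		/* item i does not exist */
				goto err_unwind;
			}
		}
		return 0;

	err_unwind:
		for (; i >= 0; --i)
			destroy(i);		/* destroys 2, 1, 0 only */
		return 1;
	}

Without the --i, the unwind loop would try to destroy the entry whose creation just failed.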
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index ef840abbcd39..d43a9e4c2aea 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -49,26 +49,42 @@
 #include "en_port.h"
 
 #define DRV_NAME	"mlx4_en"
-#define DRV_VERSION	"1.4.0"
-#define DRV_RELDATE	"Sep 2008"
+#define DRV_VERSION	"1.4.1.1"
+#define DRV_RELDATE	"June 2009"
 
 
 #define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
-#define mlx4_dbg(mlevel, priv, format, arg...) \
-	if (NETIF_MSG_##mlevel & priv->msg_enable) \
-	printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
-		(dev_name(&priv->mdev->pdev->dev)) , ## arg)
+#define en_print(level, priv, format, arg...)			\
+	{							\
+	if ((priv)->registered)					\
+		printk(level "%s: %s: " format, DRV_NAME,	\
+			(priv->dev)->name, ## arg);		\
+	else							\
+		printk(level "%s: %s: Port %d: " format,	\
+			DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
+			(priv)->port, ## arg);			\
+	}
+
+#define en_dbg(mlevel, priv, format, arg...)			\
+	{							\
+	if (NETIF_MSG_##mlevel & priv->msg_enable)		\
+		en_print(KERN_DEBUG, priv, format, ## arg)	\
+	}
+#define en_warn(priv, format, arg...)				\
+	en_print(KERN_WARNING, priv, format, ## arg)
+#define en_err(priv, format, arg...)				\
+	en_print(KERN_ERR, priv, format, ## arg)
 
 #define mlx4_err(mdev, format, arg...) \
 	printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 #define mlx4_info(mdev, format, arg...) \
 	printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 #define mlx4_warn(mdev, format, arg...) \
 	printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 
 /*
  * Device constants
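[Editor's note] The new en_* macros pick their prefix at run time: once the netdev is registered they print the interface name, otherwise the PCI device name plus port number. A rough user-space approximation of that behaviour (the struct layout and names here are assumptions, not the driver's):

	#include <stdio.h>

	struct fake_priv {
		int registered;
		const char *ifname;	/* stands in for priv->dev->name */
		const char *pcidev;	/* stands in for dev_name(&...->pdev->dev) */
		int port;
	};

	#define EN_PRINT(priv, fmt, ...)				\
		do {							\
			if ((priv)->registered)				\
				printf("mlx4_en: %s: " fmt,		\
				       (priv)->ifname, ##__VA_ARGS__);	\
			else						\
				printf("mlx4_en: %s: Port %d: " fmt,	\
				       (priv)->pcidev, (priv)->port,	\
				       ##__VA_ARGS__);			\
		} while (0)

	int main(void)
	{
		struct fake_priv p = { 0, "eth2", "0000:07:00.0", 1 };

		EN_PRINT(&p, "Failed allocating qp %d\n", 42);	/* PCI form */
		p.registered = 1;
		EN_PRINT(&p, "Link up\n");			/* ifname form */
		return 0;
	}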
@@ -123,12 +139,14 @@ enum {
 #define MLX4_EN_MIN_RX_SIZE	(MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
 #define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)
 
-#define MLX4_EN_TX_RING_NUM		9
-#define MLX4_EN_DEF_TX_RING_SIZE	1024
+#define MLX4_EN_SMALL_PKT_SIZE		64
+#define MLX4_EN_NUM_TX_RINGS		8
+#define MLX4_EN_NUM_PPP_RINGS		8
+#define MLX4_EN_DEF_TX_RING_SIZE	512
 #define MLX4_EN_DEF_RX_RING_SIZE	1024
 
-/* Target number of bytes to coalesce with interrupt moderation */
-#define MLX4_EN_RX_COAL_TARGET	0x20000
+/* Target number of packets to coalesce with interrupt moderation */
+#define MLX4_EN_RX_COAL_TARGET	44
 #define MLX4_EN_RX_COAL_TIME	0x10
 
 #define MLX4_EN_TX_COAL_PKTS	5
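[Editor's note] The retargeted constant also changes units: the old goal was 0x20000 bytes (128 KiB) per interrupt, while the new one is 44 packets, so the byte equivalent now scales with frame size. A quick back-of-envelope calculation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int old_bytes = 0x20000;		/* 128 KiB target */
		unsigned int pkts = 44;				/* new target */
		unsigned int sizes[] = { 64, 1500, 9000 };	/* frame sizes */
		unsigned int i;

		for (i = 0; i < 3; i++)
			printf("%4u-byte frames: %u pkts = %7u bytes (was %u)\n",
			       sizes[i], pkts, pkts * sizes[i], old_bytes);
		return 0;
	}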
@@ -462,7 +480,6 @@ struct mlx4_en_priv {
 	int base_qpn;
 
 	struct mlx4_en_rss_map rss_map;
-	u16 tx_prio_map[8];
 	u32 flags;
 #define MLX4_EN_FLAG_PROMISC	0x1
 	u32 tx_ring_num;
@@ -500,8 +517,6 @@ void mlx4_en_stop_port(struct net_device *dev);
 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
 
-int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
-
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 		      int entries, int ring, enum cq_type mode);
 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -512,6 +527,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
 void mlx4_en_poll_tx_cq(unsigned long data);
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
@@ -546,7 +562,6 @@ void mlx4_en_calc_rx_buf(struct net_device *dev);
 void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 				 struct mlx4_en_rss_map *rss_map,
 				 int num_entries, int num_rings);
-void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0caf74cae8bc..0a467785f065 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -402,7 +402,8 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	for (i = 0; i < npages; ++i)
 		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+				npages * sizeof (u64), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -549,8 +550,8 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
 	for (i = 0; i < npages; ++i)
 		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single(&dev->pdev->dev, fmr->dma_handle,
-			npages * sizeof(u64), DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+				npages * sizeof(u64), DMA_TO_DEVICE);
 
 	fmr->mpt->key    = cpu_to_be32(key);
 	fmr->mpt->lkey   = cpu_to_be32(key);