author	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-26 00:02:22 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-26 00:02:22 -0400
commit	00a2470546dd8427325636a711a42c934135dbf5 (patch)
tree	9567002c1ae07a918ccf11ec2a72c6e4831cb535 /drivers/net/mlx4
parent	5aafdea448fb86412a6f8e46df518c1545d32436 (diff)
parent	6df59a84eccd4cad7fcefda3e0c5e55239a3b2dd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits)
  route: Take the right src and dst addresses in ip_route_newports
  ipv4: Fix nexthop caching wrt. scoping.
  ipv4: Invalidate nexthop cache nh_saddr more correctly.
  net: fix pch_gbe section mismatch warning
  ipv4: fix fib metrics
  mlx4_en: Removing HW info from ethtool -i report.
  net_sched: fix THROTTLED/RUNNING race
  drivers/net/a2065.c: Convert release_resource to release_region/release_mem_region
  drivers/net/ariadne.c: Convert release_resource to release_region/release_mem_region
  bonding: fix rx_handler locking
  myri10ge: fix rmmod crash
  mlx4_en: updated driver version to 1.5.4.1
  mlx4_en: Using blue flame support
  mlx4_core: reserve UARs for userspace consumers
  mlx4_core: maintain available field in bitmap allocator
  mlx4: Add blue flame support for kernel consumers
  mlx4_en: Enabling new steering
  mlx4: Add support for promiscuous mode in the new steering model.
  mlx4: generalization of multicast steering.
  mlx4_en: Reporting HW revision in ethtool -i
  ...
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--	drivers/net/mlx4/alloc.c	| 13
-rw-r--r--	drivers/net/mlx4/cq.c	| 2
-rw-r--r--	drivers/net/mlx4/en_cq.c	| 38
-rw-r--r--	drivers/net/mlx4/en_ethtool.c	| 66
-rw-r--r--	drivers/net/mlx4/en_main.c	| 22
-rw-r--r--	drivers/net/mlx4/en_netdev.c	| 199
-rw-r--r--	drivers/net/mlx4/en_port.c	| 13
-rw-r--r--	drivers/net/mlx4/en_port.h	| 19
-rw-r--r--	drivers/net/mlx4/en_rx.c	| 11
-rw-r--r--	drivers/net/mlx4/en_tx.c	| 72
-rw-r--r--	drivers/net/mlx4/eq.c	| 107
-rw-r--r--	drivers/net/mlx4/fw.c	| 25
-rw-r--r--	drivers/net/mlx4/fw.h	| 3
-rw-r--r--	drivers/net/mlx4/main.c	| 119
-rw-r--r--	drivers/net/mlx4/mcg.c	| 646
-rw-r--r--	drivers/net/mlx4/mlx4.h	| 50
-rw-r--r--	drivers/net/mlx4/mlx4_en.h	| 27
-rw-r--r--	drivers/net/mlx4/pd.c	| 102
-rw-r--r--	drivers/net/mlx4/port.c	| 165
-rw-r--r--	drivers/net/mlx4/profile.c	| 4
20 files changed, 1520 insertions(+), 183 deletions(-)
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 3a4277f6fac4..116cae334dad 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -62,6 +62,9 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
 	} else
 		obj = -1;
 
+	if (obj != -1)
+		--bitmap->avail;
+
 	spin_unlock(&bitmap->lock);
 
 	return obj;
@@ -101,11 +104,19 @@ u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
 	} else
 		obj = -1;
 
+	if (obj != -1)
+		bitmap->avail -= cnt;
+
 	spin_unlock(&bitmap->lock);
 
 	return obj;
 }
 
+u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap)
+{
+	return bitmap->avail;
+}
+
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
 {
 	obj &= bitmap->max + bitmap->reserved_top - 1;
@@ -115,6 +126,7 @@ void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
 	bitmap->last = min(bitmap->last, obj);
 	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
 			& bitmap->mask;
+	bitmap->avail += cnt;
 	spin_unlock(&bitmap->lock);
 }
 
@@ -130,6 +142,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
 	bitmap->max  = num - reserved_top;
 	bitmap->mask = mask;
 	bitmap->reserved_top = reserved_top;
+	bitmap->avail = num - reserved_top - reserved_bot;
 	spin_lock_init(&bitmap->lock);
 	bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
 				sizeof (long), GFP_KERNEL);
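
A note on the alloc.c hunks: they add an "avail" counter so the new mlx4_bitmap_avail() can report free entries without scanning the bitmap; it starts at num - reserved_top - reserved_bot, drops on allocation, and is restored on free. A minimal standalone C model of that bookkeeping (simplified fields, not the real struct mlx4_bitmap):

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the mlx4 bitmap allocator's "avail" bookkeeping. */
struct bitmap_model {
	uint32_t avail;
};

static void bitmap_init(struct bitmap_model *b, uint32_t num,
			uint32_t reserved_top, uint32_t reserved_bot)
{
	b->avail = num - reserved_top - reserved_bot;
}

static int bitmap_alloc_range(struct bitmap_model *b, uint32_t cnt)
{
	if (b->avail < cnt)
		return -1;	/* would exhaust the pool */
	b->avail -= cnt;	/* mirrors "bitmap->avail -= cnt" */
	return 0;
}

static void bitmap_free_range(struct bitmap_model *b, uint32_t cnt)
{
	b->avail += cnt;	/* mirrors "bitmap->avail += cnt" */
}

int main(void)
{
	struct bitmap_model b;

	bitmap_init(&b, 128, 8, 8);	/* 112 usable entries */
	bitmap_alloc_range(&b, 16);
	printf("available after alloc: %u\n", b.avail);	/* 96 */
	bitmap_free_range(&b, 16);
	printf("available after free:  %u\n", b.avail);	/* 112 */
	return 0;
}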
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 7cd34e9c7c7e..bd8ef9f2fa71 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -198,7 +198,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 	u64 mtt_addr;
 	int err;
 
-	if (vector >= dev->caps.num_comp_vectors)
+	if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
 		return -EINVAL;
 
 	cq->vector = vector;
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 21786ad4455e..ec4b6d047fe0 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -51,13 +51,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	int err;
 
 	cq->size = entries;
-	if (mode == RX) {
+	if (mode == RX)
 		cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-		cq->vector = ring % mdev->dev->caps.num_comp_vectors;
-	} else {
+	else
 		cq->buf_size = sizeof(struct mlx4_cqe);
-		cq->vector = 0;
-	}
 
 	cq->ring = ring;
 	cq->is_tx = mode;
@@ -80,7 +77,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int err;
+	int err = 0;
+	char name[25];
 
 	cq->dev = mdev->pndev[priv->port];
 	cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -89,6 +87,29 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	*cq->mcq.arm_db = 0;
 	memset(cq->buf, 0, cq->buf_size);
 
+	if (cq->is_tx == RX) {
+		if (mdev->dev->caps.comp_pool) {
+			if (!cq->vector) {
+				sprintf(name , "%s-rx-%d", priv->dev->name, cq->ring);
+				if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+					cq->vector = (cq->ring + 1 + priv->port) %
+						mdev->dev->caps.num_comp_vectors;
+					mlx4_warn(mdev, "Failed Assigning an EQ to "
+						  "%s_rx-%d ,Falling back to legacy EQ's\n",
+						  priv->dev->name, cq->ring);
+				}
+			}
+		} else {
+			cq->vector = (cq->ring + 1 + priv->port) %
+				mdev->dev->caps.num_comp_vectors;
+		}
+	} else {
+		if (!cq->vector || !mdev->dev->caps.comp_pool) {
+			/*Fallback to legacy pool in case of error*/
+			cq->vector = 0;
+		}
+	}
+
 	if (!cq->is_tx)
 		cq->size = priv->rx_ring[cq->ring].actual_size;
 
@@ -112,12 +133,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	return 0;
 }
 
-void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+			bool reserve_vectors)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+	if (priv->mdev->dev->caps.comp_pool && cq->vector && !reserve_vectors)
+		mlx4_release_eq(priv->mdev->dev, cq->vector);
 	cq->buf_size = 0;
 	cq->buf = NULL;
 }
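
A note on the en_cq.c change: RX completion queues now try to get a dedicated EQ from the new comp_pool via mlx4_assign_eq(), and on failure round-robin over the legacy vectors instead. A standalone C sketch of just that fallback arithmetic (the stub and the numeric values are illustrative assumptions, not driver code):

#include <stdio.h>

/* Stand-in for mlx4_assign_eq(); here we pretend the pool is exhausted. */
static int assign_eq_stub(int *vector)
{
	(void)vector;
	return -1;
}

int main(void)
{
	int num_comp_vectors = 4;	/* legacy vectors (assumed) */
	int port = 1, ring = 3;
	int vector = 0;

	if (assign_eq_stub(&vector)) {
		/* mirrors: cq->vector = (cq->ring + 1 + priv->port) %
		 *          mdev->dev->caps.num_comp_vectors; */
		vector = (ring + 1 + port) % num_comp_vectors;
		printf("fell back to legacy vector %d\n", vector);	/* 1 */
	}
	return 0;
}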
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 056152b3ff58..d54b7abf0225 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -45,7 +45,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
+	strncpy(drvinfo->driver, DRV_NAME, 32);
 	strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
 	sprintf(drvinfo->fw_version, "%d.%d.%d",
 		(u16) (mdev->dev->caps.fw_ver >> 32),
@@ -131,8 +131,65 @@ static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
 static void mlx4_en_get_wol(struct net_device *netdev,
 			    struct ethtool_wolinfo *wol)
 {
-	wol->supported = 0;
-	wol->wolopts = 0;
+	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	int err = 0;
+	u64 config = 0;
+
+	if (!priv->mdev->dev->caps.wol) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+		return;
+	}
+
+	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+	if (err) {
+		en_err(priv, "Failed to get WoL information\n");
+		return;
+	}
+
+	if (config & MLX4_EN_WOL_MAGIC)
+		wol->supported = WAKE_MAGIC;
+	else
+		wol->supported = 0;
+
+	if (config & MLX4_EN_WOL_ENABLED)
+		wol->wolopts = WAKE_MAGIC;
+	else
+		wol->wolopts = 0;
+}
+
+static int mlx4_en_set_wol(struct net_device *netdev,
+			   struct ethtool_wolinfo *wol)
+{
+	struct mlx4_en_priv *priv = netdev_priv(netdev);
+	u64 config = 0;
+	int err = 0;
+
+	if (!priv->mdev->dev->caps.wol)
+		return -EOPNOTSUPP;
+
+	if (wol->supported & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
+	if (err) {
+		en_err(priv, "Failed to get WoL info, unable to modify\n");
+		return err;
+	}
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
+			MLX4_EN_WOL_MAGIC;
+	} else {
+		config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
+		config |= MLX4_EN_WOL_DO_MODIFY;
+	}
+
+	err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
+	if (err)
+		en_err(priv, "Failed to set WoL information\n");
+
+	return err;
 }
 
 static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
@@ -388,7 +445,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 		mlx4_en_stop_port(dev);
 	}
 
-	mlx4_en_free_resources(priv);
+	mlx4_en_free_resources(priv, true);
 
 	priv->prof->tx_ring_size = tx_size;
 	priv->prof->rx_ring_size = rx_size;
@@ -442,6 +499,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
 	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
 	.self_test = mlx4_en_self_test,
 	.get_wol = mlx4_en_get_wol,
+	.set_wol = mlx4_en_set_wol,
 	.get_msglevel = mlx4_en_get_msglevel,
 	.set_msglevel = mlx4_en_set_msglevel,
 	.get_coalesce = mlx4_en_get_coalesce,
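
A note on the WoL handlers added above: both operate on the 64-bit word returned by mlx4_wol_read() and written back with mlx4_wol_write(), toggling the enable/magic bits and always setting the do-modify bit on writes. A standalone sketch of the bit logic; the flag values below are illustrative placeholders, not the real MLX4_EN_WOL_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values; the driver's MLX4_EN_WOL_* constants may
 * use different bit positions. */
#define WOL_MAGIC     (1ULL << 61)
#define WOL_ENABLED   (1ULL << 62)
#define WOL_DO_MODIFY (1ULL << 63)

static uint64_t wol_enable(uint64_t config)
{
	/* mirrors the WAKE_MAGIC branch of mlx4_en_set_wol() */
	return config | WOL_DO_MODIFY | WOL_ENABLED | WOL_MAGIC;
}

static uint64_t wol_disable(uint64_t config)
{
	config &= ~(WOL_ENABLED | WOL_MAGIC);
	return config | WOL_DO_MODIFY;
}

int main(void)
{
	uint64_t config = 0;

	config = wol_enable(config);
	printf("magic WoL on:  %d\n", !!(config & WOL_ENABLED));
	config = wol_disable(config);
	printf("magic WoL off: %d\n", !(config & WOL_ENABLED));
	return 0;
}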
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 1ff6ca6466ed..9317b61a75b8 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -241,16 +241,18 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
 		mdev->port_cnt++;
 
-	/* If we did not receive an explicit number of Rx rings, default to
-	 * the number of completion vectors populated by the mlx4_core */
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-		mlx4_info(mdev, "Using %d tx rings for port:%d\n",
-			  mdev->profile.prof[i].tx_ring_num, i);
-		mdev->profile.prof[i].rx_ring_num = min_t(int,
-			roundup_pow_of_two(dev->caps.num_comp_vectors),
-			MAX_RX_RINGS);
-		mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
-			  mdev->profile.prof[i].rx_ring_num, i);
+		if (!dev->caps.comp_pool) {
+			mdev->profile.prof[i].rx_ring_num =
+				rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
+							   min_t(int,
+								 dev->caps.num_comp_vectors,
+								 MAX_RX_RINGS)));
+		} else {
+			mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
+				min_t(int, dev->caps.comp_pool/
+				      dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
+		}
 	}
 
 	/* Create our own workqueue for reset/multicast tasks
@@ -294,7 +296,7 @@ static struct mlx4_interface mlx4_en_interface = {
 	.remove		= mlx4_en_remove,
 	.event		= mlx4_en_event,
 	.get_dev	= mlx4_en_get_netdev,
-	.protocol	= MLX4_PROTOCOL_EN,
+	.protocol	= MLX4_PROT_ETH,
 };
 
 static int __init mlx4_en_init(void)
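
A note on the en_main.c hunk: the default RX ring count is now clamped between MIN_RX_RINGS and MAX_RX_RINGS and rounded down to a power of two (the old code rounded up, which could exceed the available completion vectors); when a vector pool exists, the pool is divided among the ports instead. A standalone sketch of the no-pool arithmetic, with assumed limits:

#include <stdio.h>

/* Assumed limits for illustration; the real values come from mlx4_en.h. */
#define MIN_RX_RINGS 4
#define MAX_RX_RINGS 16

static int rounddown_pow_of_two(int n)
{
	int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int num_comp_vectors = 6;	/* e.g. one vector per CPU (assumed) */
	int rings;

	/* mirrors: rounddown_pow_of_two(max(MIN_RX_RINGS,
	 *          min(num_comp_vectors, MAX_RX_RINGS))) */
	rings = rounddown_pow_of_two(
		max_int(MIN_RX_RINGS, min_int(num_comp_vectors, MAX_RX_RINGS)));
	printf("default rx rings: %d\n", rings);	/* 4 */
	return 0;
}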
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 897f576b8b17..5762ebde4455 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -156,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		/* Remove old MAC and insert the new one */
-		mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
-		err = mlx4_register_mac(mdev->dev, priv->port,
-					priv->mac, &priv->mac_index);
+		err = mlx4_replace_mac(mdev->dev, priv->port,
+				       priv->base_qpn, priv->mac, 0);
 		if (err)
 			en_err(priv, "Failed changing HW MAC address\n");
 	} else
@@ -214,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
 	u64 mcast_addr = 0;
+	u8 mc_list[16] = {0};
 	int err;
 
 	mutex_lock(&mdev->state_lock);
@@ -239,8 +239,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 			priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 			/* Enable promiscouos mode */
-			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-						     priv->base_qpn, 1);
+			if (!mdev->dev->caps.vep_uc_steering)
+				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+							     priv->base_qpn, 1);
+			else
+				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+							       priv->port);
 			if (err)
 				en_err(priv, "Failed enabling "
 					"promiscous mode\n");
@@ -252,10 +256,21 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 				en_err(priv, "Failed disabling "
 					"multicast filter\n");
 
-			/* Disable port VLAN filter */
-			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
-			if (err)
-				en_err(priv, "Failed disabling VLAN filter\n");
+			/* Add the default qp number as multicast promisc */
+			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+								 priv->port);
+				if (err)
+					en_err(priv, "Failed entering multicast promisc mode\n");
+				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+			}
+
+			if (priv->vlgrp) {
+				/* Disable port VLAN filter */
+				err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
+				if (err)
+					en_err(priv, "Failed disabling VLAN filter\n");
+			}
 		}
 		goto out;
 	}
@@ -270,11 +285,24 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
 		/* Disable promiscouos mode */
-		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-					     priv->base_qpn, 0);
+		if (!mdev->dev->caps.vep_uc_steering)
+			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+						     priv->base_qpn, 0);
+		else
+			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+							  priv->port);
 		if (err)
 			en_err(priv, "Failed disabling promiscous mode\n");
 
+		/* Disable Multicast promisc */
+		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+							    priv->port);
+			if (err)
+				en_err(priv, "Failed disabling multicast promiscous mode\n");
+			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+		}
+
 		/* Enable port VLAN filter */
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
@@ -287,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
+
+		/* Add the default qp number as multicast promisc */
+		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+							 priv->port);
+			if (err)
+				en_err(priv, "Failed entering multicast promisc mode\n");
+			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+		}
 	} else {
 		int i;
+		/* Disable Multicast promisc */
+		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+							    priv->port);
+			if (err)
+				en_err(priv, "Failed disabling multicast promiscous mode\n");
+			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+		}
 
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
 
+		/* Detach our qp from all the multicast addresses */
+		for (i = 0; i < priv->mc_addrs_cnt; i++) {
+			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+			mc_list[5] = priv->port;
+			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, MLX4_PROT_ETH);
+		}
 		/* Flush mcast filter and init it with broadcast address */
 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
 				    1, MLX4_MCAST_CONFIG);
@@ -307,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		for (i = 0; i < priv->mc_addrs_cnt; i++) {
 			mcast_addr =
 			      mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
+			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+			mc_list[5] = priv->port;
+			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, 0, MLX4_PROT_ETH);
 			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
 					    mcast_addr, 0, MLX4_MCAST_CONFIG);
 		}
@@ -314,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 					  0, MLX4_MCAST_ENABLE);
 		if (err)
 			en_err(priv, "Failed enabling multicast filter\n");
-
-		mlx4_en_clear_list(dev);
 	}
 out:
 	mutex_unlock(&mdev->state_lock);
@@ -417,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 	unsigned long avg_pkt_size;
 	unsigned long rx_packets;
 	unsigned long rx_bytes;
-	unsigned long rx_byte_diff;
 	unsigned long tx_packets;
 	unsigned long tx_pkt_diff;
 	unsigned long rx_pkt_diff;
@@ -441,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 		rx_pkt_diff = ((unsigned long) (rx_packets -
 				priv->last_moder_packets));
 		packets = max(tx_pkt_diff, rx_pkt_diff);
-		rx_byte_diff = rx_bytes - priv->last_moder_bytes;
-		rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
 		rate = packets * HZ / period;
 		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
 				priv->last_moder_bytes)) / packets : 0;
 
 		/* Apply auto-moderation only when packet rate exceeds a rate that
 		 * it matters */
-		if (rate > MLX4_EN_RX_RATE_THRESH) {
+		if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
 			/* If tx and rx packet rates are not balanced, assume that
 			 * traffic is mainly BW bound and apply maximum moderation.
 			 * Otherwise, moderate according to packet rate */
-			if (2 * tx_pkt_diff > 3 * rx_pkt_diff &&
-			    rx_pkt_diff / rx_byte_diff <
-			    MLX4_EN_SMALL_PKT_SIZE)
-				moder_time = priv->rx_usecs_low;
-			else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
+			if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+			    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
 				moder_time = priv->rx_usecs_high;
-			else {
+			} else {
 				if (rate < priv->pkt_rate_low)
 					moder_time = priv->rx_usecs_low;
 				else if (rate > priv->pkt_rate_high)
@@ -471,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 						priv->rx_usecs_low;
 			}
 		} else {
-			/* When packet rate is low, use default moderation rather than
-			 * 0 to prevent interrupt storms if traffic suddenly increases */
-			moder_time = priv->rx_usecs;
+			moder_time = priv->rx_usecs_low;
 		}
 
 		en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
@@ -565,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev)
 	int err = 0;
 	int i;
 	int j;
+	u8 mc_list[16] = {0};
+	char name[32];
 
 	if (priv->port_up) {
 		en_dbg(DRV, priv, "start port called while port already up\n");
@@ -603,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev)
 		++rx_index;
 	}
 
+	/* Set port mac number */
+	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	err = mlx4_register_mac(mdev->dev, priv->port,
+				priv->mac, &priv->base_qpn, 0);
+	if (err) {
+		en_err(priv, "Failed setting port mac\n");
+		goto cq_err;
+	}
+	mdev->mac_removed[priv->port] = 0;
+
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
 		en_err(priv, "Failed configuring rss steering\n");
-		goto cq_err;
+		goto mac_err;
 	}
 
+	if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
+		sprintf(name , "%s-tx", priv->dev->name);
+		if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
+			mlx4_warn(mdev, "Failed Assigning an EQ to "
+					"%s_tx ,Falling back to legacy "
+					"EQ's\n", priv->dev->name);
+		}
+	}
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
 		cq = &priv->tx_cq[i];
+		cq->vector = priv->tx_vector;
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -659,24 +726,22 @@ int mlx4_en_start_port(struct net_device *dev)
 		en_err(priv, "Failed setting default qp numbers\n");
 		goto tx_err;
 	}
-	/* Set port mac number */
-	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-	err = mlx4_register_mac(mdev->dev, priv->port,
-				priv->mac, &priv->mac_index);
-	if (err) {
-		en_err(priv, "Failed setting port mac\n");
-		goto tx_err;
-	}
-	mdev->mac_removed[priv->port] = 0;
 
 	/* Init port */
 	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
 	if (err) {
 		en_err(priv, "Failed Initializing port\n");
-		goto mac_err;
+		goto tx_err;
 	}
 
+	/* Attach rx QP to bradcast address */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mc_list[5] = priv->port;
+	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+				  0, MLX4_PROT_ETH))
+		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->mcast_task);
 
@@ -684,8 +749,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	netif_tx_start_all_queues(dev);
 	return 0;
 
-mac_err:
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
 tx_err:
 	while (tx_index--) {
 		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -693,6 +756,8 @@ tx_err:
 	}
 
 	mlx4_en_release_rss_steer(priv);
+mac_err:
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 cq_err:
 	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -708,6 +773,7 @@ void mlx4_en_stop_port(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int i;
+	u8 mc_list[16] = {0};
 
 	if (!priv->port_up) {
 		en_dbg(DRV, priv, "stop port called while port already down\n");
@@ -722,8 +788,23 @@ void mlx4_en_stop_port(struct net_device *dev)
 	/* Set port as not active */
 	priv->port_up = false;
 
+	/* Detach All multicasts */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mc_list[5] = priv->port;
+	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+			      MLX4_PROT_ETH);
+	for (i = 0; i < priv->mc_addrs_cnt; i++) {
+		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+		mc_list[5] = priv->port;
+		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+				      mc_list, MLX4_PROT_ETH);
+	}
+	mlx4_en_clear_list(dev);
+	/* Flush multicast filter */
+	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+
 	/* Unregister Mac address for the port */
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 	mdev->mac_removed[priv->port] = 1;
 
 	/* Free TX Rings */
@@ -801,7 +882,6 @@ static int mlx4_en_open(struct net_device *dev)
 		priv->rx_ring[i].packets = 0;
 	}
 
-	mlx4_en_set_default_moderation(priv);
 	err = mlx4_en_start_port(dev);
 	if (err)
 		en_err(priv, "Failed starting port:%d\n", priv->port);
@@ -828,7 +908,7 @@ static int mlx4_en_close(struct net_device *dev)
 	return 0;
 }
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
 {
 	int i;
 
@@ -836,14 +916,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 		if (priv->tx_ring[i].tx_info)
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
 		if (priv->tx_cq[i].buf)
-			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		if (priv->rx_ring[i].rx_info)
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
 		if (priv->rx_cq[i].buf)
-			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
 	}
 }
 
@@ -851,6 +931,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
+	int base_tx_qpn, err;
+
+	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+	if (err) {
+		en_err(priv, "failed reserving range for TX rings\n");
+		return err;
+	}
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
@@ -858,7 +945,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
 					   prof->tx_ring_size, TXBB_SIZE))
 			goto err;
 	}
@@ -878,6 +965,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
 	return -ENOMEM;
 }
 
@@ -905,7 +993,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mdev->pndev[priv->port] = NULL;
 	mutex_unlock(&mdev->state_lock);
 
-	mlx4_en_free_resources(priv);
+	mlx4_en_free_resources(priv, false);
 	free_netdev(dev);
 }
 
@@ -932,7 +1020,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 	} else {
 		mlx4_en_stop_port(dev);
-		mlx4_en_set_default_moderation(priv);
 		err = mlx4_en_start_port(dev);
 		if (err) {
 			en_err(priv, "Failed restarting port:%d\n",
@@ -1079,7 +1166,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
+	/* Configure port */
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    MLX4_EN_MIN_MTU,
+				    0, 0, 0, 0);
+	if (err) {
+		en_err(priv, "Failed setting port general configurations "
+		       "for port %d, with error %d\n", priv->port, err);
+		goto out;
+	}
+
+	/* Init port */
+	en_warn(priv, "Initializing port\n");
+	err = mlx4_INIT_PORT(mdev->dev, priv->port);
+	if (err) {
+		en_err(priv, "Failed Initializing port\n");
+		goto out;
+	}
 	priv->registered = 1;
+	mlx4_en_set_default_moderation(priv);
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	return 0;
 
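
A note on the steering hunks above: multicast attach/detach in the new model takes a 16-byte GID, with the Ethernet MAC copied into bytes 10..15 and the port number stored in byte 5 (hence the repeated mc_list[5] = priv->port pattern). A standalone C sketch of how such a GID is built:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Build the 16-byte GID that the hunks pass to
 * mlx4_multicast_attach()/detach(): MAC in bytes 10..15, port in byte 5. */
static void build_mc_gid(uint8_t gid[16], const uint8_t mac[ETH_ALEN],
			 uint8_t port)
{
	memset(gid, 0, 16);
	memcpy(&gid[10], mac, ETH_ALEN);
	gid[5] = port;
}

int main(void)
{
	uint8_t bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	uint8_t gid[16];
	int i;

	build_mc_gid(gid, bcast, 1);	/* broadcast GID for port 1 */
	for (i = 0; i < 16; i++)
		printf("%02x%s", gid[i], i == 15 ? "\n" : ":");
	return 0;
}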
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 7f5a3221e0c1..f2a4f5dd313d 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -119,6 +119,10 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	struct mlx4_set_port_rqp_calc_context *context;
 	int err;
 	u32 in_mod;
+	u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
+
+	if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
+		return 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
@@ -127,8 +131,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	memset(context, 0, sizeof *context);
 
 	context->base_qpn = cpu_to_be32(base_qpn);
-	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
-	context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
+	context->n_mac = 0x7;
+	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+				       base_qpn);
+	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+				     base_qpn);
 	context->intra_no_vlan = 0;
 	context->no_vlan = MLX4_NO_VLAN_IDX;
 	context->intra_vlan_miss = 0;
@@ -206,7 +213,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
 	}
 	stats->tx_packets = 0;
 	stats->tx_bytes = 0;
-	for (i = 0; i <= priv->tx_ring_num; i++) {
+	for (i = 0; i < priv->tx_ring_num; i++) {
 		stats->tx_packets += priv->tx_ring[i].packets;
 		stats->tx_bytes += priv->tx_ring[i].bytes;
 	}
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index 092e814b1981..e3d73e41c567 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,8 +36,8 @@
 
 
 #define SET_PORT_GEN_ALL_VALID	0x7
-#define SET_PORT_PROMISC_EN_SHIFT	31
-#define SET_PORT_PROMISC_MODE_SHIFT	30
+#define SET_PORT_PROMISC_SHIFT	31
+#define SET_PORT_MC_PROMISC_SHIFT	30
 
 enum {
 	MLX4_CMD_SET_VLAN_FLTR  = 0x47,
@@ -45,6 +45,12 @@ enum {
 	MLX4_CMD_DUMP_ETH_STATS = 0x49,
 };
 
+enum {
+	MCAST_DIRECT_ONLY	= 0,
+	MCAST_DIRECT		= 1,
+	MCAST_DEFAULT		= 2
+};
+
 struct mlx4_set_port_general_context {
 	u8 reserved[3];
 	u8 flags;
@@ -60,14 +66,17 @@ struct mlx4_set_port_general_context {
 
 struct mlx4_set_port_rqp_calc_context {
 	__be32 base_qpn;
-	__be32 flags;
-	u8 reserved[3];
+	u8 rererved;
+	u8 n_mac;
+	u8 n_vlan;
+	u8 n_prio;
+	u8 reserved2[3];
 	u8 mac_miss;
 	u8 intra_no_vlan;
 	u8 no_vlan;
 	u8 intra_vlan_miss;
 	u8 vlan_miss;
-	u8 reserved2[3];
+	u8 reserved3[3];
 	u8 no_vlan_prio;
 	__be32 promisc;
 	__be32 mcast;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 570f2508fb30..05998ee297c9 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -845,16 +845,10 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	}
 
 	/* Configure RSS indirection qp */
-	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
-	if (err) {
-		en_err(priv, "Failed to reserve range for RSS "
-			     "indirection qp\n");
-		goto rss_err;
-	}
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
 	if (err) {
 		en_err(priv, "Failed to allocate RSS indirection QP\n");
-		goto reserve_err;
+		goto rss_err;
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
@@ -881,8 +875,6 @@ indir_err:
 		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-reserve_err:
-	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
 rss_err:
 	for (i = 0; i < good_qps; i++) {
 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
@@ -904,7 +896,6 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
 		       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
 	mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
 	mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
-	mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index a680cd4a5ab6..01feb8fd42ad 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -44,6 +44,7 @@
 
 enum {
 	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+	MAX_BF = 256,
 };
 
 static int inline_thold __read_mostly = MAX_INLINE;
@@ -52,7 +53,7 @@ module_param_named(inline_thold, inline_thold, int, 0444);
 MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_tx_ring *ring, u32 size,
+			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
 			   u16 stride)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -103,23 +104,25 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
103 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size, 104 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
104 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map); 105 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
105 106
106 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn); 107 ring->qpn = qpn;
107 if (err) {
108 en_err(priv, "Failed reserving qp for tx ring.\n");
109 goto err_map;
110 }
111
112 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp); 108 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
113 if (err) { 109 if (err) {
114 en_err(priv, "Failed allocating qp %d\n", ring->qpn); 110 en_err(priv, "Failed allocating qp %d\n", ring->qpn);
115 goto err_reserve; 111 goto err_map;
116 } 112 }
117 ring->qp.event = mlx4_en_sqp_event; 113 ring->qp.event = mlx4_en_sqp_event;
118 114
115 err = mlx4_bf_alloc(mdev->dev, &ring->bf);
116 if (err) {
117 en_dbg(DRV, priv, "working without blueflame (%d)", err);
118 ring->bf.uar = &mdev->priv_uar;
119 ring->bf.uar->map = mdev->uar_map;
120 ring->bf_enabled = false;
121 } else
122 ring->bf_enabled = true;
123
119 return 0; 124 return 0;
120 125
121err_reserve:
122 mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
123err_map: 126err_map:
124 mlx4_en_unmap_buffer(&ring->wqres.buf); 127 mlx4_en_unmap_buffer(&ring->wqres.buf);
125err_hwq_res: 128err_hwq_res:
@@ -139,6 +142,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 	struct mlx4_en_dev *mdev = priv->mdev;
 	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
+	if (ring->bf_enabled)
+		mlx4_bf_free(mdev->dev, &ring->bf);
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
 	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
@@ -171,6 +176,8 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
 	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
 				ring->cqn, &ring->context);
+	if (ring->bf_enabled)
+		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
 
 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
 			       &ring->qp, &ring->qp_state);
@@ -591,6 +598,11 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
 	return skb_tx_hash(dev, skb);
 }
 
+static void mlx4_bf_copy(unsigned long *dst, unsigned long *src, unsigned bytecnt)
+{
+	__iowrite64_copy(dst, src, bytecnt / 8);
+}
+
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -609,12 +621,13 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	int desc_size;
 	int real_size;
 	dma_addr_t dma;
-	u32 index;
+	u32 index, bf_index;
 	__be32 op_own;
 	u16 vlan_tag = 0;
 	int i;
 	int lso_header_size;
 	void *fragptr;
+	bool bounce = false;
 
 	if (!priv->port_up)
 		goto tx_drop;
@@ -657,13 +670,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Packet is good - grab an index and transmit it */
 	index = ring->prod & ring->size_mask;
+	bf_index = ring->prod;
 
 	/* See if we have enough space for whole descriptor TXBB for setting
 	 * SW ownership on next descriptor; if not, use a bounce buffer. */
 	if (likely(index + nr_txbb <= ring->size))
 		tx_desc = ring->buf + index * TXBB_SIZE;
-	else
+	else {
 		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
+		bounce = true;
+	}
 
 	/* Save skb in tx_info ring */
 	tx_info = &ring->tx_info[index];
@@ -768,21 +784,37 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	ring->prod += nr_txbb;
 
 	/* If we used a bounce buffer then copy descriptor back into place */
-	if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
+	if (bounce)
 		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
 
 	/* Run destructor before passing skb to HW */
 	if (likely(!skb_shared(skb)))
 		skb_orphan(skb);
 
-	/* Ensure new descirptor hits memory
-	 * before setting ownership of this descriptor to HW */
-	wmb();
-	tx_desc->ctrl.owner_opcode = op_own;
+	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+		*(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
+		op_own |= htonl((bf_index & 0xffff) << 8);
+		/* Ensure new descirptor hits memory
+		 * before setting ownership of this descriptor to HW */
+		wmb();
+		tx_desc->ctrl.owner_opcode = op_own;
 
-	/* Ring doorbell! */
-	wmb();
-	writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
+		wmb();
+
+		mlx4_bf_copy(ring->bf.reg + ring->bf.offset, (unsigned long *) &tx_desc->ctrl,
+			     desc_size);
+
+		wmb();
+
+		ring->bf.offset ^= ring->bf.buf_size;
+	} else {
+		/* Ensure new descirptor hits memory
+		 * before setting ownership of this descriptor to HW */
+		wmb();
+		tx_desc->ctrl.owner_opcode = op_own;
+		wmb();
+		writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
+	}
 
 	/* Poll CQ here */
 	mlx4_en_xmit_poll(priv, tx_ind);
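
A note on the BlueFlame path added to mlx4_en_xmit(): descriptors no larger than MAX_BF bytes are copied straight into a write-combining UAR page with __iowrite64_copy(), replacing the separate doorbell write, and ring->bf.offset ^= ring->bf.buf_size alternates between the two halves of the BF register area between sends. A standalone C sketch of that double-buffer toggling, with plain memory standing in for the WC mapping:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Model the BlueFlame double-buffer: each send alternates between the
 * two halves of the BF register area so the previous burst can drain. */
struct bf_model {
	uint8_t reg[512];	/* stand-in for the WC-mapped BF page */
	unsigned offset;
	unsigned buf_size;	/* size of one half, e.g. 256 */
};

static void bf_post(struct bf_model *bf, const void *desc, size_t len)
{
	memcpy(bf->reg + bf->offset, desc, len);	/* models mlx4_bf_copy() */
	bf->offset ^= bf->buf_size;			/* ping-pong the halves */
}

int main(void)
{
	struct bf_model bf = { .offset = 0, .buf_size = 256 };
	uint8_t wqe[64] = {0};

	bf_post(&bf, wqe, sizeof(wqe));
	printf("next offset: %u\n", bf.offset);	/* 256 */
	bf_post(&bf, wqe, sizeof(wqe));
	printf("next offset: %u\n", bf.offset);	/* 0 */
	return 0;
}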
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 552d0fce6f67..506cfd0372ec 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -42,7 +42,7 @@
 #include "fw.h"
 
 enum {
-	MLX4_IRQNAME_SIZE	= 64
+	MLX4_IRQNAME_SIZE	= 32
 };
 
 enum {
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
 	 * we need to map, take the difference of highest index and
 	 * the lowest index we'll use and add 1.
 	 */
-	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
-		dev->caps.reserved_eqs / 4 + 1;
+	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
+		 dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
 }
 
 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 static void mlx4_free_irqs(struct mlx4_dev *dev)
 {
 	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
-	int i;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i, vec;
 
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
+
 	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		if (eq_table->eq[i].have_irq) {
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
 			eq_table->eq[i].have_irq = 0;
 		}
 
+	for (i = 0; i < dev->caps.comp_pool; i++) {
+		/*
+		 * Freeing the assigned irq's
+		 * all bits should be 0, but we need to validate
+		 */
+		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			/* NO need protecting*/
+			vec = dev->caps.num_comp_vectors + 1 + i;
+			free_irq(priv->eq_table.eq[vec].irq,
+				 &priv->eq_table.eq[vec]);
+		}
+	}
+
+
 	kfree(eq_table->irq_names);
 }
 
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 			 (priv->eq_table.inta_pin < 32 ? 4 : 0);
 
 	priv->eq_table.irq_names =
-		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
+					     dev->caps.comp_pool),
 			GFP_KERNEL);
 	if (!priv->eq_table.irq_names) {
 		err = -ENOMEM;
@@ -601,6 +618,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		if (err)
 			goto err_out_comp;
 
+	/*if additional completion vectors poolsize is 0 this loop will not run*/
+	for (i = dev->caps.num_comp_vectors + 1;
+	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+
+		err = mlx4_create_eq(dev, dev->caps.num_cqs -
+					  dev->caps.reserved_cqs +
+					  MLX4_NUM_SPARE_EQE,
+				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+				     &priv->eq_table.eq[i]);
+		if (err) {
+			--i;
+			goto err_out_unmap;
+		}
+	}
+
+
 	if (dev->flags & MLX4_FLAG_MSI_X) {
 		const char *eq_name;
 
@@ -686,7 +719,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 
 	mlx4_free_irqs(dev);
 
-	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
 	mlx4_unmap_clr_int(dev);
@@ -743,3 +776,65 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
 	return err;
 }
 EXPORT_SYMBOL(mlx4_test_interrupts);
+
+int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
+{
+
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int vec = 0, err = 0, i;
+
+	spin_lock(&priv->msix_ctl.pool_lock);
+	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
+		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
+			priv->msix_ctl.pool_bm |= 1ULL << i;
+			vec = dev->caps.num_comp_vectors + 1 + i;
+			snprintf(priv->eq_table.irq_names +
+					vec * MLX4_IRQNAME_SIZE,
+					MLX4_IRQNAME_SIZE, "%s", name);
+			err = request_irq(priv->eq_table.eq[vec].irq,
+					  mlx4_msi_x_interrupt, 0,
+					  &priv->eq_table.irq_names[vec<<5],
+					  priv->eq_table.eq + vec);
+			if (err) {
+				/*zero out bit by fliping it*/
+				priv->msix_ctl.pool_bm ^= 1 << i;
+				vec = 0;
+				continue;
+				/*we dont want to break here*/
+			}
+			eq_set_ci(&priv->eq_table.eq[vec], 1);
+		}
+	}
+	spin_unlock(&priv->msix_ctl.pool_lock);
+
+	if (vec) {
+		*vector = vec;
+	} else {
+		*vector = 0;
+		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
+	}
+	return err;
+}
+EXPORT_SYMBOL(mlx4_assign_eq);
+
+void mlx4_release_eq(struct mlx4_dev *dev, int vec)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	/*bm index*/
+	int i = vec - dev->caps.num_comp_vectors - 1;
+
+	if (likely(i >= 0)) {
+		/*sanity check , making sure were not trying to free irq's
+		  Belonging to a legacy EQ*/
+		spin_lock(&priv->msix_ctl.pool_lock);
+		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			free_irq(priv->eq_table.eq[vec].irq,
+				 &priv->eq_table.eq[vec]);
+			priv->msix_ctl.pool_bm &= ~(1ULL << i);
+		}
+		spin_unlock(&priv->msix_ctl.pool_lock);
+	}
+
+}
+EXPORT_SYMBOL(mlx4_release_eq);
+
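
A note on mlx4_assign_eq()/mlx4_release_eq() above: pooled vectors are tracked in the 64-bit msix_ctl.pool_bm bitmap under a spinlock, where a set bit i means vector num_comp_vectors + 1 + i is handed out. A standalone, lock-free C sketch of the same bitmap arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Model of the msix_ctl.pool_bm bookkeeping: bit i set means pool
 * entry i is in use; the vector number is num_comp_vectors + 1 + i. */
static uint64_t pool_bm;
static const int num_comp_vectors = 4;
static const int comp_pool = 8;

static int assign_eq(void)
{
	int i;

	for (i = 0; i < comp_pool; i++) {
		if (~pool_bm & (1ULL << i)) {
			pool_bm |= 1ULL << i;
			return num_comp_vectors + 1 + i;
		}
	}
	return -1;	/* pool exhausted, like -ENOSPC */
}

static void release_eq(int vec)
{
	int i = vec - num_comp_vectors - 1;	/* bitmap index */

	if (i >= 0 && (pool_bm & (1ULL << i)))
		pool_bm &= ~(1ULL << i);
}

int main(void)
{
	int v1 = assign_eq(), v2 = assign_eq();

	printf("assigned vectors %d and %d\n", v1, v2);	/* 5 and 6 */
	release_eq(v1);
	printf("vector %d reassigned: %d\n", v1, assign_eq());	/* 5 */
	return 0;
}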
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 5de1db897835..67a209ba939d 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -274,8 +274,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->stat_rate_support = stat_rate;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
 	dev_cap->udp_rss = field & 0x1;
+	dev_cap->vep_uc_steering = field & 0x2;
+	dev_cap->vep_mc_steering = field & 0x4;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
 	dev_cap->loopback_support = field & 0x1;
+	dev_cap->wol = field & 0x40;
 	MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
 	dev_cap->reserved_uars = field >> 4;
@@ -737,6 +740,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_MC_BASE_OFFSET		 (INIT_HCA_MCAST_OFFSET + 0x00)
 #define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x12)
 #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x16)
+#define INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
 #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x1b)
 #define INIT_HCA_TPT_OFFSET		 0x0f0
 #define INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
@@ -797,6 +801,8 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	MLX4_PUT(inbox, param->mc_base,		INIT_HCA_MC_BASE_OFFSET);
 	MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
 	MLX4_PUT(inbox, param->log_mc_hash_sz,  INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+	if (dev->caps.vep_mc_steering)
+		MLX4_PUT(inbox, (u8) (1 << 3),	INIT_HCA_UC_STEERING_OFFSET);
 	MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
 
 	/* TPT attributes */
@@ -908,3 +914,22 @@ int mlx4_NOP(struct mlx4_dev *dev)
908 /* Input modifier of 0x1f means "finish as soon as possible." */ 914 /* Input modifier of 0x1f means "finish as soon as possible." */
909 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100); 915 return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
910} 916}
917
918#define MLX4_WOL_SETUP_MODE (5 << 28)
919int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
920{
921 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
922
923 return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
924 MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
925}
926EXPORT_SYMBOL_GPL(mlx4_wol_read);
927
928int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
929{
930 u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
931
932 return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
933 MLX4_CMD_TIME_CLASS_A);
934}
935EXPORT_SYMBOL_GPL(mlx4_wol_write);
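
mlx4_wol_read() and mlx4_wol_write() differ only in direction; both pack the same MOD_STAT_CFG input modifier, with the setup mode in the high bits and the port number shifted up by 8. A standalone sketch of that packing, with wol_in_mod as an illustrative helper:

#include <stdint.h>
#include <stdio.h>

#define MLX4_WOL_SETUP_MODE (5U << 28)	/* mode field in the high bits */

/* Pack the MOD_STAT_CFG input modifier the way mlx4_wol_read/write do. */
static uint32_t wol_in_mod(int port)
{
	return MLX4_WOL_SETUP_MODE | (uint32_t)port << 8;
}

int main(void)
{
	printf("port 1 in_mod = 0x%08x\n", wol_in_mod(1));	/* 0x50000100 */
	return 0;
}
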
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 65cc72eb899d..88003ebc6185 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -80,6 +80,9 @@ struct mlx4_dev_cap {
80 u16 stat_rate_support; 80 u16 stat_rate_support;
81 int udp_rss; 81 int udp_rss;
82 int loopback_support; 82 int loopback_support;
83 int vep_uc_steering;
84 int vep_mc_steering;
85 int wol;
83 u32 flags; 86 u32 flags;
84 int reserved_uars; 87 int reserved_uars;
85 int uar_size; 88 int uar_size;
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index c83501122d77..62fa7eec5f0c 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -39,6 +39,7 @@
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/dma-mapping.h> 40#include <linux/dma-mapping.h>
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/io-mapping.h>
42 43
43#include <linux/mlx4/device.h> 44#include <linux/mlx4/device.h>
44#include <linux/mlx4/doorbell.h> 45#include <linux/mlx4/doorbell.h>
@@ -227,6 +228,9 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
227 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 228 dev->caps.stat_rate_support = dev_cap->stat_rate_support;
228 dev->caps.udp_rss = dev_cap->udp_rss; 229 dev->caps.udp_rss = dev_cap->udp_rss;
229 dev->caps.loopback_support = dev_cap->loopback_support; 230 dev->caps.loopback_support = dev_cap->loopback_support;
231 dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
232 dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
233 dev->caps.wol = dev_cap->wol;
230 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 234 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
231 235
232 dev->caps.log_num_macs = log_num_mac; 236 dev->caps.log_num_macs = log_num_mac;
@@ -718,8 +722,31 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
718 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 722 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
719} 723}
720 724
725static int map_bf_area(struct mlx4_dev *dev)
726{
727 struct mlx4_priv *priv = mlx4_priv(dev);
728 resource_size_t bf_start;
729 resource_size_t bf_len;
730 int err = 0;
731
732 bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
733 bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
734 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
735 if (!priv->bf_mapping)
736 err = -ENOMEM;
737
738 return err;
739}
740
741static void unmap_bf_area(struct mlx4_dev *dev)
742{
743 if (mlx4_priv(dev)->bf_mapping)
744 io_mapping_free(mlx4_priv(dev)->bf_mapping);
745}
746
721static void mlx4_close_hca(struct mlx4_dev *dev) 747static void mlx4_close_hca(struct mlx4_dev *dev)
722{ 748{
749 unmap_bf_area(dev);
723 mlx4_CLOSE_HCA(dev, 0); 750 mlx4_CLOSE_HCA(dev, 0);
724 mlx4_free_icms(dev); 751 mlx4_free_icms(dev);
725 mlx4_UNMAP_FA(dev); 752 mlx4_UNMAP_FA(dev);
@@ -772,6 +799,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
772 goto err_stop_fw; 799 goto err_stop_fw;
773 } 800 }
774 801
802 if (map_bf_area(dev))
803 mlx4_dbg(dev, "Failed to map blue flame area\n");
804
775 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 805 init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
776 806
777 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 807 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
@@ -802,6 +832,7 @@ err_free_icm:
802 mlx4_free_icms(dev); 832 mlx4_free_icms(dev);
803 833
804err_stop_fw: 834err_stop_fw:
835 unmap_bf_area(dev);
805 mlx4_UNMAP_FA(dev); 836 mlx4_UNMAP_FA(dev);
806 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 837 mlx4_free_icm(dev, priv->fw.fw_icm, 0);
807 838
@@ -969,13 +1000,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
969{ 1000{
970 struct mlx4_priv *priv = mlx4_priv(dev); 1001 struct mlx4_priv *priv = mlx4_priv(dev);
971 struct msix_entry *entries; 1002 struct msix_entry *entries;
972 int nreq; 1003 int nreq = min_t(int, dev->caps.num_ports *
1004 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
1005 + MSIX_LEGACY_SZ, MAX_MSIX);
973 int err; 1006 int err;
974 int i; 1007 int i;
975 1008
976 if (msi_x) { 1009 if (msi_x) {
977 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 1010 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
978 num_possible_cpus() + 1); 1011 nreq);
979 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1012 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
980 if (!entries) 1013 if (!entries)
981 goto no_msi; 1014 goto no_msi;
@@ -998,7 +1031,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
998 goto no_msi; 1031 goto no_msi;
999 } 1032 }
1000 1033
1001 dev->caps.num_comp_vectors = nreq - 1; 1034 if (nreq <
1035 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
1036			/* Working in legacy mode, all EQs shared */
1037 dev->caps.comp_pool = 0;
1038 dev->caps.num_comp_vectors = nreq - 1;
1039 } else {
1040 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
1041 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
1042 }
1002 for (i = 0; i < nreq; ++i) 1043 for (i = 0; i < nreq; ++i)
1003 priv->eq_table.eq[i].irq = entries[i].vector; 1044 priv->eq_table.eq[i].irq = entries[i].vector;
1004 1045
@@ -1010,6 +1051,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1010 1051
1011no_msi: 1052no_msi:
1012 dev->caps.num_comp_vectors = 1; 1053 dev->caps.num_comp_vectors = 1;
1054 dev->caps.comp_pool = 0;
1013 1055
1014 for (i = 0; i < 2; ++i) 1056 for (i = 0; i < 2; ++i)
1015 priv->eq_table.eq[i].irq = dev->pdev->irq; 1057 priv->eq_table.eq[i].irq = dev->pdev->irq;
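
The hunk above first budgets MSI-X vectors (per-port vectors capped by CPU count, plus a legacy block, all capped by MAX_MSIX), then falls back to fully shared EQs when the grant is too small to give each port its minimum. A standalone sketch of the same arithmetic; the constant values here are made up for illustration, the real MAX_MSIX, MAX_MSIX_P_PORT, MIN_MSIX_P_PORT and MSIX_LEGACY_SZ come from the mlx4 headers:

#include <stdio.h>

/* Illustrative values; the real constants live in the mlx4 headers. */
#define MAX_MSIX	64
#define MAX_MSIX_P_PORT	8
#define MIN_MSIX_P_PORT	2
#define MSIX_LEGACY_SZ	4

static int min_int(int a, int b) { return a < b ? a : b; }

/* Mirror the request size and the legacy vs. pooled split above. */
static void split_vectors(int num_ports, int online_cpus, int avail_eqs)
{
	int nreq = min_int(num_ports * min_int(online_cpus + 1, MAX_MSIX_P_PORT)
			   + MSIX_LEGACY_SZ, MAX_MSIX);
	int comp_pool, num_comp_vectors;

	nreq = min_int(avail_eqs, nreq);	/* capped by free EQs */
	if (nreq < MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT) {
		comp_pool = 0;			/* all EQs shared */
		num_comp_vectors = nreq - 1;
	} else {
		comp_pool = nreq - MSIX_LEGACY_SZ;
		num_comp_vectors = MSIX_LEGACY_SZ - 1;
	}
	printf("nreq=%d pool=%d shared=%d\n", nreq, comp_pool, num_comp_vectors);
}

int main(void)
{
	split_vectors(2, 8, 32);	/* e.g. a dual-port card on 8 CPUs */
	return 0;
}
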
@@ -1049,6 +1091,59 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
1049 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1091 device_remove_file(&info->dev->pdev->dev, &info->port_attr);
1050} 1092}
1051 1093
1094static int mlx4_init_steering(struct mlx4_dev *dev)
1095{
1096 struct mlx4_priv *priv = mlx4_priv(dev);
1097 int num_entries = dev->caps.num_ports;
1098 int i, j;
1099
1100 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
1101 if (!priv->steer)
1102 return -ENOMEM;
1103
1104 for (i = 0; i < num_entries; i++) {
1105 for (j = 0; j < MLX4_NUM_STEERS; j++) {
1106 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
1107 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
1108 }
1109 INIT_LIST_HEAD(&priv->steer[i].high_prios);
1110 }
1111 return 0;
1112}
1113
1114static void mlx4_clear_steering(struct mlx4_dev *dev)
1115{
1116 struct mlx4_priv *priv = mlx4_priv(dev);
1117 struct mlx4_steer_index *entry, *tmp_entry;
1118 struct mlx4_promisc_qp *pqp, *tmp_pqp;
1119 int num_entries = dev->caps.num_ports;
1120 int i, j;
1121
1122 for (i = 0; i < num_entries; i++) {
1123 for (j = 0; j < MLX4_NUM_STEERS; j++) {
1124 list_for_each_entry_safe(pqp, tmp_pqp,
1125 &priv->steer[i].promisc_qps[j],
1126 list) {
1127 list_del(&pqp->list);
1128 kfree(pqp);
1129 }
1130 list_for_each_entry_safe(entry, tmp_entry,
1131 &priv->steer[i].steer_entries[j],
1132 list) {
1133 list_del(&entry->list);
1134 list_for_each_entry_safe(pqp, tmp_pqp,
1135 &entry->duplicates,
1136 list) {
1137 list_del(&pqp->list);
1138 kfree(pqp);
1139 }
1140 kfree(entry);
1141 }
1142 }
1143 }
1144 kfree(priv->steer);
1145}
1146
1052static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1147static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1053{ 1148{
1054 struct mlx4_priv *priv; 1149 struct mlx4_priv *priv;
@@ -1130,6 +1225,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1130 INIT_LIST_HEAD(&priv->pgdir_list); 1225 INIT_LIST_HEAD(&priv->pgdir_list);
1131 mutex_init(&priv->pgdir_mutex); 1226 mutex_init(&priv->pgdir_mutex);
1132 1227
1228 pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id);
1229
1230 INIT_LIST_HEAD(&priv->bf_list);
1231 mutex_init(&priv->bf_mutex);
1232
1133 /* 1233 /*
1134 * Now reset the HCA before we touch the PCI capabilities or 1234 * Now reset the HCA before we touch the PCI capabilities or
1135 * attempt a firmware command, since a boot ROM may have left 1235 * attempt a firmware command, since a boot ROM may have left
@@ -1154,8 +1254,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1154 if (err) 1254 if (err)
1155 goto err_close; 1255 goto err_close;
1156 1256
1257 priv->msix_ctl.pool_bm = 0;
1258 spin_lock_init(&priv->msix_ctl.pool_lock);
1259
1157 mlx4_enable_msi_x(dev); 1260 mlx4_enable_msi_x(dev);
1158 1261
1262 err = mlx4_init_steering(dev);
1263 if (err)
1264 goto err_free_eq;
1265
1159 err = mlx4_setup_hca(dev); 1266 err = mlx4_setup_hca(dev);
1160 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { 1267 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
1161 dev->flags &= ~MLX4_FLAG_MSI_X; 1268 dev->flags &= ~MLX4_FLAG_MSI_X;
@@ -1164,7 +1271,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
1164 } 1271 }
1165 1272
1166 if (err) 1273 if (err)
1167 goto err_free_eq; 1274 goto err_steer;
1168 1275
1169 for (port = 1; port <= dev->caps.num_ports; port++) { 1276 for (port = 1; port <= dev->caps.num_ports; port++) {
1170 err = mlx4_init_port_info(dev, port); 1277 err = mlx4_init_port_info(dev, port);
@@ -1197,6 +1304,9 @@ err_port:
1197 mlx4_cleanup_pd_table(dev); 1304 mlx4_cleanup_pd_table(dev);
1198 mlx4_cleanup_uar_table(dev); 1305 mlx4_cleanup_uar_table(dev);
1199 1306
1307err_steer:
1308 mlx4_clear_steering(dev);
1309
1200err_free_eq: 1310err_free_eq:
1201 mlx4_free_eq_table(dev); 1311 mlx4_free_eq_table(dev);
1202 1312
@@ -1256,6 +1366,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
1256 iounmap(priv->kar); 1366 iounmap(priv->kar);
1257 mlx4_uar_free(dev, &priv->driver_uar); 1367 mlx4_uar_free(dev, &priv->driver_uar);
1258 mlx4_cleanup_uar_table(dev); 1368 mlx4_cleanup_uar_table(dev);
1369 mlx4_clear_steering(dev);
1259 mlx4_free_eq_table(dev); 1370 mlx4_free_eq_table(dev);
1260 mlx4_close_hca(dev); 1371 mlx4_close_hca(dev);
1261 mlx4_cmd_cleanup(dev); 1372 mlx4_cmd_cleanup(dev);
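
mlx4_clear_steering() frees nodes while walking their lists, which is why it uses the _safe list iterators that keep a saved successor. A toy userspace version of the same save-then-free pattern on a plain singly linked list (names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct pqp {
	unsigned int qpn;
	struct pqp *next;
};

static void clear_list(struct pqp **head)
{
	struct pqp *p = *head, *tmp;

	while (p) {
		tmp = p->next;	/* save the successor before freeing */
		free(p);
		p = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct pqp *head = NULL;
	unsigned int i;

	for (i = 0; i < 3; i++) {
		struct pqp *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->qpn = i;
		n->next = head;
		head = n;
	}
	clear_list(&head);
	printf("steering lists cleared\n");
	return 0;
}
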
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index 79cf42db2ea9..e71372aa9cc4 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/etherdevice.h>
35 36
36#include <linux/mlx4/cmd.h> 37#include <linux/mlx4/cmd.h>
37 38
@@ -40,38 +41,40 @@
40#define MGM_QPN_MASK 0x00FFFFFF 41#define MGM_QPN_MASK 0x00FFFFFF
41#define MGM_BLCK_LB_BIT 30 42#define MGM_BLCK_LB_BIT 30
42 43
43struct mlx4_mgm {
44 __be32 next_gid_index;
45 __be32 members_count;
46 u32 reserved[2];
47 u8 gid[16];
48 __be32 qp[MLX4_QP_PER_MGM];
49};
50
51static const u8 zero_gid[16]; /* automatically initialized to 0 */ 44static const u8 zero_gid[16]; /* automatically initialized to 0 */
52 45
53static int mlx4_READ_MCG(struct mlx4_dev *dev, int index, 46static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
54 struct mlx4_cmd_mailbox *mailbox) 47 struct mlx4_cmd_mailbox *mailbox)
55{ 48{
56 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG, 49 return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
57 MLX4_CMD_TIME_CLASS_A); 50 MLX4_CMD_TIME_CLASS_A);
58} 51}
59 52
60static int mlx4_WRITE_MCG(struct mlx4_dev *dev, int index, 53static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
61 struct mlx4_cmd_mailbox *mailbox) 54 struct mlx4_cmd_mailbox *mailbox)
62{ 55{
63 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG, 56 return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
64 MLX4_CMD_TIME_CLASS_A); 57 MLX4_CMD_TIME_CLASS_A);
65} 58}
66 59
67static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 60static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
68 u16 *hash) 61 struct mlx4_cmd_mailbox *mailbox)
62{
63 u32 in_mod;
64
65 in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
66 return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
67 MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
68}
69
70static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
71 u16 *hash, u8 op_mod)
69{ 72{
70 u64 imm; 73 u64 imm;
71 int err; 74 int err;
72 75
73 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, 0, MLX4_CMD_MGID_HASH, 76 err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
74 MLX4_CMD_TIME_CLASS_A); 77 MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
75 78
76 if (!err) 79 if (!err)
77 *hash = imm; 80 *hash = imm;
@@ -79,6 +82,457 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
79 return err; 82 return err;
80} 83}
81 84
85static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
86 enum mlx4_steer_type steer,
87 u32 qpn)
88{
89 struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
90 struct mlx4_promisc_qp *pqp;
91
92 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
93 if (pqp->qpn == qpn)
94 return pqp;
95 }
96 /* not found */
97 return NULL;
98}
99
100/*
101 * Add new entry to steering data structure.
102 * All promisc QPs should be added as well
103 */
104static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
105 enum mlx4_steer_type steer,
106 unsigned int index, u32 qpn)
107{
108 struct mlx4_steer *s_steer;
109 struct mlx4_cmd_mailbox *mailbox;
110 struct mlx4_mgm *mgm;
111 u32 members_count;
112 struct mlx4_steer_index *new_entry;
113 struct mlx4_promisc_qp *pqp;
114	struct mlx4_promisc_qp *dqp = NULL;	/* examined on the error path */
115 u32 prot;
116 int err;
117 u8 pf_num;
118
119 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
120 s_steer = &mlx4_priv(dev)->steer[pf_num];
121 new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
122 if (!new_entry)
123 return -ENOMEM;
124
125 INIT_LIST_HEAD(&new_entry->duplicates);
126 new_entry->index = index;
127 list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
128
129 /* If the given qpn is also a promisc qp,
130	 * it should be inserted into the duplicates list
131 */
132 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
133 if (pqp) {
134 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
135 if (!dqp) {
136 err = -ENOMEM;
137 goto out_alloc;
138 }
139 dqp->qpn = qpn;
140 list_add_tail(&dqp->list, &new_entry->duplicates);
141 }
142
143 /* if no promisc qps for this vep, we are done */
144 if (list_empty(&s_steer->promisc_qps[steer]))
145 return 0;
146
147 /* now need to add all the promisc qps to the new
148 * steering entry, as they should also receive the packets
149	 * destined for this address */
150 mailbox = mlx4_alloc_cmd_mailbox(dev);
151 if (IS_ERR(mailbox)) {
152 err = -ENOMEM;
153 goto out_alloc;
154 }
155 mgm = mailbox->buf;
156
157 err = mlx4_READ_ENTRY(dev, index, mailbox);
158 if (err)
159 goto out_mailbox;
160
161 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
162 prot = be32_to_cpu(mgm->members_count) >> 30;
163 list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
164 /* don't add already existing qpn */
165 if (pqp->qpn == qpn)
166 continue;
167 if (members_count == MLX4_QP_PER_MGM) {
168 /* out of space */
169 err = -ENOMEM;
170 goto out_mailbox;
171 }
172
173 /* add the qpn */
174 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
175 }
176	/* update the qp count and write the entry back with all the promisc qps */
177 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
178 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
179
180out_mailbox:
181 mlx4_free_cmd_mailbox(dev, mailbox);
182 if (!err)
183 return 0;
184out_alloc:
185 if (dqp) {
186 list_del(&dqp->list);
187		kfree(dqp);
188 }
189 list_del(&new_entry->list);
190 kfree(new_entry);
191 return err;
192}
193
194/* update the data structures with existing steering entry */
195static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
196 enum mlx4_steer_type steer,
197 unsigned int index, u32 qpn)
198{
199 struct mlx4_steer *s_steer;
200 struct mlx4_steer_index *tmp_entry, *entry = NULL;
201 struct mlx4_promisc_qp *pqp;
202 struct mlx4_promisc_qp *dqp;
203 u8 pf_num;
204
205 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
206 s_steer = &mlx4_priv(dev)->steer[pf_num];
207
208 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
209 if (!pqp)
210 return 0; /* nothing to do */
211
212 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
213 if (tmp_entry->index == index) {
214 entry = tmp_entry;
215 break;
216 }
217 }
218 if (unlikely(!entry)) {
219 mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
220 return -EINVAL;
221 }
222
223 /* the given qpn is listed as a promisc qpn
224 * we need to add it as a duplicate to this entry
225	 * for future reference */
226 list_for_each_entry(dqp, &entry->duplicates, list) {
227 if (qpn == dqp->qpn)
228 return 0; /* qp is already duplicated */
229 }
230
231 /* add the qp as a duplicate on this index */
232 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
233 if (!dqp)
234 return -ENOMEM;
235 dqp->qpn = qpn;
236 list_add_tail(&dqp->list, &entry->duplicates);
237
238 return 0;
239}
240
241/* Check whether a qpn is a duplicate on steering entry
242 * If so, it should not be removed from mgm */
243static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
244 enum mlx4_steer_type steer,
245 unsigned int index, u32 qpn)
246{
247 struct mlx4_steer *s_steer;
248 struct mlx4_steer_index *tmp_entry, *entry = NULL;
249 struct mlx4_promisc_qp *dqp, *tmp_dqp;
250 u8 pf_num;
251
252 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
253 s_steer = &mlx4_priv(dev)->steer[pf_num];
254
255 /* if qp is not promisc, it cannot be duplicated */
256 if (!get_promisc_qp(dev, pf_num, steer, qpn))
257 return false;
258
259 /* The qp is promisc qp so it is a duplicate on this index
260 * Find the index entry, and remove the duplicate */
261 list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
262 if (tmp_entry->index == index) {
263 entry = tmp_entry;
264 break;
265 }
266 }
267 if (unlikely(!entry)) {
268 mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
269 return false;
270 }
271 list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
272 if (dqp->qpn == qpn) {
273 list_del(&dqp->list);
274 kfree(dqp);
275 }
276 }
277 return true;
278}
279
280/* If a steering entry contains only promisc QPs, it can be removed. */
281static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
282 enum mlx4_steer_type steer,
283 unsigned int index, u32 tqpn)
284{
285 struct mlx4_steer *s_steer;
286 struct mlx4_cmd_mailbox *mailbox;
287 struct mlx4_mgm *mgm;
288 struct mlx4_steer_index *entry = NULL, *tmp_entry;
289 u32 qpn;
290 u32 members_count;
291 bool ret = false;
292 int i;
293 u8 pf_num;
294
295 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
296 s_steer = &mlx4_priv(dev)->steer[pf_num];
297
298 mailbox = mlx4_alloc_cmd_mailbox(dev);
299 if (IS_ERR(mailbox))
300 return false;
301 mgm = mailbox->buf;
302
303 if (mlx4_READ_ENTRY(dev, index, mailbox))
304 goto out;
305 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
306 for (i = 0; i < members_count; i++) {
307 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
308 if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
309 /* the qp is not promisc, the entry can't be removed */
310 goto out;
311 }
312 }
313	/* All the qps currently registered for this entry are promiscuous;
314	 * check for duplicates */
315 ret = true;
316 list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
317 if (entry->index == index) {
318 if (list_empty(&entry->duplicates)) {
319 list_del(&entry->list);
320 kfree(entry);
321 } else {
322 /* This entry contains duplicates so it shouldn't be removed */
323 ret = false;
324 goto out;
325 }
326 }
327 }
328
329out:
330 mlx4_free_cmd_mailbox(dev, mailbox);
331 return ret;
332}
333
334static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
335 enum mlx4_steer_type steer, u32 qpn)
336{
337 struct mlx4_steer *s_steer;
338 struct mlx4_cmd_mailbox *mailbox;
339 struct mlx4_mgm *mgm;
340 struct mlx4_steer_index *entry;
341 struct mlx4_promisc_qp *pqp;
342 struct mlx4_promisc_qp *dqp;
343 u32 members_count;
344 u32 prot;
345 int i;
346 bool found;
347 int last_index;
348 int err;
349 u8 pf_num;
350 struct mlx4_priv *priv = mlx4_priv(dev);
351 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
352 s_steer = &mlx4_priv(dev)->steer[pf_num];
353
354 mutex_lock(&priv->mcg_table.mutex);
355
356 if (get_promisc_qp(dev, pf_num, steer, qpn)) {
357		err = 0;  /* Nothing to do, already exists */
358 goto out_mutex;
359 }
360
361 pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
362 if (!pqp) {
363 err = -ENOMEM;
364 goto out_mutex;
365 }
366 pqp->qpn = qpn;
367
368 mailbox = mlx4_alloc_cmd_mailbox(dev);
369 if (IS_ERR(mailbox)) {
370 err = -ENOMEM;
371 goto out_alloc;
372 }
373 mgm = mailbox->buf;
374
375	/* the promisc qp needs to be added to each of the steering
376	 * entries; if it already exists there, it is added as a duplicate
377	 * for this entry */
378 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
379 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
380 if (err)
381 goto out_mailbox;
382
383 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
384 prot = be32_to_cpu(mgm->members_count) >> 30;
385 found = false;
386 for (i = 0; i < members_count; i++) {
387 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
388 /* Entry already exists, add to duplicates */
389 dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
390 if (!dqp)
391 goto out_mailbox;
392 dqp->qpn = qpn;
393 list_add_tail(&dqp->list, &entry->duplicates);
394 found = true;
395 }
396 }
397 if (!found) {
398 /* Need to add the qpn to mgm */
399 if (members_count == MLX4_QP_PER_MGM) {
400 /* entry is full */
401 err = -ENOMEM;
402 goto out_mailbox;
403 }
404 mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
405 mgm->members_count = cpu_to_be32(members_count | (prot << 30));
406 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
407 if (err)
408 goto out_mailbox;
409 }
410 last_index = entry->index;
411 }
412
413	/* add the new qpn to the list of promisc qps */
414	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
415	/* now need to add all the promisc qps to the default entry */
416 memset(mgm, 0, sizeof *mgm);
417 members_count = 0;
418 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
419 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
420 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
421
422 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
423 if (err)
424 goto out_list;
425
426 mlx4_free_cmd_mailbox(dev, mailbox);
427 mutex_unlock(&priv->mcg_table.mutex);
428 return 0;
429
430out_list:
431 list_del(&pqp->list);
432out_mailbox:
433 mlx4_free_cmd_mailbox(dev, mailbox);
434out_alloc:
435 kfree(pqp);
436out_mutex:
437 mutex_unlock(&priv->mcg_table.mutex);
438 return err;
439}
440
441static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
442 enum mlx4_steer_type steer, u32 qpn)
443{
444 struct mlx4_priv *priv = mlx4_priv(dev);
445 struct mlx4_steer *s_steer;
446 struct mlx4_cmd_mailbox *mailbox;
447 struct mlx4_mgm *mgm;
448 struct mlx4_steer_index *entry;
449 struct mlx4_promisc_qp *pqp;
450 struct mlx4_promisc_qp *dqp;
451 u32 members_count;
452 bool found;
453 bool back_to_list = false;
454 int loc, i;
455 int err;
456 u8 pf_num;
457
458 pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
459 s_steer = &mlx4_priv(dev)->steer[pf_num];
460 mutex_lock(&priv->mcg_table.mutex);
461
462 pqp = get_promisc_qp(dev, pf_num, steer, qpn);
463 if (unlikely(!pqp)) {
464		mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
465 /* nothing to do */
466 err = 0;
467 goto out_mutex;
468 }
469
470	/* remove from the list of promisc qps */
471 list_del(&pqp->list);
472 kfree(pqp);
473
474 /* set the default entry not to include the removed one */
475 mailbox = mlx4_alloc_cmd_mailbox(dev);
476 if (IS_ERR(mailbox)) {
477 err = -ENOMEM;
478 back_to_list = true;
479 goto out_list;
480 }
481 mgm = mailbox->buf;
482 members_count = 0;
483 list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
484 mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
485 mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
486
487 err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
488 if (err)
489 goto out_mailbox;
490
491	/* remove the qp from all the steering entries */
492 list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
493 found = false;
494 list_for_each_entry(dqp, &entry->duplicates, list) {
495 if (dqp->qpn == qpn) {
496 found = true;
497 break;
498 }
499 }
500 if (found) {
501 /* a duplicate, no need to change the mgm,
502 * only update the duplicates list */
503 list_del(&dqp->list);
504 kfree(dqp);
505 } else {
506 err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
507 if (err)
508 goto out_mailbox;
509 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
510 for (loc = -1, i = 0; i < members_count; ++i)
511 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
512 loc = i;
513
514 mgm->members_count = cpu_to_be32(--members_count |
515 (MLX4_PROT_ETH << 30));
516 mgm->qp[loc] = mgm->qp[i - 1];
517 mgm->qp[i - 1] = 0;
518
519 err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
520 if (err)
521 goto out_mailbox;
522 }
523
524 }
525
526out_mailbox:
527 mlx4_free_cmd_mailbox(dev, mailbox);
528out_list:
529 if (back_to_list)
530 list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
531out_mutex:
532 mutex_unlock(&priv->mcg_table.mutex);
533 return err;
534}
535
82/* 536/*
83 * Caller must hold MCG table semaphore. gid and mgm parameters must 537 * Caller must hold MCG table semaphore. gid and mgm parameters must
84 * be properly aligned for command interface. 538 * be properly aligned for command interface.
@@ -94,15 +548,17 @@ static int mlx4_MGID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
94 * If no AMGM exists for given gid, *index = -1, *prev = index of last 548 * If no AMGM exists for given gid, *index = -1, *prev = index of last
95 * entry in hash chain and *mgm holds end of hash chain. 549 * entry in hash chain and *mgm holds end of hash chain.
96 */ 550 */
97static int find_mgm(struct mlx4_dev *dev, 551static int find_entry(struct mlx4_dev *dev, u8 port,
98 u8 *gid, enum mlx4_protocol protocol, 552 u8 *gid, enum mlx4_protocol prot,
99 struct mlx4_cmd_mailbox *mgm_mailbox, 553 enum mlx4_steer_type steer,
100 u16 *hash, int *prev, int *index) 554 struct mlx4_cmd_mailbox *mgm_mailbox,
555 u16 *hash, int *prev, int *index)
101{ 556{
102 struct mlx4_cmd_mailbox *mailbox; 557 struct mlx4_cmd_mailbox *mailbox;
103 struct mlx4_mgm *mgm = mgm_mailbox->buf; 558 struct mlx4_mgm *mgm = mgm_mailbox->buf;
104 u8 *mgid; 559 u8 *mgid;
105 int err; 560 int err;
561 u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
106 562
107 mailbox = mlx4_alloc_cmd_mailbox(dev); 563 mailbox = mlx4_alloc_cmd_mailbox(dev);
108 if (IS_ERR(mailbox)) 564 if (IS_ERR(mailbox))
@@ -111,7 +567,7 @@ static int find_mgm(struct mlx4_dev *dev,
111 567
112 memcpy(mgid, gid, 16); 568 memcpy(mgid, gid, 16);
113 569
114 err = mlx4_MGID_HASH(dev, mailbox, hash); 570 err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
115 mlx4_free_cmd_mailbox(dev, mailbox); 571 mlx4_free_cmd_mailbox(dev, mailbox);
116 if (err) 572 if (err)
117 return err; 573 return err;
@@ -123,11 +579,11 @@ static int find_mgm(struct mlx4_dev *dev,
123 *prev = -1; 579 *prev = -1;
124 580
125 do { 581 do {
126 err = mlx4_READ_MCG(dev, *index, mgm_mailbox); 582 err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
127 if (err) 583 if (err)
128 return err; 584 return err;
129 585
130 if (!memcmp(mgm->gid, zero_gid, 16)) { 586 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
131 if (*index != *hash) { 587 if (*index != *hash) {
132 mlx4_err(dev, "Found zero MGID in AMGM.\n"); 588 mlx4_err(dev, "Found zero MGID in AMGM.\n");
133 err = -EINVAL; 589 err = -EINVAL;
@@ -136,7 +592,7 @@ static int find_mgm(struct mlx4_dev *dev,
136 } 592 }
137 593
138 if (!memcmp(mgm->gid, gid, 16) && 594 if (!memcmp(mgm->gid, gid, 16) &&
139 be32_to_cpu(mgm->members_count) >> 30 == protocol) 595 be32_to_cpu(mgm->members_count) >> 30 == prot)
140 return err; 596 return err;
141 597
142 *prev = *index; 598 *prev = *index;
@@ -147,8 +603,9 @@ static int find_mgm(struct mlx4_dev *dev,
147 return err; 603 return err;
148} 604}
149 605
150int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 606int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
151 int block_mcast_loopback, enum mlx4_protocol protocol) 607 int block_mcast_loopback, enum mlx4_protocol prot,
608 enum mlx4_steer_type steer)
152{ 609{
153 struct mlx4_priv *priv = mlx4_priv(dev); 610 struct mlx4_priv *priv = mlx4_priv(dev);
154 struct mlx4_cmd_mailbox *mailbox; 611 struct mlx4_cmd_mailbox *mailbox;
@@ -159,6 +616,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
159 int link = 0; 616 int link = 0;
160 int i; 617 int i;
161 int err; 618 int err;
619 u8 port = gid[5];
620 u8 new_entry = 0;
162 621
163 mailbox = mlx4_alloc_cmd_mailbox(dev); 622 mailbox = mlx4_alloc_cmd_mailbox(dev);
164 if (IS_ERR(mailbox)) 623 if (IS_ERR(mailbox))
@@ -166,14 +625,16 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
166 mgm = mailbox->buf; 625 mgm = mailbox->buf;
167 626
168 mutex_lock(&priv->mcg_table.mutex); 627 mutex_lock(&priv->mcg_table.mutex);
169 628 err = find_entry(dev, port, gid, prot, steer,
170 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 629 mailbox, &hash, &prev, &index);
171 if (err) 630 if (err)
172 goto out; 631 goto out;
173 632
174 if (index != -1) { 633 if (index != -1) {
175 if (!memcmp(mgm->gid, zero_gid, 16)) 634 if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
635 new_entry = 1;
176 memcpy(mgm->gid, gid, 16); 636 memcpy(mgm->gid, gid, 16);
637 }
177 } else { 638 } else {
178 link = 1; 639 link = 1;
179 640
@@ -209,26 +670,34 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
209 else 670 else
210 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK); 671 mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
211 672
212 mgm->members_count = cpu_to_be32(members_count | (u32) protocol << 30); 673 mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
213 674
214 err = mlx4_WRITE_MCG(dev, index, mailbox); 675 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
215 if (err) 676 if (err)
216 goto out; 677 goto out;
217 678
218 if (!link) 679 if (!link)
219 goto out; 680 goto out;
220 681
221 err = mlx4_READ_MCG(dev, prev, mailbox); 682 err = mlx4_READ_ENTRY(dev, prev, mailbox);
222 if (err) 683 if (err)
223 goto out; 684 goto out;
224 685
225 mgm->next_gid_index = cpu_to_be32(index << 6); 686 mgm->next_gid_index = cpu_to_be32(index << 6);
226 687
227 err = mlx4_WRITE_MCG(dev, prev, mailbox); 688 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
228 if (err) 689 if (err)
229 goto out; 690 goto out;
230 691
231out: 692out:
693 if (prot == MLX4_PROT_ETH) {
694 /* manage the steering entry for promisc mode */
695 if (new_entry)
696 new_steering_entry(dev, 0, port, steer, index, qp->qpn);
697 else
698 existing_steering_entry(dev, 0, port, steer,
699 index, qp->qpn);
700 }
232 if (err && link && index != -1) { 701 if (err && link && index != -1) {
233 if (index < dev->caps.num_mgms) 702 if (index < dev->caps.num_mgms)
234 mlx4_warn(dev, "Got AMGM index %d < %d", 703 mlx4_warn(dev, "Got AMGM index %d < %d",
@@ -242,10 +711,9 @@ out:
242 mlx4_free_cmd_mailbox(dev, mailbox); 711 mlx4_free_cmd_mailbox(dev, mailbox);
243 return err; 712 return err;
244} 713}
245EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
246 714
247int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 715int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
248 enum mlx4_protocol protocol) 716 enum mlx4_protocol prot, enum mlx4_steer_type steer)
249{ 717{
250 struct mlx4_priv *priv = mlx4_priv(dev); 718 struct mlx4_priv *priv = mlx4_priv(dev);
251 struct mlx4_cmd_mailbox *mailbox; 719 struct mlx4_cmd_mailbox *mailbox;
@@ -255,6 +723,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
255 int prev, index; 723 int prev, index;
256 int i, loc; 724 int i, loc;
257 int err; 725 int err;
726 u8 port = gid[5];
727 bool removed_entry = false;
258 728
259 mailbox = mlx4_alloc_cmd_mailbox(dev); 729 mailbox = mlx4_alloc_cmd_mailbox(dev);
260 if (IS_ERR(mailbox)) 730 if (IS_ERR(mailbox))
@@ -263,7 +733,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
263 733
264 mutex_lock(&priv->mcg_table.mutex); 734 mutex_lock(&priv->mcg_table.mutex);
265 735
266 err = find_mgm(dev, gid, protocol, mailbox, &hash, &prev, &index); 736 err = find_entry(dev, port, gid, prot, steer,
737 mailbox, &hash, &prev, &index);
267 if (err) 738 if (err)
268 goto out; 739 goto out;
269 740
@@ -273,6 +744,11 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
273 goto out; 744 goto out;
274 } 745 }
275 746
747	/* if this qp is also a promisc qp, it shouldn't be removed */
748 if (prot == MLX4_PROT_ETH &&
749 check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
750 goto out;
751
276 members_count = be32_to_cpu(mgm->members_count) & 0xffffff; 752 members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
277 for (loc = -1, i = 0; i < members_count; ++i) 753 for (loc = -1, i = 0; i < members_count; ++i)
278 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) 754 if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
@@ -285,26 +761,31 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
285 } 761 }
286 762
287 763
288 mgm->members_count = cpu_to_be32(--members_count | (u32) protocol << 30); 764 mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
289 mgm->qp[loc] = mgm->qp[i - 1]; 765 mgm->qp[loc] = mgm->qp[i - 1];
290 mgm->qp[i - 1] = 0; 766 mgm->qp[i - 1] = 0;
291 767
292 if (i != 1) { 768 if (prot == MLX4_PROT_ETH)
293 err = mlx4_WRITE_MCG(dev, index, mailbox); 769 removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
770 if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
771 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
294 goto out; 772 goto out;
295 } 773 }
296 774
775 /* We are going to delete the entry, members count should be 0 */
776 mgm->members_count = cpu_to_be32((u32) prot << 30);
777
297 if (prev == -1) { 778 if (prev == -1) {
298 /* Remove entry from MGM */ 779 /* Remove entry from MGM */
299 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6; 780 int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
300 if (amgm_index) { 781 if (amgm_index) {
301 err = mlx4_READ_MCG(dev, amgm_index, mailbox); 782 err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
302 if (err) 783 if (err)
303 goto out; 784 goto out;
304 } else 785 } else
305 memset(mgm->gid, 0, 16); 786 memset(mgm->gid, 0, 16);
306 787
307 err = mlx4_WRITE_MCG(dev, index, mailbox); 788 err = mlx4_WRITE_ENTRY(dev, index, mailbox);
308 if (err) 789 if (err)
309 goto out; 790 goto out;
310 791
@@ -319,13 +800,13 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
319 } else { 800 } else {
320 /* Remove entry from AMGM */ 801 /* Remove entry from AMGM */
321 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 802 int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
322 err = mlx4_READ_MCG(dev, prev, mailbox); 803 err = mlx4_READ_ENTRY(dev, prev, mailbox);
323 if (err) 804 if (err)
324 goto out; 805 goto out;
325 806
326 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6); 807 mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);
327 808
328 err = mlx4_WRITE_MCG(dev, prev, mailbox); 809 err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
329 if (err) 810 if (err)
330 goto out; 811 goto out;
331 812
@@ -343,8 +824,85 @@ out:
343 mlx4_free_cmd_mailbox(dev, mailbox); 824 mlx4_free_cmd_mailbox(dev, mailbox);
344 return err; 825 return err;
345} 826}
827
828
829int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
830 int block_mcast_loopback, enum mlx4_protocol prot)
831{
832 enum mlx4_steer_type steer;
833
834 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
835
836 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
837 return 0;
838
839 if (prot == MLX4_PROT_ETH)
840 gid[7] |= (steer << 1);
841
842 return mlx4_qp_attach_common(dev, qp, gid,
843 block_mcast_loopback, prot,
844 steer);
845}
846EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
847
848int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
849 enum mlx4_protocol prot)
850{
851 enum mlx4_steer_type steer;
852
853 steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
854
855 if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
856 return 0;
857
858 if (prot == MLX4_PROT_ETH) {
859 gid[7] |= (steer << 1);
860 }
861
862 return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
863}
346EXPORT_SYMBOL_GPL(mlx4_multicast_detach); 864EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
347 865
866
867int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
868{
869 if (!dev->caps.vep_mc_steering)
870 return 0;
871
872
873 return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
874}
875EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
876
877int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
878{
879 if (!dev->caps.vep_mc_steering)
880 return 0;
881
882
883 return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
884}
885EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
886
887int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
888{
889 if (!dev->caps.vep_mc_steering)
890 return 0;
891
892
893 return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
894}
895EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
896
897int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
898{
899 if (!dev->caps.vep_mc_steering)
900 return 0;
901
902 return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
903}
904EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
905
348int mlx4_init_mcg_table(struct mlx4_dev *dev) 906int mlx4_init_mcg_table(struct mlx4_dev *dev)
349{ 907{
350 struct mlx4_priv *priv = mlx4_priv(dev); 908 struct mlx4_priv *priv = mlx4_priv(dev);
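
Everything in the new Ethernet steering model is keyed by a synthetic 16-byte GID: the MAC occupies bytes 10..15 (so is_valid_ether_addr(&gid[10]) distinguishes unicast from multicast), the port sits in byte 5, and the steering type is shifted left by one into byte 7, as the attach/detach wrappers above show. A hedged sketch of that packing; the MC/UC numeric values below are assumptions, the authoritative enum mlx4_steer_type lives in the mlx4 headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed values, mirroring the apparent MC/UC split above. */
enum { MC_STEER = 0, UC_STEER = 1 };

/* Build the synthetic steering GID: MAC in bytes 10..15, port in byte 5,
 * steer type shifted left by one into byte 7. */
static void build_steer_gid(uint8_t gid[16], const uint8_t mac[6],
			    uint8_t port, int steer)
{
	memset(gid, 0, 16);
	memcpy(&gid[10], mac, 6);
	gid[5] = port;
	gid[7] |= steer << 1;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x01, 0x02, 0x03 };
	uint8_t gid[16];
	int i;

	build_steer_gid(gid, mac, 1, UC_STEER);
	for (i = 0; i < 16; i++)
		printf("%02x%c", gid[i], i == 15 ? '\n' : ':');
	return 0;
}
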
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 0da5bb7285b4..c1e0e5f1bcdb 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -105,6 +105,7 @@ struct mlx4_bitmap {
105 u32 max; 105 u32 max;
106 u32 reserved_top; 106 u32 reserved_top;
107 u32 mask; 107 u32 mask;
108 u32 avail;
108 spinlock_t lock; 109 spinlock_t lock;
109 unsigned long *table; 110 unsigned long *table;
110}; 111};
@@ -162,6 +163,27 @@ struct mlx4_fw {
162 u8 catas_bar; 163 u8 catas_bar;
163}; 164};
164 165
166#define MGM_QPN_MASK 0x00FFFFFF
167#define MGM_BLCK_LB_BIT 30
168
169struct mlx4_promisc_qp {
170 struct list_head list;
171 u32 qpn;
172};
173
174struct mlx4_steer_index {
175 struct list_head list;
176 unsigned int index;
177 struct list_head duplicates;
178};
179
180struct mlx4_mgm {
181 __be32 next_gid_index;
182 __be32 members_count;
183 u32 reserved[2];
184 u8 gid[16];
185 __be32 qp[MLX4_QP_PER_MGM];
186};
165struct mlx4_cmd { 187struct mlx4_cmd {
166 struct pci_pool *pool; 188 struct pci_pool *pool;
167 void __iomem *hcr; 189 void __iomem *hcr;
@@ -265,6 +287,10 @@ struct mlx4_vlan_table {
265 int max; 287 int max;
266}; 288};
267 289
290struct mlx4_mac_entry {
291 u64 mac;
292};
293
268struct mlx4_port_info { 294struct mlx4_port_info {
269 struct mlx4_dev *dev; 295 struct mlx4_dev *dev;
270 int port; 296 int port;
@@ -272,7 +298,9 @@ struct mlx4_port_info {
272 struct device_attribute port_attr; 298 struct device_attribute port_attr;
273 enum mlx4_port_type tmp_type; 299 enum mlx4_port_type tmp_type;
274 struct mlx4_mac_table mac_table; 300 struct mlx4_mac_table mac_table;
301 struct radix_tree_root mac_tree;
275 struct mlx4_vlan_table vlan_table; 302 struct mlx4_vlan_table vlan_table;
303 int base_qpn;
276}; 304};
277 305
278struct mlx4_sense { 306struct mlx4_sense {
@@ -282,6 +310,17 @@ struct mlx4_sense {
282 struct delayed_work sense_poll; 310 struct delayed_work sense_poll;
283}; 311};
284 312
313struct mlx4_msix_ctl {
314 u64 pool_bm;
315 spinlock_t pool_lock;
316};
317
318struct mlx4_steer {
319 struct list_head promisc_qps[MLX4_NUM_STEERS];
320 struct list_head steer_entries[MLX4_NUM_STEERS];
321 struct list_head high_prios;
322};
323
285struct mlx4_priv { 324struct mlx4_priv {
286 struct mlx4_dev dev; 325 struct mlx4_dev dev;
287 326
@@ -313,6 +352,11 @@ struct mlx4_priv {
313 struct mlx4_port_info port[MLX4_MAX_PORTS + 1]; 352 struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
314 struct mlx4_sense sense; 353 struct mlx4_sense sense;
315 struct mutex port_mutex; 354 struct mutex port_mutex;
355 struct mlx4_msix_ctl msix_ctl;
356 struct mlx4_steer *steer;
357 struct list_head bf_list;
358 struct mutex bf_mutex;
359 struct io_mapping *bf_mapping;
316}; 360};
317 361
318static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) 362static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -328,6 +372,7 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
328void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj); 372void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
329u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align); 373u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
330void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt); 374void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
375u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
331int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, 376int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
332				 u32 reserved_bot, u32 reserved_top); 377				 u32 reserved_bot, u32 reserved_top);
333void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap); 378void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
@@ -403,4 +448,9 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
403int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); 448int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
404int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps); 449int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
405 450
451int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
452 enum mlx4_protocol prot, enum mlx4_steer_type steer);
453int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
454 int block_mcast_loopback, enum mlx4_protocol prot,
455 enum mlx4_steer_type steer);
406#endif /* MLX4_H */ 456#endif /* MLX4_H */
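
struct mlx4_mgm overloads members_count: the protocol occupies the top two bits and the member count the low 24, which is why the mcg.c code above keeps masking with 0xffffff and shifting by 30. A minimal sketch of the packing; the protocol value below is illustrative, the real ones come from enum mlx4_protocol:

#include <stdint.h>
#include <stdio.h>

/* Pack the MGM members_count word: prot in bits 31:30, count in bits 23:0. */
static uint32_t pack_members(uint32_t count, uint32_t prot)
{
	return (count & 0xffffff) | prot << 30;
}

int main(void)
{
	uint32_t w = pack_members(5, 1);	/* prot value illustrative only */

	printf("count=%u prot=%u\n", w & 0xffffff, w >> 30);
	return 0;
}
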
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index dfed6a07c2d7..e30f6099c0de 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -49,8 +49,8 @@
49#include "en_port.h" 49#include "en_port.h"
50 50
51#define DRV_NAME "mlx4_en" 51#define DRV_NAME "mlx4_en"
52#define DRV_VERSION "1.5.1.6" 52#define DRV_VERSION "1.5.4.1"
53#define DRV_RELDATE "August 2010" 53#define DRV_RELDATE "March 2011"
54 54
55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) 55#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
56 56
@@ -62,6 +62,7 @@
62#define MLX4_EN_PAGE_SHIFT 12 62#define MLX4_EN_PAGE_SHIFT 12
63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT) 63#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
64#define MAX_RX_RINGS 16 64#define MAX_RX_RINGS 16
65#define MIN_RX_RINGS 4
65#define TXBB_SIZE 64 66#define TXBB_SIZE 64
66#define HEADROOM (2048 / TXBB_SIZE + 1) 67#define HEADROOM (2048 / TXBB_SIZE + 1)
67#define STAMP_STRIDE 64 68#define STAMP_STRIDE 64
@@ -124,6 +125,7 @@ enum {
124#define MLX4_EN_RX_SIZE_THRESH 1024 125#define MLX4_EN_RX_SIZE_THRESH 1024
125#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH) 126#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
126#define MLX4_EN_SAMPLE_INTERVAL 0 127#define MLX4_EN_SAMPLE_INTERVAL 0
128#define MLX4_EN_AVG_PKT_SMALL 256
127 129
128#define MLX4_EN_AUTO_CONF 0xffff 130#define MLX4_EN_AUTO_CONF 0xffff
129 131
@@ -214,6 +216,9 @@ struct mlx4_en_tx_desc {
214 216
215#define MLX4_EN_USE_SRQ 0x01000000 217#define MLX4_EN_USE_SRQ 0x01000000
216 218
219#define MLX4_EN_CX3_LOW_ID 0x1000
220#define MLX4_EN_CX3_HIGH_ID 0x1005
221
217struct mlx4_en_rx_alloc { 222struct mlx4_en_rx_alloc {
218 struct page *page; 223 struct page *page;
219 u16 offset; 224 u16 offset;
@@ -243,6 +248,8 @@ struct mlx4_en_tx_ring {
243 unsigned long bytes; 248 unsigned long bytes;
244 unsigned long packets; 249 unsigned long packets;
245 spinlock_t comp_lock; 250 spinlock_t comp_lock;
251 struct mlx4_bf bf;
252 bool bf_enabled;
246}; 253};
247 254
248struct mlx4_en_rx_desc { 255struct mlx4_en_rx_desc {
@@ -453,6 +460,7 @@ struct mlx4_en_priv {
453 struct mlx4_en_rss_map rss_map; 460 struct mlx4_en_rss_map rss_map;
454 u32 flags; 461 u32 flags;
455#define MLX4_EN_FLAG_PROMISC 0x1 462#define MLX4_EN_FLAG_PROMISC 0x1
463#define MLX4_EN_FLAG_MC_PROMISC 0x2
456 u32 tx_ring_num; 464 u32 tx_ring_num;
457 u32 rx_ring_num; 465 u32 rx_ring_num;
458 u32 rx_skb_size; 466 u32 rx_skb_size;
@@ -461,6 +469,7 @@ struct mlx4_en_priv {
461 u16 log_rx_info; 469 u16 log_rx_info;
462 470
463 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS]; 471 struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
472 int tx_vector;
464 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; 473 struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
465 struct mlx4_en_cq tx_cq[MAX_TX_RINGS]; 474 struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
466 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 475 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
@@ -476,6 +485,13 @@ struct mlx4_en_priv {
476 int mc_addrs_cnt; 485 int mc_addrs_cnt;
477 struct mlx4_en_stat_out_mbox hw_stats; 486 struct mlx4_en_stat_out_mbox hw_stats;
478 int vids[128]; 487 int vids[128];
488 bool wol;
489};
490
491enum mlx4_en_wol {
492 MLX4_EN_WOL_MAGIC = (1ULL << 61),
493 MLX4_EN_WOL_ENABLED = (1ULL << 62),
494 MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
479}; 495};
480 496
481 497
@@ -486,12 +502,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
486int mlx4_en_start_port(struct net_device *dev); 502int mlx4_en_start_port(struct net_device *dev);
487void mlx4_en_stop_port(struct net_device *dev); 503void mlx4_en_stop_port(struct net_device *dev);
488 504
489void mlx4_en_free_resources(struct mlx4_en_priv *priv); 505void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors);
490int mlx4_en_alloc_resources(struct mlx4_en_priv *priv); 506int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
491 507
492int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, 508int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
493 int entries, int ring, enum cq_type mode); 509 int entries, int ring, enum cq_type mode);
494void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 510void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
511 bool reserve_vectors);
495int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 512int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
496void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 513void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
497int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 514int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
@@ -503,7 +520,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
503netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 520netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
504 521
505int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, 522int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
506 u32 size, u16 stride); 523 int qpn, u32 size, u16 stride);
507void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 524void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
508int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 525int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
509 struct mlx4_en_tx_ring *ring, 526 struct mlx4_en_tx_ring *ring,
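
The mlx4_en_wol bits above land in the same u64 config word that mlx4_wol_read()/mlx4_wol_write() pass to firmware. A small sketch of how a caller might compose and test that word; that firmware consumes exactly these bits this way is an assumption here, not something the diff states:

#include <stdint.h>
#include <stdio.h>

/* Same bit positions as the mlx4_en_wol enum above. */
#define WOL_MAGIC	(1ULL << 61)
#define WOL_ENABLED	(1ULL << 62)
#define WOL_DO_MODIFY	(1ULL << 63)

int main(void)
{
	uint64_t config = 0;

	/* arm magic-packet wake and mark the word as carrying a change */
	config |= WOL_DO_MODIFY | WOL_ENABLED | WOL_MAGIC;
	if (config & WOL_MAGIC)
		printf("magic packet wake armed, config=0x%016llx\n",
		       (unsigned long long)config);
	config &= ~(WOL_ENABLED | WOL_MAGIC);	/* disarm */
	return 0;
}
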
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index c4988d6bd5b2..1286b886dcea 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -32,12 +32,17 @@
32 */ 32 */
33 33
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/io-mapping.h>
35 36
36#include <asm/page.h> 37#include <asm/page.h>
37 38
38#include "mlx4.h" 39#include "mlx4.h"
39#include "icm.h" 40#include "icm.h"
40 41
42enum {
43 MLX4_NUM_RESERVED_UARS = 8
44};
45
41int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn) 46int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
42{ 47{
43 struct mlx4_priv *priv = mlx4_priv(dev); 48 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -77,6 +82,7 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
77 return -ENOMEM; 82 return -ENOMEM;
78 83
79 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index; 84 uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
85 uar->map = NULL;
80 86
81 return 0; 87 return 0;
82} 88}
@@ -88,6 +94,102 @@ void mlx4_uar_free(struct mlx4_dev *dev, struct mlx4_uar *uar)
88} 94}
89EXPORT_SYMBOL_GPL(mlx4_uar_free); 95EXPORT_SYMBOL_GPL(mlx4_uar_free);
90 96
97int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf)
98{
99 struct mlx4_priv *priv = mlx4_priv(dev);
100 struct mlx4_uar *uar;
101 int err = 0;
102 int idx;
103
104 if (!priv->bf_mapping)
105 return -ENOMEM;
106
107 mutex_lock(&priv->bf_mutex);
108 if (!list_empty(&priv->bf_list))
109 uar = list_entry(priv->bf_list.next, struct mlx4_uar, bf_list);
110 else {
111 if (mlx4_bitmap_avail(&priv->uar_table.bitmap) < MLX4_NUM_RESERVED_UARS) {
112 err = -ENOMEM;
113 goto out;
114 }
115 uar = kmalloc(sizeof *uar, GFP_KERNEL);
116 if (!uar) {
117 err = -ENOMEM;
118 goto out;
119 }
120 err = mlx4_uar_alloc(dev, uar);
121 if (err)
122 goto free_kmalloc;
123
124 uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE);
125 if (!uar->map) {
126 err = -ENOMEM;
127 goto free_uar;
128 }
129
130 uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
131 if (!uar->bf_map) {
132 err = -ENOMEM;
133			goto unmap_uar;
134 }
135 uar->free_bf_bmap = 0;
136 list_add(&uar->bf_list, &priv->bf_list);
137 }
138
139 bf->uar = uar;
140 idx = ffz(uar->free_bf_bmap);
141 uar->free_bf_bmap |= 1 << idx;
142 bf->uar = uar;
143 bf->offset = 0;
144 bf->buf_size = dev->caps.bf_reg_size / 2;
145 bf->reg = uar->bf_map + idx * dev->caps.bf_reg_size;
146 if (uar->free_bf_bmap == (1 << dev->caps.bf_regs_per_page) - 1)
147 list_del_init(&uar->bf_list);
148
149 goto out;
150
151unmap_uar:
152 bf->uar = NULL;
153 iounmap(uar->map);
154
155free_uar:
156 mlx4_uar_free(dev, uar);
157
158free_kmalloc:
159 kfree(uar);
160
161out:
162 mutex_unlock(&priv->bf_mutex);
163 return err;
164}
165EXPORT_SYMBOL_GPL(mlx4_bf_alloc);
166
167void mlx4_bf_free(struct mlx4_dev *dev, struct mlx4_bf *bf)
168{
169 struct mlx4_priv *priv = mlx4_priv(dev);
170 int idx;
171
172 if (!bf->uar || !bf->uar->bf_map)
173 return;
174
175 mutex_lock(&priv->bf_mutex);
176 idx = (bf->reg - bf->uar->bf_map) / dev->caps.bf_reg_size;
177 bf->uar->free_bf_bmap &= ~(1 << idx);
178 if (!bf->uar->free_bf_bmap) {
179 if (!list_empty(&bf->uar->bf_list))
180 list_del(&bf->uar->bf_list);
181
182 io_mapping_unmap(bf->uar->bf_map);
183 iounmap(bf->uar->map);
184 mlx4_uar_free(dev, bf->uar);
185 kfree(bf->uar);
186 } else if (list_empty(&bf->uar->bf_list))
187 list_add(&bf->uar->bf_list, &priv->bf_list);
188
189 mutex_unlock(&priv->bf_mutex);
190}
191EXPORT_SYMBOL_GPL(mlx4_bf_free);
192
91int mlx4_init_uar_table(struct mlx4_dev *dev) 193int mlx4_init_uar_table(struct mlx4_dev *dev)
92{ 194{
93 if (dev->caps.num_uars <= 128) { 195 if (dev->caps.num_uars <= 128) {
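
mlx4_bf_alloc() above sub-allocates blue-flame registers within a shared UAR page: ffz() picks the first free slot in free_bf_bmap, the register offset is slot times bf_reg_size, and a fully used page is dropped from the free list. A toy version of that index math, with invented values for the per-page register count and register size:

#include <stdio.h>

/* Invented values for illustration; the real ones come from dev->caps. */
#define BF_REGS_PER_PAGE 8
#define BF_REG_SIZE	 512

/* Find the first zero bit, the userspace analogue of ffz(). */
static int ffz32(unsigned int v)
{
	int i;

	for (i = 0; i < 32; i++)
		if (!(v & (1U << i)))
			return i;
	return -1;
}

int main(void)
{
	unsigned int free_bf_bmap = 0;	/* set bits mark slots in use */
	int idx = ffz32(free_bf_bmap);

	free_bf_bmap |= 1U << idx;	/* mark the slot used */
	printf("allocated BF reg %d at offset %d\n", idx, idx * BF_REG_SIZE);
	if (free_bf_bmap == (1U << BF_REGS_PER_PAGE) - 1)
		printf("page full, drop it from the free list\n");
	return 0;
}
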
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 451339559bdc..eca7d8596f87 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -90,12 +90,79 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
90 return err; 90 return err;
91} 91}
92 92
93int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index) 93static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
94 u64 mac, int *qpn, u8 reserve)
94{ 95{
95 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 96 struct mlx4_qp qp;
97 u8 gid[16] = {0};
98 int err;
99
100 if (reserve) {
101 err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
102 if (err) {
103 mlx4_err(dev, "Failed to reserve qp for mac registration\n");
104 return err;
105 }
106 }
107 qp.qpn = *qpn;
108
109 mac &= 0xffffffffffffULL;
110 mac = cpu_to_be64(mac << 16);
111 memcpy(&gid[10], &mac, ETH_ALEN);
112 gid[5] = port;
113 gid[7] = MLX4_UC_STEER << 1;
114
115 err = mlx4_qp_attach_common(dev, &qp, gid, 0,
116 MLX4_PROT_ETH, MLX4_UC_STEER);
117 if (err && reserve)
118 mlx4_qp_release_range(dev, *qpn, 1);
119
120 return err;
121}
122
123static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
124 u64 mac, int qpn, u8 free)
125{
126 struct mlx4_qp qp;
127 u8 gid[16] = {0};
128
129 qp.qpn = qpn;
130 mac &= 0xffffffffffffULL;
131 mac = cpu_to_be64(mac << 16);
132 memcpy(&gid[10], &mac, ETH_ALEN);
133 gid[5] = port;
134 gid[7] = MLX4_UC_STEER << 1;
135
136 mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
137 if (free)
138 mlx4_qp_release_range(dev, qpn, 1);
139}
140
141int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
142{
143 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
144 struct mlx4_mac_table *table = &info->mac_table;
145 struct mlx4_mac_entry *entry;
96 int i, err = 0; 146 int i, err = 0;
97 int free = -1; 147 int free = -1;
98 148
149 if (dev->caps.vep_uc_steering) {
150 err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
151 if (!err) {
152 entry = kmalloc(sizeof *entry, GFP_KERNEL);
153 if (!entry) {
154 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
155 return -ENOMEM;
156 }
157 entry->mac = mac;
158 err = radix_tree_insert(&info->mac_tree, *qpn, entry);
159 if (err) {
160 mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
161 return err;
162 }
163 } else
164 return err;
165 }
99 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac); 166 mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
100 mutex_lock(&table->mutex); 167 mutex_lock(&table->mutex);
101 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) { 168 for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
@@ -106,7 +173,6 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
106 173
107 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) { 174 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
108 /* MAC already registered, increase reference count */ 175
109 *index = i;
110 ++table->refs[i]; 176 ++table->refs[i];
111 goto out; 177 goto out;
112 } 178 }
@@ -137,7 +203,8 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
137 goto out; 203 goto out;
138 } 204 }
139 205
140 *index = free; 206 if (!dev->caps.vep_uc_steering)
207 *qpn = info->base_qpn + free;
141 ++table->total; 208 ++table->total;
142out: 209out:
143 mutex_unlock(&table->mutex); 210 mutex_unlock(&table->mutex);
@@ -145,20 +212,52 @@ out:
145} 212}
146EXPORT_SYMBOL_GPL(mlx4_register_mac); 213EXPORT_SYMBOL_GPL(mlx4_register_mac);
147 214
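The reworked registration path now hands callers a QP number rather than a table index: with vep_uc_steering the qpn comes from the reserved steering range, otherwise it is info->base_qpn plus the MAC-table slot. A hedged sketch of how a consumer might drive the pair (function name illustrative, error handling trimmed, wrap passed as 0 purely for illustration):

/* assumes the declarations from <linux/mlx4/device.h> */
static int example_attach_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	int qpn;
	int err;

	err = mlx4_register_mac(dev, port, mac, &qpn, 0);
	if (err)
		return err;

	/* ... point an RX ring at qpn for this MAC ... */

	mlx4_unregister_mac(dev, port, qpn);
	return 0;
}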
148void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index) 215static int validate_index(struct mlx4_dev *dev,
216 struct mlx4_mac_table *table, int index)
149{ 217{
150 struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table; 218 int err = 0;
151 219
152 mutex_lock(&table->mutex); 220 if (index < 0 || index >= table->max || !table->entries[index]) {
153 if (!table->refs[index]) { 221 mlx4_warn(dev, "No valid MAC entry for the given index\n");
154 mlx4_warn(dev, "No MAC entry for index %d\n", index); 222 err = -EINVAL;
155 goto out;
156 } 223 }
157 if (--table->refs[index]) { 224 return err;
158 mlx4_warn(dev, "Have more references for index %d," 225}
159 "no need to modify MAC table\n", index); 226
160 goto out; 227static int find_index(struct mlx4_dev *dev,
228 struct mlx4_mac_table *table, u64 mac)
229{
230 int i;
231 for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
232 if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
233 return i;
161 } 234 }
235 /* MAC not found */
236 return -EINVAL;
237}
238
239void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
240{
241 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
242 struct mlx4_mac_table *table = &info->mac_table;
243 int index = qpn - info->base_qpn;
244 struct mlx4_mac_entry *entry;
245
246 if (dev->caps.vep_uc_steering) {
247 entry = radix_tree_lookup(&info->mac_tree, qpn);
248 if (entry) {
249 mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
250 radix_tree_delete(&info->mac_tree, qpn);
251 index = find_index(dev, table, entry->mac);
252 kfree(entry);
253 }
254 }
255
256 mutex_lock(&table->mutex);
257
258 if (validate_index(dev, table, index))
259 goto out;
260
162 table->entries[index] = 0; 261 table->entries[index] = 0;
163 mlx4_set_port_mac_table(dev, port, table->entries); 262 mlx4_set_port_mac_table(dev, port, table->entries);
164 --table->total; 263 --table->total;
@@ -167,6 +266,44 @@ out:
167} 266}
168EXPORT_SYMBOL_GPL(mlx4_unregister_mac); 267EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
169 268
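mlx4_unregister_mac() above leans on the generic radix tree to map a qpn back to its MAC entry. The insert/lookup/delete pattern in isolation, with illustrative names (radix_tree_delete() returns the removed item, so the separate lookup can be folded away):

#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

struct mac_entry {
	u64 mac;
};

static RADIX_TREE(mac_tree, GFP_KERNEL);

static int remember_mac(int qpn, u64 mac)
{
	struct mac_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->mac = mac;
	return radix_tree_insert(&mac_tree, qpn, e);
}

static void forget_mac(int qpn)
{
	struct mac_entry *e = radix_tree_delete(&mac_tree, qpn);

	kfree(e);	/* kfree(NULL) is a no-op */
}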
269int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
270{
271 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
272 struct mlx4_mac_table *table = &info->mac_table;
273 int index = qpn - info->base_qpn;
274 struct mlx4_mac_entry *entry;
275 int err;
276
277 if (dev->caps.vep_uc_steering) {
278 entry = radix_tree_lookup(&info->mac_tree, qpn);
279 if (!entry)
280 return -EINVAL;
281 index = find_index(dev, table, entry->mac);
282 mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
283 entry->mac = new_mac;
284 err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
285 if (err || index < 0)
286 return err;
287 }
288
289 mutex_lock(&table->mutex);
290
291 err = validate_index(dev, table, index);
292 if (err)
293 goto out;
294
295 table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
296
297 err = mlx4_set_port_mac_table(dev, port, table->entries);
298 if (unlikely(err)) {
299 mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
300 table->entries[index] = 0;
301 }
302out:
303 mutex_unlock(&table->mutex);
304 return err;
305}
306EXPORT_SYMBOL_GPL(mlx4_replace_mac);
170static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, 307static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
171 __be32 *entries) 308 __be32 *entries)
172{ 309{
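mlx4_replace_mac() exists so that changing the station address can reuse the QP already wired to the RX rings: the old steering entry is detached without releasing the qpn range, the new one attached without reserving, and the port-table slot is rewritten in place. A hedged usage sketch (wrapper name illustrative, wrap passed as 0):

static int example_change_mac(struct mlx4_dev *dev, u8 port,
			      int qpn, u64 new_mac)
{
	/* same qpn before and after; only the address moves */
	return mlx4_replace_mac(dev, port, qpn, new_mac, 0);
}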
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index e749f82865fe..b967647d0c76 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,9 +107,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
107 profile[MLX4_RES_AUXC].num = request->num_qp; 107 profile[MLX4_RES_AUXC].num = request->num_qp;
108 profile[MLX4_RES_SRQ].num = request->num_srq; 108 profile[MLX4_RES_SRQ].num = request->num_srq;
109 profile[MLX4_RES_CQ].num = request->num_cq; 109 profile[MLX4_RES_CQ].num = request->num_cq;
110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, 110 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
111 dev_cap->reserved_eqs +
112 num_possible_cpus() + 1);
113 profile[MLX4_RES_DMPT].num = request->num_mpt; 111 profile[MLX4_RES_DMPT].num = request->num_mpt;
114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 112 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
115 profile[MLX4_RES_MTT].num = request->num_mtt; 113 profile[MLX4_RES_MTT].num = request->num_mtt;
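The profile hunk swaps the EQ sizing heuristic: rather than reserved_eqs plus one EQ per possible CPU, the count is now clamped to the MSI-X ceiling. min_t() is the kernel's type-forcing min(), so the new line is equivalent to the open-coded clamp below (MAX_MSIX is defined elsewhere in mlx4.h and is not part of this diff):

	unsigned num_eq = dev_cap->max_eqs < MAX_MSIX ?
			  dev_cap->max_eqs : MAX_MSIX;
	profile[MLX4_RES_EQ].num = num_eq;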