 drivers/infiniband/hw/mlx4/ah.c                       |   1
 drivers/infiniband/hw/mlx4/main.c                     | 161
 drivers/infiniband/hw/mlx4/mlx4_ib.h                  |  17
 drivers/infiniband/hw/mlx4/qp.c                       |  31
 drivers/net/bonding/bond_main.c                       | 106
 drivers/net/ethernet/mellanox/mlx4/cmd.c              |   9
 drivers/net/ethernet/mellanox/mlx4/en_main.c          |   8
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c        | 176
 drivers/net/ethernet/mellanox/mlx4/en_resources.c     |   8
 drivers/net/ethernet/mellanox/mlx4/fw.c               |  56
 drivers/net/ethernet/mellanox/mlx4/intf.c             |  54
 drivers/net/ethernet/mellanox/mlx4/main.c             |  89
 drivers/net/ethernet/mellanox/mlx4/mlx4.h             |   3
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h          |   5
 drivers/net/ethernet/mellanox/mlx4/qp.c               |   2
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |   3
 include/linux/mlx4/cmd.h                              |   7
 include/linux/mlx4/device.h                           |  11
 include/linux/mlx4/driver.h                           |  19
 include/linux/mlx4/qp.h                               |   1
 include/linux/netdevice.h                             |  15
 include/net/bonding.h                                 |  17
 net/core/dev.c                                        |  20
 net/core/rtnetlink.c                                  |   1
 24 files changed, 756 insertions(+), 64 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 2d8c3397774f..f50a546224ad 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -36,6 +36,7 @@
 #include <linux/slab.h>
 #include <linux/inet.h>
 #include <linux/string.h>
+#include <linux/mlx4/driver.h>
 
 #include "mlx4_ib.h"
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9db258f7c804..2ed5b996b2f4 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	enum ib_mtu tmp;
 	struct mlx4_cmd_mailbox *mailbox;
 	int err = 0;
+	int is_bonded = mlx4_is_bonded(mdev->dev);
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state = IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 	props->active_mtu = IB_MTU_256;
+	if (is_bonded)
+		rtnl_lock(); /* required to get upper dev */
 	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
+	if (ndev && is_bonded)
+		ndev = netdev_master_upper_dev_get(ndev);
 	if (!ndev)
 		goto out_unlock;
 
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->phys_state = state_to_phys_state(props->state);
 out_unlock:
 	spin_unlock_bh(&iboe->lock);
+	if (is_bonded)
+		rtnl_unlock();
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 
 struct mlx4_ib_steering {
 	struct list_head list;
-	u64 reg_id;
+	struct mlx4_flow_reg_id reg_id;
 	union ib_gid gid;
 };
 
@@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 				   struct ib_flow_attr *flow_attr,
 				   int domain)
 {
-	int err = 0, i = 0;
+	int err = 0, i = 0, j = 0;
 	struct mlx4_ib_flow *mflow;
 	enum mlx4_net_trans_promisc_mode type[2];
+	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+	int is_bonded = mlx4_is_bonded(dev);
 
 	memset(type, 0, sizeof(type));
 
@@ -1172,26 +1181,55 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
 	while (i < ARRAY_SIZE(type) && type[i]) {
 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-					    &mflow->reg_id[i]);
+					    &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
 		i++;
+		if (is_bonded) {
+			flow_attr->port = 2;
+			err = __mlx4_ib_create_flow(qp, flow_attr,
+						    domain, type[j],
+						    &mflow->reg_id[j].mirror);
+			flow_attr->port = 1;
+			if (err)
+				goto err_create_flow;
+			j++;
+		}
+
 	}
 
 	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
-		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+					       &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
 		i++;
+		if (is_bonded) {
+			flow_attr->port = 2;
+			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+						       &mflow->reg_id[j].mirror);
+			flow_attr->port = 1;
+			if (err)
+				goto err_create_flow;
+			j++;
+		}
+		/* function to create mirror rule */
 	}
 
 	return &mflow->ibflow;
 
 err_create_flow:
 	while (i) {
-		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+					     mflow->reg_id[i].id);
 		i--;
 	}
+
+	while (j) {
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+					     mflow->reg_id[j].mirror);
+		j--;
+	}
 err_free:
 	kfree(mflow);
 	return ERR_PTR(err);
@@ -1204,10 +1242,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
 	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
 
-	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
-		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
 		if (err)
 			ret = err;
+		if (mflow->reg_id[i].mirror) {
+			err = __mlx4_ib_destroy_flow(mdev->dev,
+						     mflow->reg_id[i].mirror);
+			if (err)
+				ret = err;
+		}
 		i++;
 	}
 
@@ -1219,11 +1263,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_dev *dev = mdev->dev;
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-	u64 reg_id;
 	struct mlx4_ib_steering *ib_steering = NULL;
 	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
 		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+	struct mlx4_flow_reg_id reg_id;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1235,10 +1280,20 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
 				    !!(mqp->flags &
 				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-				    prot, &reg_id);
+				    prot, &reg_id.id);
 	if (err)
 		goto err_malloc;
 
+	reg_id.mirror = 0;
+	if (mlx4_is_bonded(dev)) {
+		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 2,
+					    !!(mqp->flags &
+					    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+					    prot, &reg_id.mirror);
+		if (err)
+			goto err_add;
+	}
+
 	err = add_gid_entry(ibqp, gid);
 	if (err)
 		goto err_add;
@@ -1254,7 +1309,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-			      prot, reg_id);
+			      prot, reg_id.id);
+	if (reg_id.mirror)
+		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+				      prot, reg_id.mirror);
 err_malloc:
 	kfree(ib_steering);
 
@@ -1281,10 +1339,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_dev *dev = mdev->dev;
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 	struct net_device *ndev;
 	struct mlx4_ib_gid_entry *ge;
-	u64 reg_id = 0;
+	struct mlx4_flow_reg_id reg_id = {0, 0};
+
 	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
 		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
@@ -1309,10 +1369,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 
 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-				    prot, reg_id);
+				    prot, reg_id.id);
 	if (err)
 		return err;
 
+	if (mlx4_is_bonded(dev)) {
+		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+					    prot, reg_id.mirror);
+		if (err)
+			return err;
+	}
+
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
@@ -1440,6 +1507,7 @@ static void update_gids_task(struct work_struct *work)
 	union ib_gid *gids;
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
+	int is_bonded = mlx4_is_bonded(dev);
 
 	if (!gw->dev->ib_active)
 		return;
@@ -1459,7 +1527,10 @@ static void update_gids_task(struct work_struct *work)
 	if (err)
 		pr_warn("set port command failed\n");
 	else
-		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+		if ((gw->port == 1) || !is_bonded)
+			mlx4_ib_dispatch_event(gw->dev,
+					       is_bonded ? 1 : gw->port,
+					       IB_EVENT_GID_CHANGE);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	kfree(gw);
@@ -1875,7 +1946,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 			 * don't want the bond IP based gids in the table since
 			 * flows that select port by gid may get the down port.
 			 */
-			if (port_state == IB_PORT_DOWN) {
+			if (port_state == IB_PORT_DOWN &&
+			    !mlx4_is_bonded(ibdev->dev)) {
 				reset_gid_table(ibdev, port);
 				mlx4_ib_set_default_gid(ibdev,
 							curr_netdev,
@@ -2047,6 +2119,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	int err;
 	struct mlx4_ib_iboe *iboe;
 	int ib_num_ports = 0;
+	int num_req_counters;
 
 	pr_info_once("%s", mlx4_ib_version);
 
@@ -2080,13 +2153,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
 	ibdev->dev = dev;
+	ibdev->bond_next_port = 0;
 
 	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
 	ibdev->ib_dev.owner = THIS_MODULE;
 	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
 	ibdev->num_ports = num_ports;
-	ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
+	ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
+						1 : ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
 	ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
 
@@ -2207,7 +2282,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (init_node_data(ibdev))
 		goto err_map;
 
-	for (i = 0; i < ibdev->num_ports; ++i) {
+	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+	for (i = 0; i < num_req_counters; ++i) {
 		mutex_init(&ibdev->qp1_proxy_lock[i]);
 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
 				IB_LINK_LAYER_ETHERNET) {
@@ -2218,6 +2294,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			ibdev->counters[i] = -1;
 		}
 	}
+	if (mlx4_is_bonded(dev))
+		for (i = 1; i < ibdev->num_ports ; ++i)
+			ibdev->counters[i] = ibdev->counters[0];
+
 
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
 		ib_num_ports++;
@@ -2538,6 +2618,38 @@ out:
 	return;
 }
 
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+	struct ib_event_work *ew =
+		container_of(work, struct ib_event_work, work);
+	struct mlx4_ib_dev *ibdev = ew->ib_dev;
+	enum ib_port_state bonded_port_state = IB_PORT_NOP;
+	int i;
+	struct ib_event ibev;
+
+	kfree(ew);
+	spin_lock_bh(&ibdev->iboe.lock);
+	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+
+		enum ib_port_state curr_port_state =
+			(netif_running(curr_netdev) &&
+			 netif_carrier_ok(curr_netdev)) ?
+			IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+			curr_port_state : IB_PORT_ACTIVE;
+	}
+	spin_unlock_bh(&ibdev->iboe.lock);
+
+	ibev.device = &ibdev->ib_dev;
+	ibev.element.port_num = 1;
+	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+	ib_dispatch_event(&ibev);
+}
+
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 			  enum mlx4_dev_event event, unsigned long param)
 {
@@ -2547,6 +2659,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	struct ib_event_work *ew;
 	int p = 0;
 
+	if (mlx4_is_bonded(dev) &&
+	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
+	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+		if (!ew)
+			return;
+		INIT_WORK(&ew->work, handle_bonded_port_state_event);
+		ew->ib_dev = ibdev;
+		queue_work(wq, &ew->work);
+		return;
+	}
+
 	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
 		eqe = (struct mlx4_eqe *)param;
 	else
@@ -2607,7 +2731,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	}
 
 	ibev.device = ibdev_ptr;
-	ibev.element.port_num = (u8) p;
+	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
 
 	ib_dispatch_event(&ibev);
 }
@@ -2616,7 +2740,8 @@ static struct mlx4_interface mlx4_ib_interface = {
 	.add		= mlx4_ib_add,
 	.remove		= mlx4_ib_remove,
 	.event		= mlx4_ib_event,
-	.protocol	= MLX4_PROT_IB_IPV6
+	.protocol	= MLX4_PROT_IB_IPV6,
+	.flags		= MLX4_INTFF_BONDING
 };
 
 static int __init mlx4_ib_init(void)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6eb743f65f6f..721540c9163d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -134,10 +134,17 @@ struct mlx4_ib_fmr {
 	struct mlx4_fmr mfmr;
 };
 
+#define MAX_REGS_PER_FLOW 2
+
+struct mlx4_flow_reg_id {
+	u64 id;
+	u64 mirror;
+};
+
 struct mlx4_ib_flow {
 	struct ib_flow ibflow;
 	/* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
-	u64 reg_id[2];
+	struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW];
 };
 
 struct mlx4_ib_wq {
@@ -527,6 +534,7 @@ struct mlx4_ib_dev {
 	struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS];
 	/* lock when destroying qp1_proxy and getting netdev events */
 	struct mutex qp1_proxy_lock[MLX4_MAX_PORTS];
+	u8 bond_next_port;
 };
 
 struct ib_event_work {
@@ -622,6 +630,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
 	return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
+static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev)
+{
+	dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports;
+
+	return dev->bond_next_port + 1;
+}
+
 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev);
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev);
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf000b7ad64f..792f9dc86ada 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -40,6 +40,7 @@
 #include <rdma/ib_addr.h>
 #include <rdma/ib_mad.h>
 
+#include <linux/mlx4/driver.h>
 #include <linux/mlx4/qp.h>
 
 #include "mlx4_ib.h"
@@ -93,17 +94,6 @@ enum {
 #ifndef ETH_ALEN
 #define ETH_ALEN 6
 #endif
-static inline u64 mlx4_mac_to_u64(u8 *addr)
-{
-	u64 mac = 0;
-	int i;
-
-	for (i = 0; i < ETH_ALEN; i++) {
-		mac <<= 8;
-		mac |= addr[i];
-	}
-	return mac;
-}
 
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
@@ -1915,6 +1905,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 	}
 
+	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) {
+		if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
+			if ((ibqp->qp_type == IB_QPT_RC) ||
+			    (ibqp->qp_type == IB_QPT_UD) ||
+			    (ibqp->qp_type == IB_QPT_UC) ||
+			    (ibqp->qp_type == IB_QPT_RAW_PACKET) ||
+			    (ibqp->qp_type == IB_QPT_XRC_INI)) {
+				attr->port_num = mlx4_ib_bond_next_port(dev);
+			}
+		} else {
+			/* no sense in changing port_num
+			 * when ports are bonded */
+			attr_mask &= ~IB_QP_PORT;
+		}
+	}
+
 	if ((attr_mask & IB_QP_PORT) &&
 	    (attr->port_num == 0 || attr->port_num > dev->num_ports)) {
 		pr_debug("qpn 0x%x: invalid port number (%d) specified "
@@ -1965,6 +1971,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
 	err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
 
+	if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT))
+		attr->port_num = 1;
+
 out:
 	mutex_unlock(&qp->mutex);
 	return err;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c9e519cb9214..679ef00d6b16 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -790,7 +790,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 	}
 
 	new_active->delay = 0;
-	new_active->link = BOND_LINK_UP;
+	bond_set_slave_link_state(new_active, BOND_LINK_UP);
 
 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
@@ -1181,6 +1181,62 @@ static void bond_free_slave(struct slave *slave)
 	kfree(slave);
 }
 
+static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
+{
+	info->bond_mode = BOND_MODE(bond);
+	info->miimon = bond->params.miimon;
+	info->num_slaves = bond->slave_cnt;
+}
+
+static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
+{
+	strcpy(info->slave_name, slave->dev->name);
+	info->link = slave->link;
+	info->state = bond_slave_state(slave);
+	info->link_failure_count = slave->link_failure_count;
+}
+
+static void bond_netdev_notify(struct slave *slave, struct net_device *dev)
+{
+	struct bonding *bond = slave->bond;
+	struct netdev_bonding_info bonding_info;
+
+	rtnl_lock();
+	/* make sure that slave is still valid */
+	if (dev->priv_flags & IFF_BONDING) {
+		bond_fill_ifslave(slave, &bonding_info.slave);
+		bond_fill_ifbond(bond, &bonding_info.master);
+		netdev_bonding_info_change(slave->dev, &bonding_info);
+	}
+	rtnl_unlock();
+}
+
+static void bond_netdev_notify_work(struct work_struct *_work)
+{
+	struct netdev_notify_work *w =
+		container_of(_work, struct netdev_notify_work, work.work);
+
+	bond_netdev_notify(w->slave, w->dev);
+	dev_put(w->dev);
+}
+
+void bond_queue_slave_event(struct slave *slave)
+{
+	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
+
+	if (!nnw)
+		return;
+
+	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
+	nnw->slave = slave;
+	nnw->dev = slave->dev;
+
+	if (queue_delayed_work(slave->bond->wq, &nnw->work, 0))
+		dev_hold(slave->dev);
+	else
+		kfree(nnw);
+}
+
 /* enslave device <slave> to bond device <master> */
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
@@ -1444,19 +1500,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	if (bond->params.miimon) {
 		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
 			if (bond->params.updelay) {
-				new_slave->link = BOND_LINK_BACK;
+				bond_set_slave_link_state(new_slave,
+							  BOND_LINK_BACK);
 				new_slave->delay = bond->params.updelay;
 			} else {
-				new_slave->link = BOND_LINK_UP;
+				bond_set_slave_link_state(new_slave,
+							  BOND_LINK_UP);
 			}
 		} else {
-			new_slave->link = BOND_LINK_DOWN;
+			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN);
 		}
 	} else if (bond->params.arp_interval) {
-		new_slave->link = (netif_carrier_ok(slave_dev) ?
-			BOND_LINK_UP : BOND_LINK_DOWN);
+		bond_set_slave_link_state(new_slave,
+					  (netif_carrier_ok(slave_dev) ?
+					  BOND_LINK_UP : BOND_LINK_DOWN));
 	} else {
-		new_slave->link = BOND_LINK_UP;
+		bond_set_slave_link_state(new_slave, BOND_LINK_UP);
 	}
 
 	if (new_slave->link != BOND_LINK_DOWN)
@@ -1572,6 +1631,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
 
 	/* enslave is successful */
+	bond_queue_slave_event(new_slave);
 	return 0;
 
 /* Undo stages on error */
@@ -1821,11 +1881,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
-
-	info->bond_mode = BOND_MODE(bond);
-	info->miimon = bond->params.miimon;
-	info->num_slaves = bond->slave_cnt;
-
+	bond_fill_ifbond(bond, info);
 	return 0;
 }
 
@@ -1839,10 +1895,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 	bond_for_each_slave(bond, slave, iter) {
 		if (i++ == (int)info->slave_id) {
 			res = 0;
-			strcpy(info->slave_name, slave->dev->name);
-			info->link = slave->link;
-			info->state = bond_slave_state(slave);
-			info->link_failure_count = slave->link_failure_count;
+			bond_fill_ifslave(slave, info);
 			break;
 		}
 	}
@@ -1872,7 +1925,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 			if (link_state)
 				continue;
 
-			slave->link = BOND_LINK_FAIL;
+			bond_set_slave_link_state(slave, BOND_LINK_FAIL);
 			slave->delay = bond->params.downdelay;
 			if (slave->delay) {
 				netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
@@ -1887,7 +1940,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 		case BOND_LINK_FAIL:
 			if (link_state) {
 				/* recovered before downdelay expired */
-				slave->link = BOND_LINK_UP;
+				bond_set_slave_link_state(slave, BOND_LINK_UP);
 				slave->last_link_up = jiffies;
 				netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
 					    (bond->params.downdelay - slave->delay) *
@@ -1909,7 +1962,7 @@ static int bond_miimon_inspect(struct bonding *bond)
 			if (!link_state)
 				continue;
 
-			slave->link = BOND_LINK_BACK;
+			bond_set_slave_link_state(slave, BOND_LINK_BACK);
 			slave->delay = bond->params.updelay;
 
 			if (slave->delay) {
@@ -1922,7 +1975,8 @@ static int bond_miimon_inspect(struct bonding *bond)
 			/*FALLTHRU*/
 		case BOND_LINK_BACK:
 			if (!link_state) {
-				slave->link = BOND_LINK_DOWN;
+				bond_set_slave_link_state(slave,
+							  BOND_LINK_DOWN);
 				netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
 					    (bond->params.updelay - slave->delay) *
 					    bond->params.miimon,
@@ -1960,7 +2014,7 @@ static void bond_miimon_commit(struct bonding *bond)
 			continue;
 
 		case BOND_LINK_UP:
-			slave->link = BOND_LINK_UP;
+			bond_set_slave_link_state(slave, BOND_LINK_UP);
 			slave->last_link_up = jiffies;
 
 			primary = rtnl_dereference(bond->primary_slave);
@@ -2000,7 +2054,7 @@ static void bond_miimon_commit(struct bonding *bond)
 			if (slave->link_failure_count < UINT_MAX)
 				slave->link_failure_count++;
 
-			slave->link = BOND_LINK_DOWN;
+			bond_set_slave_link_state(slave, BOND_LINK_DOWN);
 
 			if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
 			    BOND_MODE(bond) == BOND_MODE_8023AD)
@@ -2583,7 +2637,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
 			struct slave *current_arp_slave;
 
 			current_arp_slave = rtnl_dereference(bond->current_arp_slave);
-			slave->link = BOND_LINK_UP;
+			bond_set_slave_link_state(slave, BOND_LINK_UP);
 			if (current_arp_slave) {
 				bond_set_slave_inactive_flags(
 					current_arp_slave,
@@ -2606,7 +2660,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
 			if (slave->link_failure_count < UINT_MAX)
 				slave->link_failure_count++;
 
-			slave->link = BOND_LINK_DOWN;
+			bond_set_slave_link_state(slave, BOND_LINK_DOWN);
 			bond_set_slave_inactive_flags(slave,
 						      BOND_SLAVE_NOTIFY_NOW);
 
@@ -2685,7 +2739,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
 		 * up when it is actually down
 		 */
 		if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
-			slave->link = BOND_LINK_DOWN;
+			bond_set_slave_link_state(slave, BOND_LINK_DOWN);
 			if (slave->link_failure_count < UINT_MAX)
 				slave->link_failure_count++;
 
@@ -2705,7 +2759,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
 	if (!new_slave)
 		goto check_state;
 
-	new_slave->link = BOND_LINK_BACK;
+	bond_set_slave_link_state(new_slave, BOND_LINK_BACK);
 	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
 	bond_arp_send_all(bond, new_slave);
 	new_slave->last_link_up = jiffies;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 154effbfd8be..a681d7c0bb9f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1583,6 +1583,15 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.verify = NULL,
 		.wrapper = mlx4_CMD_EPERM_wrapper
 	},
+	{
+		.opcode = MLX4_CMD_VIRT_PORT_MAP,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_CMD_EPERM_wrapper
+	},
 };
 
 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index c643d2bbb7b9..58d5a07d0ff4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -214,6 +214,8 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
 	iounmap(mdev->uar_map);
 	mlx4_uar_free(dev, &mdev->priv_uar);
 	mlx4_pd_free(dev, mdev->priv_pdn);
+	if (mdev->nb.notifier_call)
+		unregister_netdevice_notifier(&mdev->nb);
 	kfree(mdev);
 }
 
@@ -298,6 +300,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 		if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i]))
 			mdev->pndev[i] = NULL;
 	}
+	/* register notifier */
+	mdev->nb.notifier_call = mlx4_en_netdev_event;
+	if (register_netdevice_notifier(&mdev->nb)) {
+		mdev->nb.notifier_call = NULL;
+		mlx4_err(mdev, "Failed to create notifier\n");
+	}
 
 	return mdev;
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index e075ff1f4e80..028937b2a199 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2062,6 +2062,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	/* Detach the netdev so tasks would not attempt to access it */
 	mutex_lock(&mdev->state_lock);
 	mdev->pndev[priv->port] = NULL;
+	mdev->upper[priv->port] = NULL;
 	mutex_unlock(&mdev->state_lock);
 
 	mlx4_en_free_resources(priv);
@@ -2441,6 +2442,180 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 #endif
 };
 
+struct mlx4_en_bond {
+	struct work_struct work;
+	struct mlx4_en_priv *priv;
+	int is_bonded;
+	struct mlx4_port_map port_map;
+};
+
+static void mlx4_en_bond_work(struct work_struct *work)
+{
+	struct mlx4_en_bond *bond = container_of(work,
+						 struct mlx4_en_bond,
+						 work);
+	int err = 0;
+	struct mlx4_dev *dev = bond->priv->mdev->dev;
+
+	if (bond->is_bonded) {
+		if (!mlx4_is_bonded(dev)) {
+			err = mlx4_bond(dev);
+			if (err)
+				en_err(bond->priv, "Fail to bond device\n");
+		}
+		if (!err) {
+			err = mlx4_port_map_set(dev, &bond->port_map);
+			if (err)
+				en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n",
+				       bond->port_map.port1,
+				       bond->port_map.port2,
+				       err);
+		}
+	} else if (mlx4_is_bonded(dev)) {
+		err = mlx4_unbond(dev);
+		if (err)
+			en_err(bond->priv, "Fail to unbond device\n");
+	}
+	dev_put(bond->priv->dev);
+	kfree(bond);
+}
+
+static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded,
+				   u8 v2p_p1, u8 v2p_p2)
+{
+	struct mlx4_en_bond *bond = NULL;
+
+	bond = kzalloc(sizeof(*bond), GFP_ATOMIC);
+	if (!bond)
+		return -ENOMEM;
+
+	INIT_WORK(&bond->work, mlx4_en_bond_work);
+	bond->priv = priv;
+	bond->is_bonded = is_bonded;
+	bond->port_map.port1 = v2p_p1;
+	bond->port_map.port2 = v2p_p2;
+	dev_hold(priv->dev);
+	queue_work(priv->mdev->workqueue, &bond->work);
+	return 0;
+}
+
+int mlx4_en_netdev_event(struct notifier_block *this,
+			 unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	u8 port = 0;
+	struct mlx4_en_dev *mdev;
+	struct mlx4_dev *dev;
+	int i, num_eth_ports = 0;
+	bool do_bond = true;
+	struct mlx4_en_priv *priv;
+	u8 v2p_port1 = 0;
+	u8 v2p_port2 = 0;
+
+	if (!net_eq(dev_net(ndev), &init_net))
+		return NOTIFY_DONE;
+
+	mdev = container_of(this, struct mlx4_en_dev, nb);
+	dev = mdev->dev;
+
+	/* Go into this mode only when two network devices set on two ports
+	 * of the same mlx4 device are slaves of the same bonding master
+	 */
+	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+		++num_eth_ports;
+		if (!port && (mdev->pndev[i] == ndev))
+			port = i;
+		mdev->upper[i] = mdev->pndev[i] ?
+			netdev_master_upper_dev_get(mdev->pndev[i]) : NULL;
+		/* condition not met: network device is a slave */
+		if (!mdev->upper[i])
+			do_bond = false;
+		if (num_eth_ports < 2)
+			continue;
+		/* condition not met: same master */
+		if (mdev->upper[i] != mdev->upper[i-1])
+			do_bond = false;
+	}
+	/* condition not met: 2 slaves */
+	do_bond = (num_eth_ports == 2) ? do_bond : false;
+
+	/* handle only events that come with enough info */
+	if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port)
+		return NOTIFY_DONE;
+
+	priv = netdev_priv(ndev);
+	if (do_bond) {
+		struct netdev_notifier_bonding_info *notifier_info = ptr;
+		struct netdev_bonding_info *bonding_info =
+			&notifier_info->bonding_info;
+
+		/* required mode 1, 2 or 4 */
+		if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) &&
+		    (bonding_info->master.bond_mode != BOND_MODE_XOR) &&
+		    (bonding_info->master.bond_mode != BOND_MODE_8023AD))
+			do_bond = false;
+
+		/* require exactly 2 slaves */
+		if (bonding_info->master.num_slaves != 2)
+			do_bond = false;
+
+		/* calc v2p */
+		if (do_bond) {
+			if (bonding_info->master.bond_mode ==
+			    BOND_MODE_ACTIVEBACKUP) {
+				/* in active-backup mode virtual ports are
+				 * mapped to the physical port of the active
+				 * slave */
+				if (bonding_info->slave.state ==
+				    BOND_STATE_BACKUP) {
+					if (port == 1) {
+						v2p_port1 = 2;
+						v2p_port2 = 2;
+					} else {
+						v2p_port1 = 1;
+						v2p_port2 = 1;
+					}
+				} else { /* BOND_STATE_ACTIVE */
+					if (port == 1) {
+						v2p_port1 = 1;
+						v2p_port2 = 1;
+					} else {
+						v2p_port1 = 2;
+						v2p_port2 = 2;
+					}
+				}
+			} else { /* Active-Active */
+				/* in active-active mode a virtual port is
+				 * mapped to the native physical port if and only
+				 * if the physical port is up */
+				__s8 link = bonding_info->slave.link;
+
+				if (port == 1)
+					v2p_port2 = 2;
+				else
+					v2p_port1 = 1;
+				if ((link == BOND_LINK_UP) ||
+				    (link == BOND_LINK_FAIL)) {
+					if (port == 1)
+						v2p_port1 = 1;
+					else
+						v2p_port2 = 2;
+				} else { /* BOND_LINK_DOWN || BOND_LINK_BACK */
+					if (port == 1)
+						v2p_port1 = 2;
+					else
+						v2p_port2 = 1;
+				}
+			}
+		}
+	}
+
+	mlx4_en_queue_bond_work(priv, do_bond,
+				v2p_port1, v2p_port2);
+
+	return NOTIFY_DONE;
+}
+
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			struct mlx4_en_port_profile *prof)
 {
@@ -2623,6 +2798,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	}
 
 	mdev->pndev[port] = dev;
+	mdev->upper[port] = NULL;
 
 	netif_carrier_off(dev);
 	mlx4_en_set_default_moderation(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index f1a5500ff72d..34f2fdf4fe5d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -50,10 +50,14 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
 	context->mtu_msgmax = 0xff;
 	if (!is_tx && !rss)
 		context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
-	if (is_tx)
+	if (is_tx) {
 		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
-	else
+		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
+			context->params2 |= MLX4_QP_BIT_FPP;
+
+	} else {
 		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
+	}
 	context->usr_page = cpu_to_be32(mdev->priv_uar.index);
 	context->local_qpn = cpu_to_be32(qpn);
 	context->pri_path.ackto = 1 & 0x07;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index dbabfae3a3de..4b08a393ebcb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -142,7 +142,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[17] = "Asymmetric EQs support",
 		[18] = "More than 80 VFs support",
 		[19] = "Performance optimized for limited rule configuration flow steering support",
-		[20] = "Recoverable error events support"
+		[20] = "Recoverable error events support",
+		[21] = "Port Remap support"
 	};
 	int i;
 
@@ -863,6 +864,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
 	MLX4_GET(dev_cap->bmme_flags, outbox,
 		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
 	if (field & 0x20)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
@@ -1120,9 +1123,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	field &= 0x7f;
 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
 
-	/* For guests, disable mw type 2 */
+	/* For guests, disable mw type 2 and port remap */
 	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
 	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
+	bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
 	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
 
 	/* turn off device-managed steering capability if not enabled */
@@ -2100,13 +2104,16 @@ struct mlx4_config_dev {
 	__be32 rsvd1[3];
 	__be16 vxlan_udp_dport;
 	__be16 rsvd2;
-	__be32 rsvd3[27];
-	__be16 rsvd4;
-	u8 rsvd5;
+	__be32 rsvd3;
+	__be32 roce_flags;
+	__be32 rsvd4[25];
+	__be16 rsvd5;
+	u8 rsvd6;
 	u8 rx_checksum_val;
 };
 
 #define MLX4_VXLAN_UDP_DPORT (1 << 0)
+#define MLX4_DISABLE_RX_PORT BIT(18)
 
 static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
 {
@@ -2209,6 +2216,45 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
 }
 EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
 
+#define CONFIG_DISABLE_RX_PORT BIT(15)
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
+{
+	struct mlx4_config_dev config_dev;
+
+	memset(&config_dev, 0, sizeof(config_dev));
+	config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
+	if (dis)
+		config_dev.roce_flags =
+			cpu_to_be32(CONFIG_DISABLE_RX_PORT);
+
+	return mlx4_CONFIG_DEV_set(dev, &config_dev);
+}
+
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct {
+		__be32 v_port1;
+		__be32 v_port2;
+	} *v2p;
+	int err;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return -ENOMEM;
+
+	v2p = mailbox->buf;
+	v2p->v_port1 = cpu_to_be32(port1);
+	v2p->v_port2 = cpu_to_be32(port2);
+
+	err = mlx4_cmd(dev, mailbox->dma, 0,
+		       MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+
 
 int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 68d2bad325d5..6fce58718837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -33,11 +33,13 @@
 
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/errno.h>
 
 #include "mlx4.h"
 
 struct mlx4_device_context {
 	struct list_head list;
+	struct list_head bond_list;
 	struct mlx4_interface *intf;
 	void *context;
 };
@@ -115,6 +117,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
 }
 EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
 
+int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
+	unsigned long flags;
+	int ret;
+	LIST_HEAD(bond_list);
+
+	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
+		return -ENOTSUPP;
+
+	ret = mlx4_disable_rx_port_check(dev, enable);
+	if (ret) {
+		mlx4_err(dev, "Fail to %s rx port check\n",
+			 enable ? "enable" : "disable");
+		return ret;
+	}
+	if (enable) {
+		dev->flags |= MLX4_FLAG_BONDED;
+	} else {
+		ret = mlx4_virt2phy_port_map(dev, 1, 2);
+		if (ret) {
+			mlx4_err(dev, "Fail to reset port map\n");
+			return ret;
+		}
+		dev->flags &= ~MLX4_FLAG_BONDED;
+	}
+
+	spin_lock_irqsave(&priv->ctx_lock, flags);
+	list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
+		if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
+			list_add_tail(&dev_ctx->bond_list, &bond_list);
+			list_del(&dev_ctx->list);
+		}
+	}
+	spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+	list_for_each_entry(dev_ctx, &bond_list, bond_list) {
+		dev_ctx->intf->remove(dev, dev_ctx->context);
+		dev_ctx->context = dev_ctx->intf->add(dev);
+
+		spin_lock_irqsave(&priv->ctx_lock, flags);
+		list_add_tail(&dev_ctx->list, &priv->ctx_list);
+		spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+		mlx4_dbg(dev, "Interface for protocol %d restarted when bonded mode is %s\n",
+			 dev_ctx->intf->protocol, enable ?
+			 "enabled" : "disabled");
+	}
+	return 0;
+}
+
 void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
 			 unsigned long param)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index cc9f48439244..f3245fe0f442 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1160,6 +1160,91 @@ err_set_port:
 	return err ? err : count;
 }
 
+int mlx4_bond(struct mlx4_dev *dev)
+{
+	int ret = 0;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	mutex_lock(&priv->bond_mutex);
+
+	if (!mlx4_is_bonded(dev))
+		ret = mlx4_do_bond(dev, true);
+	else
+		ret = 0;
+
+	mutex_unlock(&priv->bond_mutex);
+	if (ret)
+		mlx4_err(dev, "Failed to bond device: %d\n", ret);
+	else
+		mlx4_dbg(dev, "Device is bonded\n");
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_bond);
+
+int mlx4_unbond(struct mlx4_dev *dev)
+{
+	int ret = 0;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	mutex_lock(&priv->bond_mutex);
+
+	if (mlx4_is_bonded(dev))
+		ret = mlx4_do_bond(dev, false);
+
+	mutex_unlock(&priv->bond_mutex);
+	if (ret)
+		mlx4_err(dev, "Failed to unbond device: %d\n", ret);
+	else
+		mlx4_dbg(dev, "Device is unbonded\n");
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_unbond);
+
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
+{
+	u8 port1 = v2p->port1;
+	u8 port2 = v2p->port2;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int err;
+
+	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
+		return -ENOTSUPP;
+
+	mutex_lock(&priv->bond_mutex);
+
+	/* zero means keep current mapping for this port */
+	if (port1 == 0)
+		port1 = priv->v2p.port1;
+	if (port2 == 0)
+		port2 = priv->v2p.port2;
+
+	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
+	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
+	    (port1 == 2 && port2 == 1)) {
+		/* besides boundary checks cross mapping makes
+		 * no sense and therefore not allowed */
+		err = -EINVAL;
+	} else if ((port1 == priv->v2p.port1) &&
+		   (port2 == priv->v2p.port2)) {
+		err = 0;
+	} else {
+		err = mlx4_virt2phy_port_map(dev, port1, port2);
+		if (!err) {
+			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
+				 port1, port2);
+			priv->v2p.port1 = port1;
+			priv->v2p.port2 = port2;
+		} else {
+			mlx4_err(dev, "Failed to change port map: %d\n", err);
+		}
+	}
+
+	mutex_unlock(&priv->bond_mutex);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_port_map_set);
+
 static int mlx4_load_fw(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2638,6 +2723,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
 	spin_lock_init(&priv->ctx_lock);
 
 	mutex_init(&priv->port_mutex);
+	mutex_init(&priv->bond_mutex);
 
 	INIT_LIST_HEAD(&priv->pgdir_list);
 	mutex_init(&priv->pgdir_mutex);
@@ -2934,6 +3020,9 @@ slave_start:
 		goto err_port;
 	}
 
+	priv->v2p.port1 = 1;
+	priv->v2p.port2 = 2;
+
 	err = mlx4_register_device(dev);
 	if (err)
 		goto err_port;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 148dc0945aab..803f17653da7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -885,6 +885,8 @@ struct mlx4_priv {
 	int reserved_mtts;
 	int fs_hash_mode;
 	u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
+	struct mlx4_port_map v2p; /* cached port mapping configuration */
+	struct mutex bond_mutex; /* for bond mode */
 	__be64 slave_node_guids[MLX4_MFUNC_MAX];
 
 	atomic_t opreq_count;
@@ -1364,6 +1366,7 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 /* Returns the VF index of slave */
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
 int mlx4_config_mad_demux(struct mlx4_dev *dev);
+int mlx4_do_bond(struct mlx4_dev *dev, bool enable);
 
 enum mlx4_zone_flags {
 	MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 944a112dff37..2a8268e6be15 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -390,6 +390,7 @@ struct mlx4_en_dev {
 	struct pci_dev *pdev;
 	struct mutex state_lock;
 	struct net_device *pndev[MLX4_MAX_PORTS + 1];
+	struct net_device *upper[MLX4_MAX_PORTS + 1];
 	u32 port_cnt;
 	bool device_up;
 	struct mlx4_en_profile profile;
@@ -410,6 +411,7 @@ struct mlx4_en_dev {
 	unsigned long overflow_period;
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info ptp_clock_info;
+	struct notifier_block nb;
 };
 
 
@@ -845,6 +847,9 @@ int mlx4_en_reset_config(struct net_device *dev,
 			 struct hwtstamp_config ts_config,
 			 netdev_features_t new_features);
 
+int mlx4_en_netdev_event(struct notifier_block *this,
+			 unsigned long event, void *ptr);
+
 /*
  * Functions for time stamping
  */
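
The new nb field and mlx4_en_netdev_event() let mlx4_en watch netdev events. A sketch of the registration one would expect at probe time (the actual hookup is in en_main.c, not shown in this excerpt; the error message is illustrative):

	/* Sketch: register the bonding-info notifier when the mlx4_en
	 * device comes up; mdev->nb is the field added above. */
	mdev->nb.notifier_call = mlx4_en_netdev_event;
	if (register_netdevice_notifier(&mdev->nb)) {
		mdev->nb.notifier_call = NULL;
		pr_err("mlx4_en: failed to register netdev notifier\n");
	}
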
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 1586ecce13c7..2bb8553bd905 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -882,6 +882,8 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
 		context->flags &= cpu_to_be32(~(0xf << 28));
 		context->flags |= cpu_to_be32(states[i + 1] << 28);
+		if (states[i + 1] != MLX4_QP_STATE_RTR)
+			context->params2 &= ~MLX4_QP_BIT_FPP;
 		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
 				     context, 0, 0, qp);
 		if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 79feeb6b0d87..c5f3dfca226b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2944,6 +2944,9 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
 	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
 	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
 
+	if (slave != mlx4_master_func_num(dev))
+		qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+
 	switch (qp_type) {
 	case MLX4_QP_ST_RC:
 	case MLX4_QP_ST_XRC:
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index ae95adc78509..7b6d4e9ff603 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -71,6 +71,7 @@ enum {
 
 	/*master notify fw on finish for slave's flr*/
 	MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+	MLX4_CMD_VIRT_PORT_MAP	 = 0x5c,
 	MLX4_CMD_GET_OP_REQ	 = 0x59,
 
 	/* TPT commands */
@@ -171,6 +172,12 @@ enum {
 };
 
 enum {
+	/* virtual to physical port mapping opcode modifiers */
+	MLX4_GET_PORT_VIRT2PHY = 0x0,
+	MLX4_SET_PORT_VIRT2PHY = 0x1,
+};
+
+enum {
 	MLX4_MAILBOX_SIZE	= 4096,
 	MLX4_ACCESS_MEM_ALIGN	= 256,
 };
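
MLX4_CMD_VIRT_PORT_MAP carries the mapping to firmware, with the opcode modifier selecting a get or a set. A sketch of how mlx4_virt2phy_port_map() (declared in mlx4/device.h below) might issue the set variant; the two-word mailbox layout is an assumption, not taken from this patch:

	/* Sketch under stated assumptions: pass the mapping in a command
	 * mailbox, MLX4_SET_PORT_VIRT2PHY selecting the "set" operation. */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	/* (assumed layout) one 32-bit word of physical port per virtual port */
	((__be32 *)mailbox->buf)[0] = cpu_to_be32(port1);
	((__be32 *)mailbox->buf)[1] = cpu_to_be32(port2);
	err = mlx4_cmd(dev, mailbox->dma, 0, MLX4_SET_PORT_VIRT2PHY,
		       MLX4_CMD_VIRT_PORT_MAP, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);
	mlx4_free_cmd_mailbox(dev, mailbox);
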
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index c95d659a39f2..977b0b164431 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -70,6 +70,7 @@ enum {
 	MLX4_FLAG_SLAVE		= 1 << 3,
 	MLX4_FLAG_SRIOV		= 1 << 4,
 	MLX4_FLAG_OLD_REG_MAC	= 1 << 6,
+	MLX4_FLAG_BONDED	= 1 << 7
 };
 
 enum {
75enum { 76enum {
@@ -201,7 +202,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_SYS_EQS		= 1LL << 17,
 	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL << 18,
 	MLX4_DEV_CAP_FLAG2_FS_A0		= 1LL << 19,
-	MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20
+	MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20,
+	MLX4_DEV_CAP_FLAG2_PORT_REMAP		= 1LL << 21
 };
 
 enum {
@@ -253,9 +255,14 @@ enum {
 	MLX4_BMME_FLAG_TYPE_2_WIN	= 1 <<  9,
 	MLX4_BMME_FLAG_RESERVED_LKEY	= 1 << 10,
 	MLX4_BMME_FLAG_FAST_REG_WR	= 1 << 11,
+	MLX4_BMME_FLAG_PORT_REMAP	= 1 << 24,
 	MLX4_BMME_FLAG_VSD_INIT2RTR	= 1 << 28,
 };
 
+enum {
+	MLX4_FLAG_PORT_REMAP		= MLX4_BMME_FLAG_PORT_REMAP
+};
+
 enum mlx4_event {
 	MLX4_EVENT_TYPE_COMP		= 0x00,
 	MLX4_EVENT_TYPE_PATH_MIG	= 0x01,
259enum mlx4_event { 266enum mlx4_event {
260 MLX4_EVENT_TYPE_COMP = 0x00, 267 MLX4_EVENT_TYPE_COMP = 0x00,
261 MLX4_EVENT_TYPE_PATH_MIG = 0x01, 268 MLX4_EVENT_TYPE_PATH_MIG = 0x01,
@@ -1378,6 +1385,8 @@ int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port);
 int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
 
 int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
+int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
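
Consumers can gate port remapping on the new capability bit; a minimal sketch:

	/* Sketch: refuse to bond unless firmware advertises port remapping. */
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -EOPNOTSUPP;
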
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 022055c8fb26..9553a73d2049 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -49,6 +49,10 @@ enum mlx4_dev_event {
 	MLX4_DEV_EVENT_SLAVE_SHUTDOWN,
 };
 
+enum {
+	MLX4_INTFF_BONDING	= 1 << 0
+};
+
 struct mlx4_interface {
 	void *			(*add)	 (struct mlx4_dev *dev);
 	void			(*remove)(struct mlx4_dev *dev, void *context);
@@ -57,11 +61,26 @@ struct mlx4_interface {
 	void *			(*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
 	struct list_head	list;
 	enum mlx4_protocol	protocol;
+	int			flags;
 };
 
 int mlx4_register_interface(struct mlx4_interface *intf);
 void mlx4_unregister_interface(struct mlx4_interface *intf);
 
+int mlx4_bond(struct mlx4_dev *dev);
+int mlx4_unbond(struct mlx4_dev *dev);
+static inline int mlx4_is_bonded(struct mlx4_dev *dev)
+{
+	return !!(dev->flags & MLX4_FLAG_BONDED);
+}
+
+struct mlx4_port_map {
+	u8	port1;
+	u8	port2;
+};
+
+int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p);
+
 void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
 
 static inline u64 mlx4_mac_to_u64(u8 *addr)
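
Taken together this is the consumer-facing bonding API: query with mlx4_is_bonded(), transition with mlx4_bond()/mlx4_unbond(), and steer with mlx4_port_map_set(). A usage sketch (mapping both virtual ports to physical port 2 is assumed to model an active-backup failover):

	/* Sketch: route both virtual ports through physical port 2,
	 * provided the device is already in bonded mode. */
	struct mlx4_port_map v2p = { .port1 = 2, .port2 = 2 };

	if (mlx4_is_bonded(dev))
		err = mlx4_port_map_set(dev, &v2p);
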
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 467ccdf94c98..2bbc62aa818a 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -96,6 +96,7 @@ enum {
 	MLX4_QP_BIT_RRE		= 1 << 15,
 	MLX4_QP_BIT_RWE		= 1 << 14,
 	MLX4_QP_BIT_RAE		= 1 << 13,
+	MLX4_QP_BIT_FPP		= 1 << 3,
 	MLX4_QP_BIT_RIC		= 1 << 4,
 };
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1347ac50d2af..ce784d5018e0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -51,6 +51,7 @@
 #include <linux/netdev_features.h>
 #include <linux/neighbour.h>
 #include <uapi/linux/netdevice.h>
+#include <uapi/linux/if_bonding.h>
 
 struct netpoll_info;
 struct device;
@@ -2056,6 +2057,7 @@ struct pcpu_sw_netstats {
 #define NETDEV_RESEND_IGMP	0x0016
 #define NETDEV_PRECHANGEMTU	0x0017 /* notify before mtu change happened */
 #define NETDEV_CHANGEINFODATA	0x0018
+#define NETDEV_BONDING_INFO	0x0019
 
 int register_netdevice_notifier(struct notifier_block *nb);
 int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -3494,6 +3496,19 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 				    netdev_features_t features);
 
+struct netdev_bonding_info {
+	ifslave	slave;
+	ifbond	master;
+};
+
+struct netdev_notifier_bonding_info {
+	struct netdev_notifier_info info; /* must be first */
+	struct netdev_bonding_info  bonding_info;
+};
+
+void netdev_bonding_info_change(struct net_device *dev,
+				struct netdev_bonding_info *bonding_info);
+
 static inline
 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
 {
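
A driver consumes the new event from its netdev notifier; a minimal sketch, with example_netdev_event() a hypothetical handler (mlx4_en's real one is mlx4_en_netdev_event() above):

	/* Sketch: a notifier callback reacting to NETDEV_BONDING_INFO. */
	static int example_netdev_event(struct notifier_block *this,
					unsigned long event, void *ptr)
	{
		struct netdev_notifier_bonding_info *notifier_info = ptr;
		struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
		struct netdev_bonding_info *bonding_info;

		if (event != NETDEV_BONDING_INFO)
			return NOTIFY_DONE;

		bonding_info = &notifier_info->bonding_info;
		/* react to ndev's bond state here, e.g. inspect
		 * bonding_info->master.bond_mode and bonding_info->slave.state */
		(void)ndev;
		return NOTIFY_DONE;
	}
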
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 29f53eacac0a..4e17095ad46a 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -150,6 +150,12 @@ struct bond_parm_tbl {
 	int mode;
 };
 
+struct netdev_notify_work {
+	struct delayed_work	work;
+	struct slave		*slave;
+	struct net_device	*dev;
+};
+
 struct slave {
 	struct net_device *dev; /* first - useful for panic debug */
 	struct bonding *bond; /* our master */
@@ -243,6 +249,8 @@ struct bonding {
 #define bond_slave_get_rtnl(dev) \
 	((struct slave *) rtnl_dereference(dev->rx_handler_data))
 
+void bond_queue_slave_event(struct slave *slave);
+
 struct bond_vlan_tag {
 	__be16		vlan_proto;
 	unsigned short	vlan_id;
@@ -315,6 +323,7 @@ static inline void bond_set_active_slave(struct slave *slave)
 {
 	if (slave->backup) {
 		slave->backup = 0;
+		bond_queue_slave_event(slave);
 		rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
 	}
 }
@@ -323,6 +332,7 @@ static inline void bond_set_backup_slave(struct slave *slave)
 {
 	if (!slave->backup) {
 		slave->backup = 1;
+		bond_queue_slave_event(slave);
 		rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
 	}
 }
@@ -336,6 +346,7 @@ static inline void bond_set_slave_state(struct slave *slave,
 	slave->backup = slave_state;
 	if (notify) {
 		rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_ATOMIC);
+		bond_queue_slave_event(slave);
 		slave->should_notify = 0;
 	} else {
 		if (slave->should_notify)
@@ -490,6 +501,12 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
 	return slave->inactive;
 }
 
+static inline void bond_set_slave_link_state(struct slave *slave, int state)
+{
+	slave->link = state;
+	bond_queue_slave_event(slave);
+}
+
 static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be32 local)
 {
 	struct in_device *in_dev;
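
bond_queue_slave_event() is only declared in this header, and struct netdev_notify_work suggests the notification is deferred to a workqueue. A sketch of the shape its bond_main.c implementation might take; bond_netdev_notify_work() is assumed to build the netdev_bonding_info and call netdev_bonding_info_change() under RTNL:

	/* Sketch under stated assumptions: defer the event to the bond's
	 * workqueue so it can take RTNL safely later. */
	void bond_queue_slave_event(struct slave *slave)
	{
		struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);

		if (!nnw)
			return;

		INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
		nnw->slave = slave;
		nnw->dev = slave->dev;
		queue_delayed_work(slave->bond->wq, &nnw->work, 0);
	}
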
diff --git a/net/core/dev.c b/net/core/dev.c
index 1d564d68e31a..ede0b161b115 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5355,6 +5355,26 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+/**
+ * netdev_bonding_info_change - Dispatch event about slave change
+ * @dev: device
+ * @bonding_info: info to dispatch
+ *
+ * Send NETDEV_BONDING_INFO to netdev notifiers with info.
+ * The caller must hold the RTNL lock.
+ */
+void netdev_bonding_info_change(struct net_device *dev,
+				struct netdev_bonding_info *bonding_info)
+{
+	struct netdev_notifier_bonding_info info;
+
+	memcpy(&info.bonding_info, bonding_info,
+	       sizeof(struct netdev_bonding_info));
+	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
+				      &info.info);
+}
+EXPORT_SYMBOL(netdev_bonding_info_change);
+
 void netdev_adjacent_add_links(struct net_device *dev)
 {
 	struct netdev_adjacent *iter;
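
On the producer side the bonding driver fills struct netdev_bonding_info for a slave and dispatches it; a sketch, with the field choices assumed from the ifbond/ifslave uapi layout:

	/* Sketch: publish a slave's view of its bond; caller holds RTNL. */
	struct netdev_bonding_info binfo;

	memset(&binfo, 0, sizeof(binfo));
	binfo.master.bond_mode = BOND_MODE(bond);
	binfo.master.num_slaves = bond->slave_cnt;
	binfo.slave.state = bond_slave_state(slave);
	binfo.slave.link = slave->link;
	netdev_bonding_info_change(slave->dev, &binfo);
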
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 673cb4c6f391..4cd5e350d129 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3180,6 +3180,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
 	case NETDEV_UNREGISTER_FINAL:
 	case NETDEV_RELEASE:
 	case NETDEV_JOIN:
+	case NETDEV_BONDING_INFO:
 		break;
 	default:
 		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);