Diffstat (limited to 'drivers/infiniband/hw/mlx4/main.c')

 drivers/infiniband/hw/mlx4/main.c | 161 ++++++++++++++++++++++++++++++++-----
 1 file changed, 143 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9db258f7c804..2ed5b996b2f4 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	enum ib_mtu tmp;
 	struct mlx4_cmd_mailbox *mailbox;
 	int err = 0;
+	int is_bonded = mlx4_is_bonded(mdev->dev);
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state		= IB_PORT_DOWN;
 	props->phys_state	= state_to_phys_state(props->state);
 	props->active_mtu	= IB_MTU_256;
+	if (is_bonded)
+		rtnl_lock(); /* required to get upper dev */
 	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
+	if (ndev && is_bonded)
+		ndev = netdev_master_upper_dev_get(ndev);
 	if (!ndev)
 		goto out_unlock;
 
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->phys_state	= state_to_phys_state(props->state);
 out_unlock:
 	spin_unlock_bh(&iboe->lock);
+	if (is_bonded)
+		rtnl_unlock();
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
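
The three hunks above make eth_link_query_port() report the bond's link state when port aliasing is active: the slave netdev is swapped for its bond master via netdev_master_upper_dev_get(), which must be called under RTNL. The lock ordering matters here: rtnl_lock() can sleep, so it has to be taken before spin_lock_bh(&iboe->lock) and released only after the spinlock is dropped, exactly as the patch does. A minimal sketch of the ordering, with the body elided:

    if (is_bonded)
            rtnl_lock();            /* may sleep: must precede the BH spinlock */
    spin_lock_bh(&iboe->lock);
    /* ... resolve ndev via netdev_master_upper_dev_get(), read link state ... */
    spin_unlock_bh(&iboe->lock);
    if (is_bonded)
            rtnl_unlock();          /* release in reverse order */
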
@@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 
 struct mlx4_ib_steering {
 	struct list_head list;
-	u64 reg_id;
+	struct mlx4_flow_reg_id reg_id;
 	union ib_gid gid;
 };
 
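
reg_id grows from a bare u64 handle into a pair so that every steering registration can carry a second, mirrored rule on the other physical port. The pair type is introduced elsewhere in this series (it is not defined in main.c); presumably it looks like the sketch below, where a zero mirror means no mirror rule exists:

    /* assumed definition, introduced alongside this patch (e.g. in
     * mlx4_ib.h); not part of this file's diff */
    struct mlx4_flow_reg_id {
            u64 id;         /* registration id of the rule on the QP's port */
            u64 mirror;     /* id of the mirror rule on the other port, or 0 */
    };
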
@@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 					   struct ib_flow_attr *flow_attr,
 					   int domain)
 {
-	int err = 0, i = 0;
+	int err = 0, i = 0, j = 0;
 	struct mlx4_ib_flow *mflow;
 	enum mlx4_net_trans_promisc_mode type[2];
+	struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
+	int is_bonded = mlx4_is_bonded(dev);
 
 	memset(type, 0, sizeof(type));
 
@@ -1172,26 +1181,55 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
 	while (i < ARRAY_SIZE(type) && type[i]) {
 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
-					    &mflow->reg_id[i]);
+					    &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
 		i++;
+		if (is_bonded) {
+			flow_attr->port = 2;
+			err = __mlx4_ib_create_flow(qp, flow_attr,
+						    domain, type[j],
+						    &mflow->reg_id[j].mirror);
+			flow_attr->port = 1;
+			if (err)
+				goto err_create_flow;
+			j++;
+		}
+
 	}
 
 	if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
-		err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+		err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+					       &mflow->reg_id[i].id);
 		if (err)
 			goto err_create_flow;
 		i++;
+		if (is_bonded) {
+			flow_attr->port = 2;
+			err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
+						       &mflow->reg_id[j].mirror);
+			flow_attr->port = 1;
+			if (err)
+				goto err_create_flow;
+			j++;
+		}
+		/* function to create mirror rule */
 	}
 
 	return &mflow->ibflow;
 
 err_create_flow:
 	while (i) {
-		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+					     mflow->reg_id[i].id);
 		i--;
 	}
+
+	while (j) {
+		(void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
+					     mflow->reg_id[j].mirror);
+		j--;
+	}
 err_free:
 	kfree(mflow);
 	return ERR_PTR(err);
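
For every rule created on the QP's port, the bonded path briefly flips flow_attr->port to 2, installs a mirror rule, and restores port 1; i and j advance in lockstep, i counting base rules and j counting mirrors, and on failure both chains are unwound. Note that, as written, the unwind loops destroy reg_id[i] and reg_id[j] before decrementing, so their first iteration reads the slot one past the last rule actually created; the decrement-first idiom avoids that, as in this user-space mimic of the paired create/unwind (hypothetical handles, no kernel APIs):

    #include <stdio.h>
    #include <stdint.h>

    struct reg_id { uint64_t id, mirror; };

    /* pretend the HW hands back a handle; fail the 4th allocation */
    static int create_rule(int port, uint64_t *out)
    {
            static uint64_t next = 1;
            *out = next++;
            return (port == 2 && next > 4) ? -1 : 0;
    }

    int main(void)
    {
            struct reg_id reg[2] = { {0, 0}, {0, 0} };
            int i = 0, j = 0, is_bonded = 1;

            while (i < 2) {
                    if (create_rule(1, &reg[i].id))
                            goto unwind;
                    i++;
                    if (is_bonded) {
                            if (create_rule(2, &reg[j].mirror))
                                    goto unwind;
                            j++;
                    }
            }
            return 0;
    unwind:
            while (i--)     /* decrement first: only touch populated slots */
                    printf("destroy id %llu\n", (unsigned long long)reg[i].id);
            while (j--)
                    printf("destroy mirror %llu\n", (unsigned long long)reg[j].mirror);
            return 1;
    }
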
@@ -1204,10 +1242,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
 	struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
 	struct mlx4_ib_flow *mflow = to_mflow(flow_id);
 
-	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
-		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+	while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
+		err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
 		if (err)
 			ret = err;
+		if (mflow->reg_id[i].mirror) {
+			err = __mlx4_ib_destroy_flow(mdev->dev,
+						     mflow->reg_id[i].mirror);
+			if (err)
+				ret = err;
+		}
 		i++;
 	}
 
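
Teardown walks the same array: .id == 0 terminates the scan, a nonzero .mirror marks a bonded twin that needs its own destroy call, and the last error sticks in ret. A runnable sketch of the scan with a stubbed destroy:

    #include <stdio.h>
    #include <stdint.h>

    struct reg_id { uint64_t id, mirror; };

    /* stand-in for __mlx4_ib_destroy_flow() */
    static int destroy_rule(uint64_t h)
    {
            printf("destroy %llu\n", (unsigned long long)h);
            return 0;
    }

    int main(void)
    {
            struct reg_id reg[2] = { { 10, 11 }, { 0, 0 } };
            int i = 0, err, ret = 0;

            while (i < 2 && reg[i].id) {            /* .id == 0 ends the walk */
                    err = destroy_rule(reg[i].id);
                    if (err)
                            ret = err;
                    if (reg[i].mirror) {            /* bonded twin, if any */
                            err = destroy_rule(reg[i].mirror);
                            if (err)
                                    ret = err;
                    }
                    i++;
            }
            return ret;
    }
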
@@ -1219,11 +1263,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_dev *dev = mdev->dev;
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
-	u64 reg_id;
 	struct mlx4_ib_steering *ib_steering = NULL;
 	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
 		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+	struct mlx4_flow_reg_id reg_id;
 
 	if (mdev->dev->caps.steering_mode ==
 	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1235,10 +1280,20 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
 				    !!(mqp->flags &
 				       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
-				    prot, &reg_id);
+				    prot, &reg_id.id);
 	if (err)
 		goto err_malloc;
 
+	reg_id.mirror = 0;
+	if (mlx4_is_bonded(dev)) {
+		err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 2,
+					    !!(mqp->flags &
+					       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+					    prot, &reg_id.mirror);
+		if (err)
+			goto err_add;
+	}
+
 	err = add_gid_entry(ibqp, gid);
 	if (err)
 		goto err_add;
@@ -1254,7 +1309,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 
 err_add:
 	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-			      prot, reg_id);
+			      prot, reg_id.id);
+	if (reg_id.mirror)
+		mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+				      prot, reg_id.mirror);
 err_malloc:
 	kfree(ib_steering);
 
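
A bonded QP attaches the multicast group twice, once on its own port and once on port 2 (the mirror attach hardcodes the port number). reg_id.mirror is zeroed before the conditional attach so the err_add unwind can treat "mirror != 0" as "the second attach succeeded and must be undone". A user-space mimic of that invariant, with hypothetical attach/detach stubs:

    #include <stdio.h>
    #include <stdint.h>

    struct reg { uint64_t id, mirror; };

    /* stand-ins for mlx4_multicast_attach()/detach() */
    static int attach(int port, uint64_t *h) { *h = 100 + port; return 0; }
    static void detach(uint64_t h) { printf("detach %llu\n", (unsigned long long)h); }

    static int do_attach(int is_bonded)
    {
            struct reg reg_id;
            int err;

            if ((err = attach(1, &reg_id.id)))
                    return err;
            reg_id.mirror = 0;              /* must precede the conditional attach */
            if (is_bonded && (err = attach(2, &reg_id.mirror)))
                    goto err_add;

            err = -1;                       /* simulate add_gid_entry() failing */
    err_add:
            detach(reg_id.id);
            if (reg_id.mirror)              /* nonzero only if port-2 attach ran */
                    detach(reg_id.mirror);
            return err;
    }

    int main(void)
    {
            do_attach(1);                   /* prints: detach 101, detach 102 */
            return 0;
    }
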
@@ -1281,10 +1339,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	int err;
 	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_dev *dev = mdev->dev;
 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
 	struct net_device *ndev;
 	struct mlx4_ib_gid_entry *ge;
-	u64 reg_id = 0;
+	struct mlx4_flow_reg_id reg_id = {0, 0};
+
 	enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
 		MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
 
@@ -1309,10 +1369,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	}
 
 	err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
-				    prot, reg_id);
+				    prot, reg_id.id);
 	if (err)
 		return err;
 
+	if (mlx4_is_bonded(dev)) {
+		err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+					    prot, reg_id.mirror);
+		if (err)
+			return err;
+	}
+
 	mutex_lock(&mqp->mutex);
 	ge = find_gid_entry(mqp, gid->raw);
 	if (ge) {
@@ -1440,6 +1507,7 @@ static void update_gids_task(struct work_struct *work)
 	union ib_gid *gids;
 	int err;
 	struct mlx4_dev	*dev = gw->dev->dev;
+	int is_bonded = mlx4_is_bonded(dev);
 
 	if (!gw->dev->ib_active)
 		return;
@@ -1459,7 +1527,10 @@ static void update_gids_task(struct work_struct *work)
 	if (err)
 		pr_warn("set port command failed\n");
 	else
-		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+		if ((gw->port == 1) || !is_bonded)
+			mlx4_ib_dispatch_event(gw->dev,
+					       is_bonded ? 1 : gw->port,
+					       IB_EVENT_GID_CHANGE);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	kfree(gw);
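
With bonding active the device exposes a single IB port, so a GID table update dispatches at most one IB_EVENT_GID_CHANGE: only the update for physical port 1 produces an event, and it is always reported as port 1; non-bonded behavior is unchanged. The condition's full truth table, as a runnable check:

    #include <stdio.h>

    int main(void)
    {
            for (int is_bonded = 0; is_bonded <= 1; is_bonded++)
                    for (int port = 1; port <= 2; port++)
                            if ((port == 1) || !is_bonded)
                                    printf("bonded=%d port=%d -> dispatch on port %d\n",
                                           is_bonded, port, is_bonded ? 1 : port);
                            else
                                    printf("bonded=%d port=%d -> suppressed\n",
                                           is_bonded, port);
            return 0;
    }
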
@@ -1875,7 +1946,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 			 * don't want the bond IP based gids in the table since
 			 * flows that select port by gid may get the down port.
 			 */
-			if (port_state == IB_PORT_DOWN) {
+			if (port_state == IB_PORT_DOWN &&
+			    !mlx4_is_bonded(ibdev->dev)) {
 				reset_gid_table(ibdev, port);
 				mlx4_ib_set_default_gid(ibdev,
 							curr_netdev,
@@ -2047,6 +2119,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	int err;
 	struct mlx4_ib_iboe *iboe;
 	int ib_num_ports = 0;
+	int num_req_counters;
 
 	pr_info_once("%s", mlx4_ib_version);
 
@@ -2080,13 +2153,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
 	ibdev->dev = dev;
+	ibdev->bond_next_port	= 0;
 
 	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
 	ibdev->ib_dev.owner		= THIS_MODULE;
 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey	= dev->caps.reserved_lkey;
 	ibdev->num_ports		= num_ports;
-	ibdev->ib_dev.phys_port_cnt	= ibdev->num_ports;
+	ibdev->ib_dev.phys_port_cnt	= mlx4_is_bonded(dev) ?
+						1 : ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
 	ibdev->ib_dev.dma_device	= &dev->persist->pdev->dev;
 
@@ -2207,7 +2282,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (init_node_data(ibdev))
 		goto err_map;
 
-	for (i = 0; i < ibdev->num_ports; ++i) {
+	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+	for (i = 0; i < num_req_counters; ++i) {
 		mutex_init(&ibdev->qp1_proxy_lock[i]);
 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
 		    IB_LINK_LAYER_ETHERNET) {
@@ -2218,6 +2294,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			ibdev->counters[i] = -1;
 		}
 	}
+	if (mlx4_is_bonded(dev))
+		for (i = 1; i < ibdev->num_ports ; ++i)
+			ibdev->counters[i] = ibdev->counters[0];
+
 
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
 		ib_num_ports++;
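
Because a bonded device registers with phys_port_cnt == 1, only one counter set is requested (num_req_counters == 1); the remaining per-physical-port slots are then aliased to slot 0 so that code indexing counters[] by physical port keeps working. A sketch of the aliasing with a made-up counter index:

    #include <stdio.h>

    #define MAX_PORTS 2

    int main(void)
    {
            int counters[MAX_PORTS];
            int is_bonded = 1;
            int num_ports = MAX_PORTS;
            int num_req_counters = is_bonded ? 1 : num_ports;

            for (int i = 0; i < num_req_counters; i++)
                    counters[i] = 40 + i;   /* stand-in for a HW counter index */
            if (is_bonded)
                    for (int i = 1; i < num_ports; i++)
                            counters[i] = counters[0];      /* alias slot 0 */

            for (int i = 0; i < num_ports; i++)
                    printf("port %d -> counter %d\n", i + 1, counters[i]);
            return 0;
    }
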
@@ -2538,6 +2618,38 @@ out:
 	return;
 }
 
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+	struct ib_event_work *ew =
+		container_of(work, struct ib_event_work, work);
+	struct mlx4_ib_dev *ibdev = ew->ib_dev;
+	enum ib_port_state bonded_port_state = IB_PORT_NOP;
+	int i;
+	struct ib_event ibev;
+
+	kfree(ew);
+	spin_lock_bh(&ibdev->iboe.lock);
+	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+
+		enum ib_port_state curr_port_state =
+			(netif_running(curr_netdev) &&
+			 netif_carrier_ok(curr_netdev)) ?
+				IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+			curr_port_state : IB_PORT_ACTIVE;
+	}
+	spin_unlock_bh(&ibdev->iboe.lock);
+
+	ibev.device = &ibdev->ib_dev;
+	ibev.element.port_num = 1;
+	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+	ib_dispatch_event(&ibev);
+}
+
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 			  enum mlx4_dev_event event, unsigned long param)
 {
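
handle_bonded_port_state_event() folds the slave states into one: the loop is a sticky OR, leaving bonded_port_state ACTIVE if any slave netdev is both running and has carrier, and the result is dispatched as a single port-1 event (IB_PORT_NOP is just the pre-loop seed). Note the loop dereferences iboe->netdevs[i] unconditionally, so it relies on both slots being populated whenever bonding is active. The aggregation, reduced to a runnable check:

    #include <stdio.h>

    enum state { DOWN, ACTIVE, NOP };

    int main(void)
    {
            enum state slaves[2] = { DOWN, ACTIVE };
            enum state bonded = NOP;

            /* sticky OR: once ACTIVE, stays ACTIVE */
            for (int i = 0; i < 2; i++)
                    bonded = (bonded != ACTIVE) ? slaves[i] : ACTIVE;

            printf("bond is %s\n", bonded == ACTIVE ? "ACTIVE" : "DOWN");
            return 0;
    }
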
@@ -2547,6 +2659,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	struct ib_event_work *ew;
 	int p = 0;
 
+	if (mlx4_is_bonded(dev) &&
+	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
+	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+		if (!ew)
+			return;
+		INIT_WORK(&ew->work, handle_bonded_port_state_event);
+		ew->ib_dev = ibdev;
+		queue_work(wq, &ew->work);
+		return;
+	}
+
 	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
 		eqe = (struct mlx4_eqe *)param;
 	else
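
Port up/down events arrive in a context that must not sleep, hence GFP_ATOMIC and the immediate hand-off to the driver workqueue; the handler runs later, takes the locks it needs, and owns the work item (which is why handle_bonded_port_state_event() begins with kfree(ew)). A user-space analog of that ownership hand-off, with the queueing reduced to a direct call:

    #include <stdio.h>
    #include <stdlib.h>

    struct work { void (*fn)(struct work *); int port_event; };

    static void handler(struct work *w)
    {
            int ev = w->port_event;

            free(w);                        /* handler owns the allocation */
            printf("handled port event %d\n", ev);
    }

    int main(void)
    {
            struct work *w = malloc(sizeof(*w));    /* GFP_ATOMIC stand-in */

            if (!w)
                    return 0;               /* best effort: drop the event */
            w->fn = handler;
            w->port_event = 1;
            w->fn(w);                       /* queue_work() stand-in: runs later */
            return 0;
    }
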
@@ -2607,7 +2731,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	}
 
 	ibev.device = ibdev_ptr;
-	ibev.element.port_num = (u8) p;
+	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
 
 	ib_dispatch_event(&ibev);
 }
@@ -2616,7 +2740,8 @@ static struct mlx4_interface mlx4_ib_interface = {
 	.add		= mlx4_ib_add,
 	.remove		= mlx4_ib_remove,
 	.event		= mlx4_ib_event,
-	.protocol	= MLX4_PROT_IB_IPV6
+	.protocol	= MLX4_PROT_IB_IPV6,
+	.flags		= MLX4_INTFF_BONDING
 };
 
 static int __init mlx4_ib_init(void)
