author		Moni Shoua <monis@mellanox.com>		2015-02-03 09:48:37 -0500
committer	David S. Miller <davem@davemloft.net>	2015-02-04 19:14:25 -0500
commit		a575009030931cd8a35c88ec81eb26b9e9f73539 (patch)
tree		788a8394f35b844b65597d8a7f27aff68702bafb /drivers/infiniband
parent		2f48485d1cea5b6c1ce04969ab0228aa7b4659e5 (diff)
IB/mlx4: Add port aggregation support
Register the interface with the mlx4 core driver, requesting port aggregation
support, and check for port aggregation mode when the 'add' function is called.
In this mode, only one physical port is reported to the upper layer
(RoCE/IB core stack and ULPs).
Signed-off-by: Moni Shoua <monis@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
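
For readers unfamiliar with the mlx4 interface model, here is a minimal sketch of how a
client driver opts in to port aggregation and gates per-port behaviour on bonded mode.
It assumes the MLX4_INTFF_BONDING flag and the mlx4_is_bonded() helper introduced by the
companion mlx4 core patch in this series; my_add/my_remove/my_event and my_intf are
hypothetical names used only for illustration and are not part of this commit.

/* Sketch only: registering a mlx4 client with port aggregation support. */
#include <linux/module.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>

static void *my_add(struct mlx4_dev *dev)
{
	/* In bonded mode the physical ports are exposed as a single port. */
	int reported_ports = mlx4_is_bonded(dev) ? 1 : dev->caps.num_ports;

	pr_info("mlx4 client: reporting %d port(s) upward\n", reported_ports);
	return dev;			/* opaque context, passed back below */
}

static void my_remove(struct mlx4_dev *dev, void *context)
{
}

static void my_event(struct mlx4_dev *dev, void *context,
		     enum mlx4_dev_event event, unsigned long param)
{
}

static struct mlx4_interface my_intf = {
	.add		= my_add,
	.remove		= my_remove,
	.event		= my_event,
	.protocol	= MLX4_PROT_IB_IPV6,
	.flags		= MLX4_INTFF_BONDING,	/* request port aggregation */
};

static int __init my_init(void)
{
	return mlx4_register_interface(&my_intf);
}

static void __exit my_exit(void)
{
	mlx4_unregister_interface(&my_intf);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");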
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	76
1 file changed, 70 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9db258f7c804..ed21ae68a977 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	enum ib_mtu tmp;
 	struct mlx4_cmd_mailbox *mailbox;
 	int err = 0;
+	int is_bonded = mlx4_is_bonded(mdev->dev);
 
 	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
 	if (IS_ERR(mailbox))
@@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->state = IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 	props->active_mtu = IB_MTU_256;
+	if (is_bonded)
+		rtnl_lock(); /* required to get upper dev */
 	spin_lock_bh(&iboe->lock);
 	ndev = iboe->netdevs[port - 1];
+	if (ndev && is_bonded)
+		ndev = netdev_master_upper_dev_get(ndev);
 	if (!ndev)
 		goto out_unlock;
 
@@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->phys_state = state_to_phys_state(props->state);
 out_unlock:
 	spin_unlock_bh(&iboe->lock);
+	if (is_bonded)
+		rtnl_unlock();
 out:
 	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
 	return err;
@@ -1440,6 +1447,7 @@ static void update_gids_task(struct work_struct *work)
 	union ib_gid *gids;
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
+	int is_bonded = mlx4_is_bonded(dev);
 
 	if (!gw->dev->ib_active)
 		return;
@@ -1459,7 +1467,10 @@ static void update_gids_task(struct work_struct *work)
 	if (err)
 		pr_warn("set port command failed\n");
 	else
-		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
+		if ((gw->port == 1) || !is_bonded)
+			mlx4_ib_dispatch_event(gw->dev,
+					       is_bonded ? 1 : gw->port,
+					       IB_EVENT_GID_CHANGE);
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	kfree(gw);
@@ -1875,7 +1886,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
			 * don't want the bond IP based gids in the table since
			 * flows that select port by gid may get the down port.
			 */
-			if (port_state == IB_PORT_DOWN) {
+			if (port_state == IB_PORT_DOWN &&
+			    !mlx4_is_bonded(ibdev->dev)) {
				reset_gid_table(ibdev, port);
				mlx4_ib_set_default_gid(ibdev,
							curr_netdev,
@@ -2047,6 +2059,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	int err;
 	struct mlx4_ib_iboe *iboe;
 	int ib_num_ports = 0;
+	int num_req_counters;
 
 	pr_info_once("%s", mlx4_ib_version);
 
@@ -2086,7 +2099,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
 	ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
 	ibdev->num_ports = num_ports;
-	ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
+	ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
+						1 : ibdev->num_ports;
 	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
 	ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
 
@@ -2207,7 +2221,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (init_node_data(ibdev))
 		goto err_map;
 
-	for (i = 0; i < ibdev->num_ports; ++i) {
+	num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
+	for (i = 0; i < num_req_counters; ++i) {
 		mutex_init(&ibdev->qp1_proxy_lock[i]);
 		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
						IB_LINK_LAYER_ETHERNET) {
@@ -2218,6 +2233,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 			ibdev->counters[i] = -1;
 		}
 	}
+	if (mlx4_is_bonded(dev))
+		for (i = 1; i < ibdev->num_ports ; ++i)
+			ibdev->counters[i] = ibdev->counters[0];
+
 
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
 		ib_num_ports++;
@@ -2538,6 +2557,38 @@ out:
 	return;
 }
 
+static void handle_bonded_port_state_event(struct work_struct *work)
+{
+	struct ib_event_work *ew =
+		container_of(work, struct ib_event_work, work);
+	struct mlx4_ib_dev *ibdev = ew->ib_dev;
+	enum ib_port_state bonded_port_state = IB_PORT_NOP;
+	int i;
+	struct ib_event ibev;
+
+	kfree(ew);
+	spin_lock_bh(&ibdev->iboe.lock);
+	for (i = 0; i < MLX4_MAX_PORTS; ++i) {
+		struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
+
+		enum ib_port_state curr_port_state =
+			(netif_running(curr_netdev) &&
+			 netif_carrier_ok(curr_netdev)) ?
+			IB_PORT_ACTIVE : IB_PORT_DOWN;
+
+		bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
+			curr_port_state : IB_PORT_ACTIVE;
+	}
+	spin_unlock_bh(&ibdev->iboe.lock);
+
+	ibev.device = &ibdev->ib_dev;
+	ibev.element.port_num = 1;
+	ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
+		IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+
+	ib_dispatch_event(&ibev);
+}
+
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
			  enum mlx4_dev_event event, unsigned long param)
 {
@@ -2547,6 +2598,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	struct ib_event_work *ew;
 	int p = 0;
 
+	if (mlx4_is_bonded(dev) &&
+	    ((event == MLX4_DEV_EVENT_PORT_UP) ||
+	     (event == MLX4_DEV_EVENT_PORT_DOWN))) {
+		ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
+		if (!ew)
+			return;
+		INIT_WORK(&ew->work, handle_bonded_port_state_event);
+		ew->ib_dev = ibdev;
+		queue_work(wq, &ew->work);
+		return;
+	}
+
 	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
 		eqe = (struct mlx4_eqe *)param;
 	else
@@ -2607,7 +2670,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 	}
 
 	ibev.device = ibdev_ptr;
-	ibev.element.port_num = (u8) p;
+	ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;
 
 	ib_dispatch_event(&ibev);
 }
@@ -2616,7 +2679,8 @@ static struct mlx4_interface mlx4_ib_interface = {
 	.add		= mlx4_ib_add,
 	.remove		= mlx4_ib_remove,
 	.event		= mlx4_ib_event,
-	.protocol	= MLX4_PROT_IB_IPV6
+	.protocol	= MLX4_PROT_IB_IPV6,
+	.flags		= MLX4_INTFF_BONDING
 };
 
 static int __init mlx4_ib_init(void)