author     Aviv Heller <avivh@mellanox.com>        2016-09-18 13:48:00 -0400
committer  Doug Ledford <dledford@redhat.com>      2016-10-07 16:54:20 -0400
commit     5ec8c83e3ad3ea4ea78798edcd4ad61e0041a174 (patch)
tree       4e6699d3b8b6ff840638526a097ac58bead20ce0
parent     350d0e4c7e4b03ed5646ac39ba4aac98bb3d9c56 (diff)
IB/mlx5: Port events in RoCE now rely on netdev events
Since ib_query_port() in RoCE returns the state of its netdev as the port
state, it makes sense to propagate the port up/down events to ib_core when
the netdev port state changes, instead of relying on traditional core events.

This also keeps both the event and ib_query_port() synchronized.

Signed-off-by: Aviv Heller <avivh@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
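For illustration only, not part of this patch: a minimal sketch of how an ib_core client could observe the IB_EVENT_PORT_ACTIVE / IB_EVENT_PORT_ERR events this change dispatches from the netdev notifier, assuming the standard ib_register_event_handler() API. The names my_port_event and my_watch_ports are hypothetical.

    #include <rdma/ib_verbs.h>

    /* Invoked by ib_core for every asynchronous event on the device. */
    static void my_port_event(struct ib_event_handler *handler,
                              struct ib_event *event)
    {
            if (event->event == IB_EVENT_PORT_ACTIVE ||
                event->event == IB_EVENT_PORT_ERR)
                    pr_info("%s: port %u went %s\n", event->device->name,
                            event->element.port_num,
                            event->event == IB_EVENT_PORT_ACTIVE ?
                            "up" : "down");
    }

    /* Register the handler once an ib_device is available, e.g. from an
     * ib_client add() callback.
     */
    static void my_watch_ports(struct ib_device *device,
                               struct ib_event_handler *handler)
    {
            INIT_IB_EVENT_HANDLER(handler, device, my_port_event);
            ib_register_event_handler(handler);
    }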
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c  66
1 file changed, 51 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 98844c1db555..ed038b7a96fc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -107,13 +107,32 @@ static int mlx5_netdev_event(struct notifier_block *this,
 	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
 						 roce.nb);
 
-	if ((event != NETDEV_UNREGISTER) && (event != NETDEV_REGISTER))
-		return NOTIFY_DONE;
+	switch (event) {
+	case NETDEV_REGISTER:
+	case NETDEV_UNREGISTER:
+		write_lock(&ibdev->roce.netdev_lock);
+		if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
+			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
+					     NULL : ndev;
+		write_unlock(&ibdev->roce.netdev_lock);
+		break;
 
-	write_lock(&ibdev->roce.netdev_lock);
-	if (ndev->dev.parent == &ibdev->mdev->pdev->dev)
-		ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ? NULL : ndev;
-	write_unlock(&ibdev->roce.netdev_lock);
+	case NETDEV_UP:
+	case NETDEV_DOWN:
+		if (ndev == ibdev->roce.netdev && ibdev->ib_active) {
+			struct ib_event ibev = {0};
+
+			ibev.device = &ibdev->ib_dev;
+			ibev.event = (event == NETDEV_UP) ?
+				     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+			ibev.element.port_num = 1;
+			ib_dispatch_event(&ibev);
+		}
+		break;
+
+	default:
+		break;
+	}
 
 	return NOTIFY_DONE;
 }
@@ -2267,14 +2286,19 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 		break;
 
 	case MLX5_DEV_EVENT_PORT_UP:
-		ibev.event = IB_EVENT_PORT_ACTIVE;
-		port = (u8)param;
-		break;
-
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		ibev.event = IB_EVENT_PORT_ERR;
 		port = (u8)param;
+
+		/* In RoCE, port up/down events are handled in
+		 * mlx5_netdev_event().
+		 */
+		if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+			IB_LINK_LAYER_ETHERNET)
+			return;
+
+		ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
+			     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
 		break;
 
 	case MLX5_DEV_EVENT_LID_CHANGE:
@@ -2679,14 +2703,24 @@ static void get_dev_fw_str(struct ib_device *ibdev, char *str,
 		 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
+static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
+{
+	if (dev->roce.nb.notifier_call) {
+		unregister_netdevice_notifier(&dev->roce.nb);
+		dev->roce.nb.notifier_call = NULL;
+	}
+}
+
 static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 {
 	int err;
 
 	dev->roce.nb.notifier_call = mlx5_netdev_event;
 	err = register_netdevice_notifier(&dev->roce.nb);
-	if (err)
+	if (err) {
+		dev->roce.nb.notifier_call = NULL;
 		return err;
+	}
 
 	err = mlx5_nic_vport_enable_roce(dev->mdev);
 	if (err)
@@ -2695,14 +2729,13 @@ static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
 	return 0;
 
 err_unregister_netdevice_notifier:
-	unregister_netdevice_notifier(&dev->roce.nb);
+	mlx5_remove_roce_notifier(dev);
 	return err;
 }
 
 static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
 {
 	mlx5_nic_vport_disable_roce(dev->mdev);
-	unregister_netdevice_notifier(&dev->roce.nb);
 }
 
 static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -3051,8 +3084,10 @@ err_rsrc:
 	destroy_dev_resources(&dev->devr);
 
 err_disable_roce:
-	if (ll == IB_LINK_LAYER_ETHERNET)
+	if (ll == IB_LINK_LAYER_ETHERNET) {
 		mlx5_disable_roce(dev);
+		mlx5_remove_roce_notifier(dev);
+	}
 
 err_free_port:
 	kfree(dev->port);
@@ -3068,6 +3103,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 	struct mlx5_ib_dev *dev = context;
 	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);
 
+	mlx5_remove_roce_notifier(dev);
 	ib_unregister_device(&dev->ib_dev);
 	mlx5_ib_dealloc_q_counters(dev);
 	destroy_umrc_res(dev);