about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/mlx5/main.c
diff options
context:
space:
mode:
author: David S. Miller <davem@davemloft.net> 2018-03-23 11:24:57 -0400
committer: David S. Miller <davem@davemloft.net> 2018-03-23 11:31:58 -0400
commit: 03fe2debbb2771fb90881e4ce8109b09cf772a5c (patch)
tree: fbaf8738296b2e9dcba81c6daef2d515b6c4948c /drivers/infiniband/hw/mlx5/main.c
parent: 6686c459e1449a3ee5f3fd313b0a559ace7a700e (diff)
parent: f36b7534b83357cf52e747905de6d65b4f7c2512 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here... For the mac80211 stuff, these were fortunately just parallel adds. Trivially resolved. In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the function phy_disable_interrupts() earlier in the file, whilst in 'net-next' the phy_error() call from this function was removed. In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the 'rt_table_id' member of rtable collided with a bug fix in 'net' that added a new struct member "rt_mtu_locked" which needs to be copied over here. The mlxsw driver conflict consisted of net-next separating the span code and definitions into separate files, whilst a 'net' bug fix made some changes to that moved code. The mlx5 infiniband conflict resolution was quite non-trivial, the RDMA tree's merge commit was used as a guide here, and here are their notes: ==================== Due to bug fixes found by the syzkaller bot and taken into the for-rc branch after development for the 4.17 merge window had already started being taken into the for-next branch, there were fairly non-trivial merge issues that would need to be resolved between the for-rc branch and the for-next branch. This merge resolves those conflicts and provides a unified base upon which ongoing development for 4.17 can be based. Conflicts: drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524 (IB/mlx5: Fix cleanup order on unload) added to for-rc and commit b5ca15ad7e61 (IB/mlx5: Add proper representors support) add as part of the devel cycle both needed to modify the init/de-init functions used by mlx5. To support the new representors, the new functions added by the cleanup patch needed to be made non-static, and the init/de-init list added by the representors patch needed to be modified to match the init/de-init list changes made by the cleanup patch. 
Updates: drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function prototypes added by representors patch to reflect new function names as changed by cleanup patch drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init stage list to match new order from cleanup patch ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 51
1 file changed, 27 insertions, 24 deletions
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d9474b95d8e5..390e4375647e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -256,12 +256,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
256 struct mlx5_ib_multiport_info *mpi; 256 struct mlx5_ib_multiport_info *mpi;
257 struct mlx5_ib_port *port; 257 struct mlx5_ib_port *port;
258 258
259 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
260 ll != IB_LINK_LAYER_ETHERNET) {
261 if (native_port_num)
262 *native_port_num = ib_port_num;
263 return ibdev->mdev;
264 }
265
259 if (native_port_num) 266 if (native_port_num)
260 *native_port_num = 1; 267 *native_port_num = 1;
261 268
262 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
263 return ibdev->mdev;
264
265 port = &ibdev->port[ib_port_num - 1]; 269 port = &ibdev->port[ib_port_num - 1];
266 if (!port) 270 if (!port)
267 return NULL; 271 return NULL;
@@ -3297,7 +3301,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3297 struct mlx5_ib_dev *ibdev; 3301 struct mlx5_ib_dev *ibdev;
3298 struct ib_event ibev; 3302 struct ib_event ibev;
3299 bool fatal = false; 3303 bool fatal = false;
3300 u8 port = 0; 3304 u8 port = (u8)work->param;
3301 3305
3302 if (mlx5_core_is_mp_slave(work->dev)) { 3306 if (mlx5_core_is_mp_slave(work->dev)) {
3303 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); 3307 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3317,8 +3321,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3317 case MLX5_DEV_EVENT_PORT_UP: 3321 case MLX5_DEV_EVENT_PORT_UP:
3318 case MLX5_DEV_EVENT_PORT_DOWN: 3322 case MLX5_DEV_EVENT_PORT_DOWN:
3319 case MLX5_DEV_EVENT_PORT_INITIALIZED: 3323 case MLX5_DEV_EVENT_PORT_INITIALIZED:
3320 port = (u8)work->param;
3321
3322 /* In RoCE, port up/down events are handled in 3324 /* In RoCE, port up/down events are handled in
3323 * mlx5_netdev_event(). 3325 * mlx5_netdev_event().
3324 */ 3326 */
@@ -3332,24 +3334,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3332 3334
3333 case MLX5_DEV_EVENT_LID_CHANGE: 3335 case MLX5_DEV_EVENT_LID_CHANGE:
3334 ibev.event = IB_EVENT_LID_CHANGE; 3336 ibev.event = IB_EVENT_LID_CHANGE;
3335 port = (u8)work->param;
3336 break; 3337 break;
3337 3338
3338 case MLX5_DEV_EVENT_PKEY_CHANGE: 3339 case MLX5_DEV_EVENT_PKEY_CHANGE:
3339 ibev.event = IB_EVENT_PKEY_CHANGE; 3340 ibev.event = IB_EVENT_PKEY_CHANGE;
3340 port = (u8)work->param;
3341
3342 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 3341 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
3343 break; 3342 break;
3344 3343
3345 case MLX5_DEV_EVENT_GUID_CHANGE: 3344 case MLX5_DEV_EVENT_GUID_CHANGE:
3346 ibev.event = IB_EVENT_GID_CHANGE; 3345 ibev.event = IB_EVENT_GID_CHANGE;
3347 port = (u8)work->param;
3348 break; 3346 break;
3349 3347
3350 case MLX5_DEV_EVENT_CLIENT_REREG: 3348 case MLX5_DEV_EVENT_CLIENT_REREG:
3351 ibev.event = IB_EVENT_CLIENT_REREGISTER; 3349 ibev.event = IB_EVENT_CLIENT_REREGISTER;
3352 port = (u8)work->param;
3353 break; 3350 break;
3354 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT: 3351 case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
3355 schedule_work(&ibdev->delay_drop.delay_drop_work); 3352 schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3361,7 +3358,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
3361 ibev.device = &ibdev->ib_dev; 3358 ibev.device = &ibdev->ib_dev;
3362 ibev.element.port_num = port; 3359 ibev.element.port_num = port;
3363 3360
3364 if (port < 1 || port > ibdev->num_ports) { 3361 if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
3365 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); 3362 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
3366 goto out; 3363 goto out;
3367 } 3364 }
@@ -4999,19 +4996,19 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4999 return ib_register_device(&dev->ib_dev, NULL); 4996 return ib_register_device(&dev->ib_dev, NULL);
5000} 4997}
5001 4998
5002void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) 4999void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
5003{ 5000{
5004 ib_unregister_device(&dev->ib_dev); 5001 destroy_umrc_res(dev);
5005} 5002}
5006 5003
5007int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev) 5004void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
5008{ 5005{
5009 return create_umr_res(dev); 5006 ib_unregister_device(&dev->ib_dev);
5010} 5007}
5011 5008
5012void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev) 5009int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
5013{ 5010{
5014 destroy_umrc_res(dev); 5011 return create_umr_res(dev);
5015} 5012}
5016 5013
5017static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) 5014static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -5130,12 +5127,15 @@ static const struct mlx5_ib_profile pf_profile = {
5130 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 5127 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5131 mlx5_ib_stage_bfrag_init, 5128 mlx5_ib_stage_bfrag_init,
5132 mlx5_ib_stage_bfrag_cleanup), 5129 mlx5_ib_stage_bfrag_cleanup),
5130 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5131 NULL,
5132 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5133 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 5133 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5134 mlx5_ib_stage_ib_reg_init, 5134 mlx5_ib_stage_ib_reg_init,
5135 mlx5_ib_stage_ib_reg_cleanup), 5135 mlx5_ib_stage_ib_reg_cleanup),
5136 STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, 5136 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5137 mlx5_ib_stage_umr_res_init, 5137 mlx5_ib_stage_post_ib_reg_umr_init,
5138 mlx5_ib_stage_umr_res_cleanup), 5138 NULL),
5139 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, 5139 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
5140 mlx5_ib_stage_delay_drop_init, 5140 mlx5_ib_stage_delay_drop_init,
5141 mlx5_ib_stage_delay_drop_cleanup), 5141 mlx5_ib_stage_delay_drop_cleanup),
@@ -5172,12 +5172,15 @@ static const struct mlx5_ib_profile nic_rep_profile = {
5172 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 5172 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
5173 mlx5_ib_stage_bfrag_init, 5173 mlx5_ib_stage_bfrag_init,
5174 mlx5_ib_stage_bfrag_cleanup), 5174 mlx5_ib_stage_bfrag_cleanup),
5175 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
5176 NULL,
5177 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
5175 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 5178 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
5176 mlx5_ib_stage_ib_reg_init, 5179 mlx5_ib_stage_ib_reg_init,
5177 mlx5_ib_stage_ib_reg_cleanup), 5180 mlx5_ib_stage_ib_reg_cleanup),
5178 STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, 5181 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
5179 mlx5_ib_stage_umr_res_init, 5182 mlx5_ib_stage_post_ib_reg_umr_init,
5180 mlx5_ib_stage_umr_res_cleanup), 5183 NULL),
5181 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR, 5184 STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
5182 mlx5_ib_stage_class_attr_init, 5185 mlx5_ib_stage_class_attr_init,
5183 NULL), 5186 NULL),