author	Jack Morgenstein <jackm@dev.mellanox.co.il>	2012-06-19 04:21:40 -0400
committer	Roland Dreier <roland@purestorage.com>	2012-07-10 12:47:10 -0400
commit	00f5ce99dc6ee46c3113393cc8fa12173f9bbcd7 (patch)
tree	cbcd8e2afcd492b3677bf3eedec29b34f249129c /drivers/infiniband
parent	3045f0920367e625bbec7d66fadb444e673515af (diff)
mlx4: Use port management change event instead of smp_snoop
The port management change event can replace smp_snoop.

If the capability bit for this event is set in dev-caps, the event is used (by the driver setting the PORT_MNG_CHG_EVENT bit in the async event mask in the MAP_EQ FW command). In this case, when the driver passes incoming SMP PORT_INFO SET MADs to the FW, the FW generates port management change events to signal any changes to the driver.

If the FW generates these events, smp_snoop should not be invoked in ib_process_mad(), or duplicate events will occur (once from the FW-generated event, and once from smp_snoop).

In the case where the FW does not generate port management change events, smp_snoop needs to be invoked to create these events. The flow in smp_snoop has been modified to use the same procedures as the FW-generated-event case to generate the port management events (LID change, Client-rereg, P_Key change, and/or GID change).

Port management change event handling required changing the mlx4_ib_event and mlx4_dispatch_event prototypes: the "param" argument (the last argument) had to be changed to unsigned long in order to accommodate passing the EQE pointer.

We also needed to move the definition of struct mlx4_eqe from net/mlx4.h to device.h, to make it available to the IB driver for handling port management change events.

Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
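As a reading aid, the gating rule described above can be summarized in a small, self-contained user-space sketch. This is illustration only, not driver code: fake_caps, the *_stub() helpers, main(), and the flag's bit value are invented for the sketch; only the MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV test mirrors the ib_process_mad() hunk in the diff below.

/*
 * Illustrative user-space sketch only -- NOT the driver code.  It models the
 * gating rule from the message above: smp_snoop() is used only when the FW
 * cannot generate port management change events itself, otherwise the same
 * events would be raised twice.
 */
#include <stdio.h>

#define MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV (1ULL << 59)	/* placeholder bit for this sketch */

struct fake_caps {
	unsigned long long flags;	/* stands in for dev->caps.flags */
};

/* Stand-in for smp_snoop(): the driver synthesizes the port events itself. */
static void smp_snoop_stub(int port)
{
	printf("smp_snoop: synthesizing port events for port %d\n", port);
}

/* Stand-in for the FW-event path (handle_port_mgmt_change_event below). */
static void port_mgmt_change_stub(int port)
{
	printf("FW event: dispatching port events for port %d\n", port);
}

/* Mirrors the new test in ib_process_mad(): snoop only when the cap is absent. */
static void process_set_mad(const struct fake_caps *caps, int port)
{
	if (!(caps->flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
		smp_snoop_stub(port);
	/* else: the FW raises MLX4_DEV_EVENT_PORT_MGMT_CHANGE asynchronously */
}

int main(void)
{
	struct fake_caps old_fw = { .flags = 0 };
	struct fake_caps new_fw = { .flags = MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV };

	process_set_mad(&old_fw, 1);	/* older FW: falls back to snooping   */
	process_set_mad(&new_fw, 1);	/* newer FW: snooping is skipped ...  */
	port_mgmt_change_stub(1);	/* ... event arrives from the FW path */
	return 0;
}

In the real driver both paths end up in the new mlx4_ib_dispatch_event() helper added to mad.c below.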
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx4/mad.c	118
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	29
-rw-r--r--	drivers/infiniband/hw/mlx4/mlx4_ib.h	9
3 files changed, 122 insertions(+), 34 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 84786a9fb64f..58c45fb5bd31 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -147,47 +147,49 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 }
 
 /*
- * Snoop SM MADs for port info and P_Key table sets, so we can
- * synthesize LID change and P_Key change events.
+ * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
+ * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
  */
 static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
 		      u16 prev_lid)
 {
-	struct ib_event event;
+	struct ib_port_info *pinfo;
+	u16 lid;
 
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
 	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
-	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
-		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
-			struct ib_port_info *pinfo =
-				(struct ib_port_info *) ((struct ib_smp *) mad)->data;
-			u16 lid = be16_to_cpu(pinfo->lid);
+	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
+		switch (mad->mad_hdr.attr_id) {
+		case IB_SMP_ATTR_PORT_INFO:
+			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
+			lid = be16_to_cpu(pinfo->lid);
 
-			update_sm_ah(to_mdev(ibdev), port_num,
+			update_sm_ah(dev, port_num,
 				     be16_to_cpu(pinfo->sm_lid),
 				     pinfo->neighbormtu_mastersmsl & 0xf);
 
-			event.device           = ibdev;
-			event.element.port_num = port_num;
+			if (pinfo->clientrereg_resv_subnetto & 0x80)
+				mlx4_ib_dispatch_event(dev, port_num,
+						       IB_EVENT_CLIENT_REREGISTER);
 
-			if (pinfo->clientrereg_resv_subnetto & 0x80) {
-				event.event    = IB_EVENT_CLIENT_REREGISTER;
-				ib_dispatch_event(&event);
-			}
+			if (prev_lid != lid)
+				mlx4_ib_dispatch_event(dev, port_num,
+						       IB_EVENT_LID_CHANGE);
+			break;
 
-			if (prev_lid != lid) {
-				event.event    = IB_EVENT_LID_CHANGE;
-				ib_dispatch_event(&event);
-			}
-		}
+		case IB_SMP_ATTR_PKEY_TABLE:
+			mlx4_ib_dispatch_event(dev, port_num,
+					       IB_EVENT_PKEY_CHANGE);
+			break;
 
-		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
-			event.device           = ibdev;
-			event.event            = IB_EVENT_PKEY_CHANGE;
-			event.element.port_num = port_num;
-			ib_dispatch_event(&event);
+		case IB_SMP_ATTR_GUID_INFO:
+			mlx4_ib_dispatch_event(dev, port_num,
+					       IB_EVENT_GID_CHANGE);
+			break;
+		default:
+			break;
 		}
-	}
 }
 
 static void node_desc_override(struct ib_device *dev,
@@ -305,7 +307,8 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 		return IB_MAD_RESULT_FAILURE;
 
 	if (!out_mad->mad_hdr.status) {
-		smp_snoop(ibdev, port_num, in_mad, prev_lid);
+		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
+			smp_snoop(ibdev, port_num, in_mad, prev_lid);
 		node_desc_override(ibdev, out_mad);
 	}
 
@@ -446,3 +449,62 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
 			ib_destroy_ah(dev->sm_ah[p]);
 	}
 }
+
+void handle_port_mgmt_change_event(struct work_struct *work)
+{
+	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+	struct mlx4_ib_dev *dev = ew->ib_dev;
+	struct mlx4_eqe *eqe = &(ew->ib_eqe);
+	u8 port = eqe->event.port_mgmt_change.port;
+	u32 changed_attr;
+
+	switch (eqe->subtype) {
+	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
+		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
+
+		/* Update the SM ah - This should be done before handling
+		   the other changed attributes so that MADs can be sent to the SM */
+		if (changed_attr & MSTR_SM_CHANGE_MASK) {
+			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
+			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
+			update_sm_ah(dev, port, lid, sl);
+		}
+
+		/* Check if it is a lid change event */
+		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
+			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);
+
+		/* Generate GUID changed event */
+		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
+			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+
+		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
+			mlx4_ib_dispatch_event(dev, port,
+					       IB_EVENT_CLIENT_REREGISTER);
+		break;
+
+	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
+		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
+		break;
+	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
+		mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+		break;
+	default:
+		pr_warn("Unsupported subtype 0x%x for "
+			"Port Management Change event\n", eqe->subtype);
+	}
+
+	kfree(ew);
+}
+
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+			    enum ib_event_type type)
+{
+	struct ib_event event;
+
+	event.device		= &dev->ib_dev;
+	event.element.port_num	= port_num;
+	event.event		= type;
+
+	ib_dispatch_event(&event);
+}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 5266b49c46ee..4f230c26622d 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -898,7 +898,6 @@ static void update_gids_task(struct work_struct *work)
 	union ib_gid *gids;
 	int err;
 	struct mlx4_dev *dev = gw->dev->dev;
-	struct ib_event event;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox)) {
@@ -916,10 +915,7 @@ static void update_gids_task(struct work_struct *work)
 		pr_warn("set port command failed\n");
 	else {
 		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
-		event.device = &gw->dev->ib_dev;
-		event.element.port_num = gw->port;
-		event.event = IB_EVENT_GID_CHANGE;
-		ib_dispatch_event(&event);
+		mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
 	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1383,10 +1379,18 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 }
 
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
-			  enum mlx4_dev_event event, int port)
+			  enum mlx4_dev_event event, unsigned long param)
 {
 	struct ib_event ibev;
 	struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+	struct mlx4_eqe *eqe = NULL;
+	struct ib_event_work *ew;
+	int port = 0;
+
+	if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
+		eqe = (struct mlx4_eqe *)param;
+	else
+		port = (u8)param;
 
 	if (port > ibdev->num_ports)
 		return;
@@ -1405,6 +1409,19 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 		ibev.event = IB_EVENT_DEVICE_FATAL;
 		break;
 
+	case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
+		ew = kmalloc(sizeof *ew, GFP_ATOMIC);
+		if (!ew) {
+			pr_err("failed to allocate memory for events work\n");
+			break;
+		}
+
+		INIT_WORK(&ew->work, handle_port_mgmt_change_event);
+		memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
+		ew->ib_dev = ibdev;
+		handle_port_mgmt_change_event(&ew->work);
+		return;
+
 	default:
 		return;
 	}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 5f298afaa81f..23bfbf9ee0e0 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -224,6 +224,12 @@ struct mlx4_ib_dev {
 	int eq_added;
 };
 
+struct ib_event_work {
+	struct work_struct	work;
+	struct mlx4_ib_dev	*ib_dev;
+	struct mlx4_eqe		ib_eqe;
+};
+
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
 {
 	return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -381,4 +387,7 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
 		   union ib_gid *gid);
 
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+			    enum ib_event_type type);
+
 #endif /* MLX4_IB_H */