Diffstat (limited to 'drivers/infiniband/hw/mlx4')
 drivers/infiniband/hw/mlx4/mad.c     | 141
 drivers/infiniband/hw/mlx4/main.c    |  33
 drivers/infiniband/hw/mlx4/mlx4_ib.h |  19
 drivers/infiniband/hw/mlx4/qp.c      |  27
 4 files changed, 182 insertions(+), 38 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 259b0670b51c..c27141fef1ab 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -147,47 +147,51 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
 }
 
 /*
- * Snoop SM MADs for port info and P_Key table sets, so we can
- * synthesize LID change and P_Key change events.
+ * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
+ * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
  */
 static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
                       u16 prev_lid)
 {
-        struct ib_event event;
+        struct ib_port_info *pinfo;
+        u16 lid;
 
+        struct mlx4_ib_dev *dev = to_mdev(ibdev);
         if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
              mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
-            mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
-                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
-                        struct ib_port_info *pinfo =
-                                (struct ib_port_info *) ((struct ib_smp *) mad)->data;
-                        u16 lid = be16_to_cpu(pinfo->lid);
+            mad->mad_hdr.method == IB_MGMT_METHOD_SET)
+                switch (mad->mad_hdr.attr_id) {
+                case IB_SMP_ATTR_PORT_INFO:
+                        pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
+                        lid = be16_to_cpu(pinfo->lid);
 
-                        update_sm_ah(to_mdev(ibdev), port_num,
+                        update_sm_ah(dev, port_num,
                                      be16_to_cpu(pinfo->sm_lid),
                                      pinfo->neighbormtu_mastersmsl & 0xf);
 
-                        event.device           = ibdev;
-                        event.element.port_num = port_num;
+                        if (pinfo->clientrereg_resv_subnetto & 0x80)
+                                mlx4_ib_dispatch_event(dev, port_num,
+                                                       IB_EVENT_CLIENT_REREGISTER);
 
-                        if (pinfo->clientrereg_resv_subnetto & 0x80) {
-                                event.event    = IB_EVENT_CLIENT_REREGISTER;
-                                ib_dispatch_event(&event);
-                        }
+                        if (prev_lid != lid)
+                                mlx4_ib_dispatch_event(dev, port_num,
+                                                       IB_EVENT_LID_CHANGE);
+                        break;
 
-                        if (prev_lid != lid) {
-                                event.event    = IB_EVENT_LID_CHANGE;
-                                ib_dispatch_event(&event);
-                        }
-                }
+                case IB_SMP_ATTR_PKEY_TABLE:
+                        mlx4_ib_dispatch_event(dev, port_num,
+                                               IB_EVENT_PKEY_CHANGE);
+                        break;
 
-                if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
-                        event.device           = ibdev;
-                        event.event            = IB_EVENT_PKEY_CHANGE;
-                        event.element.port_num = port_num;
-                        ib_dispatch_event(&event);
+                case IB_SMP_ATTR_GUID_INFO:
+                        /* paravirtualized master's guid is guid 0 -- does not change */
+                        if (!mlx4_is_master(dev->dev))
+                                mlx4_ib_dispatch_event(dev, port_num,
+                                                       IB_EVENT_GID_CHANGE);
+                        break;
+                default:
+                        break;
                 }
-        }
 }
 
 static void node_desc_override(struct ib_device *dev,
@@ -242,6 +246,25 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
         int err;
         struct ib_port_attr pattr;
 
+        if (in_wc && in_wc->qp->qp_num) {
+                pr_debug("received MAD: slid:%d sqpn:%d "
+                        "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
+                        in_wc->slid, in_wc->src_qp,
+                        in_wc->dlid_path_bits,
+                        in_wc->qp->qp_num,
+                        in_wc->wc_flags,
+                        in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
+                        be16_to_cpu(in_mad->mad_hdr.attr_id));
+                if (in_wc->wc_flags & IB_WC_GRH) {
+                        pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
+                                 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
+                                 be64_to_cpu(in_grh->sgid.global.interface_id));
+                        pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
+                                 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
+                                 be64_to_cpu(in_grh->dgid.global.interface_id));
+                }
+        }
+
         slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
         if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
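
[Note: the MAD tracing added above compiles to almost nothing unless debugging is on. Assuming the kernel is built with CONFIG_DYNAMIC_DEBUG, these pr_debug() calls can be enabled at runtime with, for example:
        echo 'file drivers/infiniband/hw/mlx4/mad.c +p' > /sys/kernel/debug/dynamic_debug/control]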
@@ -286,7 +309,8 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                 return IB_MAD_RESULT_FAILURE;
 
         if (!out_mad->mad_hdr.status) {
-                smp_snoop(ibdev, port_num, in_mad, prev_lid);
+                if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
+                        smp_snoop(ibdev, port_num, in_mad, prev_lid);
                 node_desc_override(ibdev, out_mad);
         }
 
@@ -427,3 +451,64 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
                 ib_destroy_ah(dev->sm_ah[p]);
         }
 }
+
+void handle_port_mgmt_change_event(struct work_struct *work)
+{
+        struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+        struct mlx4_ib_dev *dev = ew->ib_dev;
+        struct mlx4_eqe *eqe = &(ew->ib_eqe);
+        u8 port = eqe->event.port_mgmt_change.port;
+        u32 changed_attr;
+
+        switch (eqe->subtype) {
+        case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
+                changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
+
+                /* Update the SM ah - This should be done before handling
+                   the other changed attributes so that MADs can be sent to the SM */
+                if (changed_attr & MSTR_SM_CHANGE_MASK) {
+                        u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
+                        u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
+                        update_sm_ah(dev, port, lid, sl);
+                }
+
+                /* Check if it is a lid change event */
+                if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
+                        mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);
+
+                /* Generate GUID changed event */
+                if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
+                        mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+
+                if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
+                        mlx4_ib_dispatch_event(dev, port,
+                                               IB_EVENT_CLIENT_REREGISTER);
+                break;
+
+        case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
+                mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
+                break;
+        case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
+                /* paravirtualized master's guid is guid 0 -- does not change */
+                if (!mlx4_is_master(dev->dev))
+                        mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+                break;
+        default:
+                pr_warn("Unsupported subtype 0x%x for "
+                        "Port Management Change event\n", eqe->subtype);
+        }
+
+        kfree(ew);
+}
+
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+                            enum ib_event_type type)
+{
+        struct ib_event event;
+
+        event.device            = &dev->ib_dev;
+        event.element.port_num  = port_num;
+        event.event             = type;
+
+        ib_dispatch_event(&event);
+}
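
[Note: the events funneled through mlx4_ib_dispatch_event() reach consumers via the standard verbs notification chain. A minimal sketch of a hypothetical ULP-side handler (illustrative only, not part of this patch):

        static void my_port_event_handler(struct ib_event_handler *handler,
                                          struct ib_event *event)
        {
                /* each event carries the originating device and port */
                switch (event->event) {
                case IB_EVENT_LID_CHANGE:
                case IB_EVENT_PKEY_CHANGE:
                case IB_EVENT_GID_CHANGE:
                case IB_EVENT_CLIENT_REREGISTER:
                        pr_info("port %d: event %d\n",
                                event->element.port_num, event->event);
                        break;
                default:
                        break;
                }
        }

        /* at init time, for some struct ib_device *device: */
        static struct ib_event_handler ev_handler;

        INIT_IB_EVENT_HANDLER(&ev_handler, device, my_port_event_handler);
        ib_register_event_handler(&ev_handler);]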
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3530c41fcd1f..5f50d4fae255 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -50,7 +50,7 @@
 #include "mlx4_ib.h"
 #include "user.h"
 
-#define DRV_NAME        "mlx4_ib"
+#define DRV_NAME        MLX4_IB_DRV_NAME
 #define DRV_VERSION     "1.0"
 #define DRV_RELDATE     "April 4, 2008"
 
@@ -157,7 +157,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
         props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
         props->atomic_cap          = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-        props->masked_atomic_cap   = IB_ATOMIC_HCA;
+        props->masked_atomic_cap   = props->atomic_cap;
         props->max_pkeys           = dev->dev->caps.pkey_table_len[1];
         props->max_mcast_grp       = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
         props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
@@ -898,7 +898,6 @@ static void update_gids_task(struct work_struct *work)
         union ib_gid *gids;
         int err;
         struct mlx4_dev *dev = gw->dev->dev;
-        struct ib_event event;
 
         mailbox = mlx4_alloc_cmd_mailbox(dev);
         if (IS_ERR(mailbox)) {
@@ -916,10 +915,7 @@ static void update_gids_task(struct work_struct *work)
                 pr_warn("set port command failed\n");
         else {
                 memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
-                event.device = &gw->dev->ib_dev;
-                event.element.port_num = gw->port;
-                event.event = IB_EVENT_GID_CHANGE;
-                ib_dispatch_event(&event);
+                mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
         }
 
         mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1383,10 +1379,18 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 }
 
 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
-                          enum mlx4_dev_event event, int port)
+                          enum mlx4_dev_event event, unsigned long param)
 {
         struct ib_event ibev;
         struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+        struct mlx4_eqe *eqe = NULL;
+        struct ib_event_work *ew;
+        int port = 0;
+
+        if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
+                eqe = (struct mlx4_eqe *)param;
+        else
+                port = (u8)param;
 
         if (port > ibdev->num_ports)
                 return;
@@ -1405,6 +1409,19 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                 ibev.event = IB_EVENT_DEVICE_FATAL;
                 break;
 
+        case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
+                ew = kmalloc(sizeof *ew, GFP_ATOMIC);
+                if (!ew) {
+                        pr_err("failed to allocate memory for events work\n");
+                        break;
+                }
+
+                INIT_WORK(&ew->work, handle_port_mgmt_change_event);
+                memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
+                ew->ib_dev = ibdev;
+                handle_port_mgmt_change_event(&ew->work);
+                return;
+
         default:
                 return;
         }
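
[Design note: handle_port_mgmt_change_event() takes all of its context from the embedded work_struct and frees the ib_event_work itself, so although the hunk above invokes the handler synchronously, the same packaging would also support deferred execution. A hedged sketch of that hypothetical variant:

        /* hypothetical deferred variant -- not what the patch does above */
        INIT_WORK(&ew->work, handle_port_mgmt_change_event);
        memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
        ew->ib_dev = ibdev;
        schedule_work(&ew->work);       /* handler kfree()s ew on completion */]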
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index ff36655d23d3..23bfbf9ee0e0 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -44,6 +44,16 @@
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
 
+#define MLX4_IB_DRV_NAME        "mlx4_ib"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt)     "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
+
+#define mlx4_ib_warn(ibdev, format, arg...) \
+        dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)
+
 enum {
         MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
         MLX4_IB_MAX_HEADROOM     = 2048
@@ -214,6 +224,12 @@ struct mlx4_ib_dev {
         int eq_added;
 };
 
+struct ib_event_work {
+        struct work_struct      work;
+        struct mlx4_ib_dev      *ib_dev;
+        struct mlx4_eqe         ib_eqe;
+};
+
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
 {
         return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -371,4 +387,7 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                    union ib_gid *gid);
 
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+                            enum ib_event_type type);
+
 #endif /* MLX4_IB_H */
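
[Note: the pr_fmt override above tags every pr_*() message in the driver with the module name and calling function; the trailing ", __func__" inside pr_fmt() becomes the first vararg of the underlying printk(). A stand-alone userspace mockup of the same trick (illustrative only, not driver code):

        #include <stdio.h>

        #define MLX4_IB_DRV_NAME "mlx4_ib"
        #define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
        #define pr_warn(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

        int main(void)
        {
                pr_warn("Unsupported subtype 0x%x for Port Management Change event\n", 0x7);
                /* prints: <mlx4_ib> main: Unsupported subtype 0x7 for ... */
                return 0;
        }]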
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8d4ed24aef93..84b26963c8d4 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1335,11 +1335,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
         new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+                pr_debug("qpn 0x%x: invalid attribute mask specified "
+                         "for transition %d to %d. qp_type %d,"
+                         " attr_mask 0x%x\n",
+                         ibqp->qp_num, cur_state, new_state,
+                         ibqp->qp_type, attr_mask);
                 goto out;
+        }
 
         if ((attr_mask & IB_QP_PORT) &&
             (attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
+                pr_debug("qpn 0x%x: invalid port number (%d) specified "
+                         "for transition %d to %d. qp_type %d\n",
+                         ibqp->qp_num, attr->port_num, cur_state,
+                         new_state, ibqp->qp_type);
                 goto out;
         }
 
@@ -1350,17 +1360,30 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
         if (attr_mask & IB_QP_PKEY_INDEX) {
                 int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-                if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
+                if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
+                        pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
+                                 "for transition %d to %d. qp_type %d\n",
+                                 ibqp->qp_num, attr->pkey_index, cur_state,
+                                 new_state, ibqp->qp_type);
                         goto out;
+                }
         }
 
         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
             attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
+                pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
+                         "Transition %d to %d. qp_type %d\n",
+                         ibqp->qp_num, attr->max_rd_atomic, cur_state,
+                         new_state, ibqp->qp_type);
                 goto out;
         }
 
         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
             attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
+                pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
+                         "Transition %d to %d. qp_type %d\n",
+                         ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
+                         new_state, ibqp->qp_type);
                 goto out;
         }
 
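
[Note: with the mlx4_ib.h pr_fmt in effect, each rejected ib_modify_qp() call now leaves a breadcrumb in the log. A hypothetical caller (illustrative QP number and values, assuming an RC QP in the INIT state):

        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_RTR, /* INIT -> RTR without the required masks */
        };
        int ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
        /* ret == -EINVAL; with dynamic debug enabled the log would read roughly:
         * <mlx4_ib> mlx4_ib_modify_qp: qpn 0x43: invalid attribute mask specified
         * for transition 1 to 2. qp_type 2, attr_mask 0x1
         */]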