path: root/drivers/infiniband/hw/mlx4/mad.c
Diffstat (limited to 'drivers/infiniband/hw/mlx4/mad.c')
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c  1573
1 files changed, 1554 insertions, 19 deletions
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 9c2ae7efd00f..21a794152d15 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -32,7 +32,10 @@
 32  32
 33  33 #include <rdma/ib_mad.h>
 34  34 #include <rdma/ib_smi.h>
     35 #include <rdma/ib_sa.h>
     36 #include <rdma/ib_cache.h>
 35  37
     38 #include <linux/random.h>
 36  39 #include <linux/mlx4/cmd.h>
 37  40 #include <linux/gfp.h>
 38  41 #include <rdma/ib_pma.h>
@@ -44,7 +47,62 @@ enum {
 44  47 	MLX4_IB_VENDOR_CLASS2 = 0xa
 45  48 };
 46  49
-44	(removed, see line 105 below)
-47 int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 50#define MLX4_TUN_SEND_WRID_SHIFT 34
51#define MLX4_TUN_QPN_SHIFT 32
52#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
53#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
54
55#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
56#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
57
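
A note on the wr_id encoding defined above: bits 0-31 carry the ring-buffer index, bits 32-33 the proxy QP type, and bit 34 a receive/send flag. The following minimal userspace sketch (not part of the patch; uses <stdint.h> types in place of the kernel's u64) shows the macros round-tripping:

#include <assert.h>
#include <stdint.h>

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((uint64_t) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((uint64_t) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)
#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)

int main(void)
{
    /* buffer index 5 on the GSI (qp-type 1) ring, marked as a receive WR */
    uint64_t wr_id = 5 | MLX4_TUN_WRID_RECV | MLX4_TUN_SET_WRID_QPN(1);

    assert(MLX4_TUN_IS_RECV(wr_id) == 1);
    assert(MLX4_TUN_WRID_QPN(wr_id) == 1);
    assert((wr_id & 0xffffffff) == 5);  /* low bits keep the ring index */
    return 0;
}
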
58 /* Port mgmt change event handling */
59
60#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
61#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
62#define NUM_IDX_IN_PKEY_TBL_BLK 32
63#define GUID_TBL_ENTRY_SIZE 8 /* size in bytes */
64#define GUID_TBL_BLK_NUM_ENTRIES 8
65#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)
66
67struct mlx4_mad_rcv_buf {
68 struct ib_grh grh;
69 u8 payload[256];
70} __packed;
71
72struct mlx4_mad_snd_buf {
73 u8 payload[256];
74} __packed;
75
76struct mlx4_tunnel_mad {
77 struct ib_grh grh;
78 struct mlx4_ib_tunnel_header hdr;
79 struct ib_mad mad;
80} __packed;
81
82struct mlx4_rcv_tunnel_mad {
83 struct mlx4_rcv_tunnel_hdr hdr;
84 struct ib_grh grh;
85 struct ib_mad mad;
86} __packed;
87
88static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
89static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
90static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
91 int block, u32 change_bitmap);
92
93__be64 mlx4_ib_gen_node_guid(void)
94{
95#define NODE_GUID_HI ((u64) (((u64)IB_OPENIB_OUI) << 40))
96 return cpu_to_be64(NODE_GUID_HI | random32());
97}
98
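
mlx4_ib_gen_node_guid() above builds an EUI-64-style GUID with the OpenIB OUI (0x001405) in the top 24 bits and 32 random low bits. A userspace sketch of the same construction, with rand() standing in for the kernel's random32() (illustration only):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define IB_OPENIB_OUI 0x001405  /* OpenFabrics OUI, as in <rdma/ib_sa.h> */

static uint64_t gen_node_guid(void)
{
    /* OUI in bits 40..63, random entropy in the low 32 bits */
    return ((uint64_t)IB_OPENIB_OUI << 40) | (uint32_t)rand();
}

int main(void)
{
    assert(gen_node_guid() >> 40 == IB_OPENIB_OUI);
    return 0;
}
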
99__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
100{
101 return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
102 cpu_to_be64(0xff00000000000000LL);
103}
104
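
mlx4_ib_get_new_demux_tid() draws TIDs from an atomic counter and forces the top byte to 0xff, so master-generated TIDs can never collide with TIDs whose top byte was stamped with a slave id (see mlx4_ib_demux_mad() further down). A host-order userspace sketch (the kernel variant additionally byte-swaps with cpu_to_be64()):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static atomic_uint_fast64_t tid_counter;

static uint64_t get_new_demux_tid(void)
{
    /* fetch_add returns the old value; +1 mimics atomic_inc_return() */
    return (atomic_fetch_add(&tid_counter, 1) + 1) | 0xff00000000000000ULL;
}

int main(void)
{
    uint64_t t1 = get_new_demux_tid(), t2 = get_new_demux_tid();

    assert(t1 != t2);                            /* unique per call */
    assert((t1 >> 56) == 0xff && (t2 >> 56) == 0xff);  /* fixed top byte */
    return 0;
}
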
105int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
 48 106 		 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
 49 107 		 void *in_mad, void *response_mad)
 50 108 {
@@ -71,10 +129,13 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 71 129 	 * Key check traps can't be generated unless we have in_wc to
 72 130 	 * tell us where to send the trap.
 73 131 	 */
-74 	if (ignore_mkey || !in_wc)
132 	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
 75 133 		op_modifier |= 0x1;
-76 	if (ignore_bkey || !in_wc)
134 	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
 77 135 		op_modifier |= 0x2;
136 	if (mlx4_is_mfunc(dev->dev) &&
137 	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
138 		op_modifier |= 0x8;
 78 139
 79 140 	if (in_wc) {
 80 141 		struct {
@@ -107,10 +168,10 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
107 168 		in_modifier |= in_wc->slid << 16;
108 169 	}
109 170
-110 	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
-111 			   in_modifier, op_modifier,
171 	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
172 			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
112 173 			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
-113 			   MLX4_CMD_NATIVE);
174 			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);
114 175
115 176 	if (!err)
116 177 		memcpy(response_mad, outmailbox->buf, 256);
@@ -156,6 +217,10 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
156 217 {
157 218 	struct ib_port_info *pinfo;
158 219 	u16 lid;
220 	__be16 *base;
221 	u32 bn, pkey_change_bitmap;
222 	int i;
223
159 224
160 225 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
161 226 	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
@@ -171,17 +236,46 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
171 236 					    pinfo->neighbormtu_mastersmsl & 0xf);
172 237
173 238 			if (pinfo->clientrereg_resv_subnetto & 0x80)
-174 				mlx4_ib_dispatch_event(dev, port_num,
-175 						       IB_EVENT_CLIENT_REREGISTER);
239 				handle_client_rereg_event(dev, port_num);
176 240
177 241 			if (prev_lid != lid)
-178 				mlx4_ib_dispatch_event(dev, port_num,
-179 						       IB_EVENT_LID_CHANGE);
242 				handle_lid_change_event(dev, port_num);
180 243 			break;
181 244
182 245 		case IB_SMP_ATTR_PKEY_TABLE:
-183 			mlx4_ib_dispatch_event(dev, port_num,
-184 					       IB_EVENT_PKEY_CHANGE);
246 			if (!mlx4_is_mfunc(dev->dev)) {
247 				mlx4_ib_dispatch_event(dev, port_num,
248 IB_EVENT_PKEY_CHANGE);
249 break;
250 }
251
252 /* at this point, we are running in the master.
253 * Slaves do not receive SMPs.
254 */
255 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
256 base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
257 pkey_change_bitmap = 0;
258 for (i = 0; i < 32; i++) {
259 pr_debug("PKEY[%d] = x%x\n",
260 i + bn*32, be16_to_cpu(base[i]));
261 if (be16_to_cpu(base[i]) !=
262 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
263 pkey_change_bitmap |= (1 << i);
264 dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
265 be16_to_cpu(base[i]);
266 }
267 }
268 pr_debug("PKEY Change event: port=%d, "
269 "block=0x%x, change_bitmap=0x%x\n",
270 port_num, bn, pkey_change_bitmap);
271
272 if (pkey_change_bitmap) {
273 mlx4_ib_dispatch_event(dev, port_num,
274 IB_EVENT_PKEY_CHANGE);
275 if (!dev->sriov.is_going_down)
276 __propagate_pkey_ev(dev, port_num, bn,
277 pkey_change_bitmap);
278 }
185 279 			break;
186 280
187 281 		case IB_SMP_ATTR_GUID_INFO:
@@ -189,12 +283,56 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
189 283 			if (!mlx4_is_master(dev->dev))
190 284 				mlx4_ib_dispatch_event(dev, port_num,
191 285 						       IB_EVENT_GID_CHANGE);
286 /*if master, notify relevant slaves*/
287 if (mlx4_is_master(dev->dev) &&
288 !dev->sriov.is_going_down) {
289 bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
290 mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
291 (u8 *)(&((struct ib_smp *)mad)->data));
292 mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
293 (u8 *)(&((struct ib_smp *)mad)->data));
294 }
192 295 		break;
296
193 297 	default:
194 298 		break;
195 299 	}
196 300 }
197 301
302static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
303 int block, u32 change_bitmap)
304{
305 int i, ix, slave, err;
306 int have_event = 0;
307
308 for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
309 if (slave == mlx4_master_func_num(dev->dev))
310 continue;
311 if (!mlx4_is_slave_active(dev->dev, slave))
312 continue;
313
314 have_event = 0;
315 for (i = 0; i < 32; i++) {
316 if (!(change_bitmap & (1 << i)))
317 continue;
318 for (ix = 0;
319 ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
320 if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
321 [ix] == i + 32 * block) {
322 err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
323 pr_debug("propagate_pkey_ev: slave %d,"
324 " port %d, ix %d (%d)\n",
325 slave, port_num, ix, err);
326 have_event = 1;
327 break;
328 }
329 }
330 if (have_event)
331 break;
332 }
333 }
334}
335
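
The pkey handling in smp_snoop() above compares each 32-entry table block written by the SM against a cached copy and builds a change bitmap in which bit i stands for physical pkey index block*32 + i; __propagate_pkey_ev() then walks that bitmap per slave. A small userspace sketch of the diff-and-cache step (hypothetical helper name, for illustration only):

#include <assert.h>
#include <stdint.h>

static uint32_t diff_pkey_block(const uint16_t *newblk, uint16_t *cache)
{
    uint32_t bitmap = 0;

    for (int i = 0; i < 32; i++) {
        if (newblk[i] != cache[i]) {
            bitmap |= 1u << i;       /* bit i => entry block*32 + i changed */
            cache[i] = newblk[i];    /* update the cached copy in place */
        }
    }
    return bitmap;
}

int main(void)
{
    uint16_t cache[32] = { [0] = 0xffff };
    uint16_t blk[32]   = { [0] = 0xffff, [3] = 0x8001 };

    assert(diff_pkey_block(blk, cache) == (1u << 3));
    assert(diff_pkey_block(blk, cache) == 0);  /* cache now matches */
    return 0;
}
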
198 336 static void node_desc_override(struct ib_device *dev,
199 337 			       struct ib_mad *mad)
200 338 {
@@ -242,6 +380,268 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
242 380 	}
243 381 }
244 382
383static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
384 struct ib_sa_mad *sa_mad)
385{
386 int ret = 0;
387
388 /* dispatch to different sa handlers */
389 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
390 case IB_SA_ATTR_MC_MEMBER_REC:
391 ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
392 break;
393 default:
394 break;
395 }
396 return ret;
397}
398
399int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
400{
401 struct mlx4_ib_dev *dev = to_mdev(ibdev);
402 int i;
403
404 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
405 if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
406 return i;
407 }
408 return -1;
409}
410
411
412static int get_pkey_phys_indices(struct mlx4_ib_dev *ibdev, u8 port, u8 ph_pkey_ix,
413 u8 *full_pk_ix, u8 *partial_pk_ix,
414 int *is_full_member)
415{
416 u16 search_pkey;
417 int fm;
418 int err = 0;
419 u16 pk;
420
421 err = ib_get_cached_pkey(&ibdev->ib_dev, port, ph_pkey_ix, &search_pkey);
422 if (err)
423 return err;
424
425 fm = (search_pkey & 0x8000) ? 1 : 0;
426 if (fm) {
427 *full_pk_ix = ph_pkey_ix;
428 search_pkey &= 0x7FFF;
429 } else {
430 *partial_pk_ix = ph_pkey_ix;
431 search_pkey |= 0x8000;
432 }
433
434 if (ib_find_exact_cached_pkey(&ibdev->ib_dev, port, search_pkey, &pk))
435 pk = 0xFFFF;
436
437 if (fm)
438 *partial_pk_ix = (pk & 0xFF);
439 else
440 *full_pk_ix = (pk & 0xFF);
441
442 *is_full_member = fm;
443 return err;
444}
445
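
get_pkey_phys_indices() above relies on the InfiniBand pkey convention that bit 15 distinguishes full (1) from limited (0) membership, while the low 15 bits name the partition; the "other" index is found by toggling that bit and searching the cached table. A minimal sketch of the convention itself (values chosen for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint16_t full = 0xffff;             /* default partition, full member */
    uint16_t limited = full & 0x7fff;   /* same partition, limited member */

    assert((full & 0x8000) && !(limited & 0x8000)); /* membership bit */
    assert((full & 0x7fff) == (limited & 0x7fff));  /* same partition key */
    return 0;
}
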
446int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
447 enum ib_qp_type dest_qpt, struct ib_wc *wc,
448 struct ib_grh *grh, struct ib_mad *mad)
449{
450 struct ib_sge list;
451 struct ib_send_wr wr, *bad_wr;
452 struct mlx4_ib_demux_pv_ctx *tun_ctx;
453 struct mlx4_ib_demux_pv_qp *tun_qp;
454 struct mlx4_rcv_tunnel_mad *tun_mad;
455 struct ib_ah_attr attr;
456 struct ib_ah *ah;
457 struct ib_qp *src_qp = NULL;
458 unsigned tun_tx_ix = 0;
459 int dqpn;
460 int ret = 0;
461 int i;
462 int is_full_member = 0;
463 u16 tun_pkey_ix;
464 u8 ph_pkey_ix, full_pk_ix = 0, partial_pk_ix = 0;
465
466 if (dest_qpt > IB_QPT_GSI)
467 return -EINVAL;
468
469 tun_ctx = dev->sriov.demux[port-1].tun[slave];
470
471 /* check if proxy qp created */
472 if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
473 return -EAGAIN;
474
475 /* QP0 forwarding only for Dom0 */
476 if (!dest_qpt && (mlx4_master_func_num(dev->dev) != slave))
477 return -EINVAL;
478
479 if (!dest_qpt)
480 tun_qp = &tun_ctx->qp[0];
481 else
482 tun_qp = &tun_ctx->qp[1];
483
484 /* compute pkey index for slave */
485 /* get physical pkey -- virtualized Dom0 pkey to phys*/
486 if (dest_qpt) {
487 ph_pkey_ix =
488 dev->pkeys.virt2phys_pkey[mlx4_master_func_num(dev->dev)][port - 1][wc->pkey_index];
489
490 /* now, translate this to the slave pkey index */
491 ret = get_pkey_phys_indices(dev, port, ph_pkey_ix, &full_pk_ix,
492 &partial_pk_ix, &is_full_member);
493 if (ret)
494 return -EINVAL;
495
496 for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
497 if ((dev->pkeys.virt2phys_pkey[slave][port - 1][i] == full_pk_ix) ||
498 (is_full_member &&
499 (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == partial_pk_ix)))
500 break;
501 }
502 if (i == dev->dev->caps.pkey_table_len[port])
503 return -EINVAL;
504 tun_pkey_ix = i;
505 } else
506 tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
507
508 dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;
509
510 /* get tunnel tx data buf for slave */
511 src_qp = tun_qp->qp;
512
513 /* create ah. Just need an empty one with the port num for the post send.
514 * The driver will set the force loopback bit in post_send */
515 memset(&attr, 0, sizeof attr);
516 attr.port_num = port;
517 ah = ib_create_ah(tun_ctx->pd, &attr);
518 if (IS_ERR(ah))
519 return -ENOMEM;
520
521 /* allocate tunnel tx buf after pass failure returns */
522 spin_lock(&tun_qp->tx_lock);
523 if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
524 (MLX4_NUM_TUNNEL_BUFS - 1))
525 ret = -EAGAIN;
526 else
527 tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
528 spin_unlock(&tun_qp->tx_lock);
529 if (ret)
530 goto out;
531
532 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
533 if (tun_qp->tx_ring[tun_tx_ix].ah)
534 ib_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
535 tun_qp->tx_ring[tun_tx_ix].ah = ah;
536 ib_dma_sync_single_for_cpu(&dev->ib_dev,
537 tun_qp->tx_ring[tun_tx_ix].buf.map,
538 sizeof (struct mlx4_rcv_tunnel_mad),
539 DMA_TO_DEVICE);
540
541 /* copy over to tunnel buffer */
542 if (grh)
543 memcpy(&tun_mad->grh, grh, sizeof *grh);
544 memcpy(&tun_mad->mad, mad, sizeof *mad);
545
546 /* adjust tunnel data */
547 tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
548 tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
549 tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
550 tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
551 tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;
552
553 ib_dma_sync_single_for_device(&dev->ib_dev,
554 tun_qp->tx_ring[tun_tx_ix].buf.map,
555 sizeof (struct mlx4_rcv_tunnel_mad),
556 DMA_TO_DEVICE);
557
558 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
559 list.length = sizeof (struct mlx4_rcv_tunnel_mad);
560 list.lkey = tun_ctx->mr->lkey;
561
562 wr.wr.ud.ah = ah;
563 wr.wr.ud.port_num = port;
564 wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
565 wr.wr.ud.remote_qpn = dqpn;
566 wr.next = NULL;
567 wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
568 wr.sg_list = &list;
569 wr.num_sge = 1;
570 wr.opcode = IB_WR_SEND;
571 wr.send_flags = IB_SEND_SIGNALED;
572
573 ret = ib_post_send(src_qp, &wr, &bad_wr);
574out:
575 if (ret)
576 ib_destroy_ah(ah);
577 return ret;
578}
579
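
The TX bookkeeping in mlx4_ib_send_to_slave() above (and in mlx4_ib_send_to_wire() below) uses free-running head/tail counters: the ring is full when head - tail reaches MLX4_NUM_TUNNEL_BUFS - 1, and the slot index comes from masking the incremented head, which assumes a power-of-two ring size. A self-contained sketch of that scheme (NUM_BUFS is a stand-in value; the driver serializes with tx_lock, omitted here):

#include <assert.h>

#define NUM_BUFS 256    /* stands in for MLX4_NUM_TUNNEL_BUFS */

static unsigned head, tail;     /* free-running, wrap naturally */

static int get_tx_slot(unsigned *ix)
{
    if (head - tail >= NUM_BUFS - 1)
        return -1;              /* ring full, caller retries (-EAGAIN) */
    *ix = (++head) & (NUM_BUFS - 1);
    return 0;
}

int main(void)
{
    unsigned ix;

    for (int i = 0; i < NUM_BUFS - 1; i++)
        assert(get_tx_slot(&ix) == 0);
    assert(get_tx_slot(&ix) == -1); /* full */
    tail++;                         /* a send completion frees a slot */
    assert(get_tx_slot(&ix) == 0);
    return 0;
}
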
580static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
581 struct ib_wc *wc, struct ib_grh *grh,
582 struct ib_mad *mad)
583{
584 struct mlx4_ib_dev *dev = to_mdev(ibdev);
585 int err;
586 int slave;
587 u8 *slave_id;
588
589 /* Initially assume that this mad is for us */
590 slave = mlx4_master_func_num(dev->dev);
591
592 /* See if the slave id is encoded in a response mad */
593 if (mad->mad_hdr.method & 0x80) {
594 slave_id = (u8 *) &mad->mad_hdr.tid;
595 slave = *slave_id;
596 if (slave != 255) /*255 indicates the dom0*/
597 *slave_id = 0; /* remap tid */
598 }
599
600 /* If a grh is present, we demux according to it */
601 if (wc->wc_flags & IB_WC_GRH) {
602 slave = mlx4_ib_find_real_gid(ibdev, port, grh->dgid.global.interface_id);
603 if (slave < 0) {
604 mlx4_ib_warn(ibdev, "failed matching grh\n");
605 return -ENOENT;
606 }
607 }
608 /* Class-specific handling */
609 switch (mad->mad_hdr.mgmt_class) {
610 case IB_MGMT_CLASS_SUBN_ADM:
611 if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
612 (struct ib_sa_mad *) mad))
613 return 0;
614 break;
615 case IB_MGMT_CLASS_CM:
616 if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
617 return 0;
618 break;
619 case IB_MGMT_CLASS_DEVICE_MGMT:
620 if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
621 return 0;
622 break;
623 default:
624 /* Drop unsupported classes for slaves in tunnel mode */
625 if (slave != mlx4_master_func_num(dev->dev)) {
626 pr_debug("dropping unsupported ingress mad from class:%d "
627 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
628 return 0;
629 }
630 }
 631 	/* make sure a slave==255 (dom0 marker) that was not remapped above is caught here */
632 if (slave >= dev->dev->caps.sqp_demux) {
633 mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
634 slave, dev->dev->caps.sqp_demux);
635 return -ENOENT;
636 }
637
638 err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
639 if (err)
640 pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
641 slave, err);
642 return 0;
643}
644
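
mlx4_ib_demux_mad() above recovers the destination slave from the top byte of the 64-bit TID that the multiplex path stamped on egress (see mlx4_ib_multiplex_mad() below), then zeroes it to restore the original TID. A userspace sketch of that byte-stamping on a wire-format (big-endian) TID:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* wire-format (big-endian) TID as it sits in the MAD header */
    uint8_t tid[8] = { 0x00, 0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde };
    uint8_t *slave_id = &tid[0];    /* MSB, as in (u8 *) &mad_hdr.tid */

    *slave_id = 7;              /* egress: stamp the sending slave */
    assert(*slave_id == 7);     /* ingress response: recover it ... */
    *slave_id = 0;              /* ... and remap the TID back */
    assert(tid[1] == 0x12);     /* rest of the TID is untouched */
    return 0;
}
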
245 645 static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
246 646 			  struct ib_wc *in_wc, struct ib_grh *in_grh,
247 647 			  struct ib_mad *in_mad, struct ib_mad *out_mad)
@@ -306,8 +706,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
306 706 		prev_lid = pattr.lid;
307 707
308 708 	err = mlx4_MAD_IFC(to_mdev(ibdev),
-309 			   mad_flags & IB_MAD_IGNORE_MKEY,
-310 			   mad_flags & IB_MAD_IGNORE_BKEY,
709 			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
710 			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
711 			   MLX4_MAD_IFC_NET_VIEW,
311 712 			   port_num, in_wc, in_grh, in_mad, out_mad);
312 713 	if (err)
313 714 		return IB_MAD_RESULT_FAILURE;
@@ -315,7 +716,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
315 716 	if (!out_mad->mad_hdr.status) {
316 717 		if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
317 718 			smp_snoop(ibdev, port_num, in_mad, prev_lid);
-318 		node_desc_override(ibdev, out_mad);
719 		/* slaves get node desc from FW */
720 		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
721 			node_desc_override(ibdev, out_mad);
319 722 	}
320 723
321 724 	/* set return bit in status of directed route responses */
@@ -398,6 +801,8 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
398 801 static void send_handler(struct ib_mad_agent *agent,
399 802 			 struct ib_mad_send_wc *mad_send_wc)
400 803 {
804 	if (mad_send_wc->send_buf->context[0])
805 		ib_destroy_ah(mad_send_wc->send_buf->context[0]);
401 806 	ib_free_send_mad(mad_send_wc->send_buf);
402 807 }
403 808
@@ -456,6 +861,90 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
456 861 	}
457 862 }
458 863
864static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
865{
866 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
867
868 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
869 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
870 MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
871}
872
873static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
874{
875 /* re-configure the alias-guid and mcg's */
876 if (mlx4_is_master(dev->dev)) {
877 mlx4_ib_invalidate_all_guid_record(dev, port_num);
878
879 if (!dev->sriov.is_going_down) {
880 mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
881 mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
882 MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
883 }
884 }
885 mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
886}
887
888static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
889 struct mlx4_eqe *eqe)
890{
891 __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
892 GET_MASK_FROM_EQE(eqe));
893}
894
895static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
896 u32 guid_tbl_blk_num, u32 change_bitmap)
897{
898 struct ib_smp *in_mad = NULL;
899 struct ib_smp *out_mad = NULL;
900 u16 i;
901
902 if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
903 return;
904
905 in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
906 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
907 if (!in_mad || !out_mad) {
908 mlx4_ib_warn(&dev->ib_dev, "failed to allocate memory for guid info mads\n");
909 goto out;
910 }
911
912 guid_tbl_blk_num *= 4;
913
914 for (i = 0; i < 4; i++) {
915 if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
916 continue;
917 memset(in_mad, 0, sizeof *in_mad);
918 memset(out_mad, 0, sizeof *out_mad);
919
920 in_mad->base_version = 1;
921 in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
922 in_mad->class_version = 1;
923 in_mad->method = IB_MGMT_METHOD_GET;
924 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
925 in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i);
926
927 if (mlx4_MAD_IFC(dev,
928 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
929 port_num, NULL, NULL, in_mad, out_mad)) {
930 mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
931 goto out;
932 }
933
934 mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
935 port_num,
936 (u8 *)(&((struct ib_smp *)out_mad)->data));
937 mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
938 port_num,
939 (u8 *)(&((struct ib_smp *)out_mad)->data));
940 }
941
942out:
943 kfree(in_mad);
944 kfree(out_mad);
945 return;
946}
947
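
On the block arithmetic in handle_slaves_guid_change() above: a port_mgmt_change EQE reports GUID-table changes in units of 32 entries, while one GUIDInfo MAD (one attr_mod value) covers 8, hence the *= 4 and the per-byte test of the change mask, one byte gating each of the four sub-blocks. A sketch of that mapping (my reading of the code; values chosen for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t eqe_block = 2;             /* from GET_BLK_PTR_FROM_EQE() */
    uint32_t change_mask = 0x00ff0000;  /* only the third sub-block changed */
    uint32_t first_attr_mod = eqe_block * 4;

    for (int i = 0; i < 4; i++) {
        if (change_mask && !((change_mask >> (8 * i)) & 0xff))
            continue;               /* nothing changed in this MAD's range */
        assert(i == 2);             /* only the third sub-block is queried */
        assert(first_attr_mod + i == 10);   /* attr_mod sent in the SMP */
    }
    return 0;
}
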
459 948 void handle_port_mgmt_change_event(struct work_struct *work)
460 949 {
461 950 	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
@@ -463,6 +952,8 @@ void handle_port_mgmt_change_event(struct work_struct *work)
463 952 	struct mlx4_eqe *eqe = &(ew->ib_eqe);
464 953 	u8 port = eqe->event.port_mgmt_change.port;
465 954 	u32 changed_attr;
955 	u32 tbl_block;
956 	u32 change_bitmap;
466 957
467 958 	switch (eqe->subtype) {
468 959 	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
@@ -478,24 +969,36 @@ void handle_port_mgmt_change_event(struct work_struct *work)
478 969
479 970 		/* Check if it is a lid change event */
480 971 		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
-481 			mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);
972 			handle_lid_change_event(dev, port);
482 973
483 974 		/* Generate GUID changed event */
-484 		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
975 		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
485 976 			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
977 			/*if master, notify all slaves*/
978 			if (mlx4_is_master(dev->dev))
979 				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
980 							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
981 		}
486 982
487 983 		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
-488 			mlx4_ib_dispatch_event(dev, port,
-489 					       IB_EVENT_CLIENT_REREGISTER);
984 			handle_client_rereg_event(dev, port);
490 985 		break;
491 986
492 987 	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
493 988 		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
989 		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
990 			propagate_pkey_ev(dev, port, eqe);
494 991 		break;
495 992 	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
496 993 		/* paravirtualized master's guid is guid 0 -- does not change */
497 994 		if (!mlx4_is_master(dev->dev))
498 995 			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
996 		/*if master, notify relevant slaves*/
997 		else if (!dev->sriov.is_going_down) {
998 			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
999 			change_bitmap = GET_MASK_FROM_EQE(eqe);
1000 			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
1001 		}
499 1002 		break;
500 1003 	default:
501 1004 		pr_warn("Unsupported subtype 0x%x for "
@@ -516,3 +1019,1035 @@ void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
516 1019
517 1020 	ib_dispatch_event(&event);
518 1021 }
1022
1023static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
1024{
1025 unsigned long flags;
1026 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1027 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1028 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1029 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1030 queue_work(ctx->wq, &ctx->work);
1031 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1032}
1033
1034static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
1035 struct mlx4_ib_demux_pv_qp *tun_qp,
1036 int index)
1037{
1038 struct ib_sge sg_list;
1039 struct ib_recv_wr recv_wr, *bad_recv_wr;
1040 int size;
1041
1042 size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1043 sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
1044
1045 sg_list.addr = tun_qp->ring[index].map;
1046 sg_list.length = size;
1047 sg_list.lkey = ctx->mr->lkey;
1048
1049 recv_wr.next = NULL;
1050 recv_wr.sg_list = &sg_list;
1051 recv_wr.num_sge = 1;
1052 recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
1053 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1054 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1055 size, DMA_FROM_DEVICE);
1056 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
1057}
1058
1059static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
1060 int slave, struct ib_sa_mad *sa_mad)
1061{
1062 int ret = 0;
1063
1064 /* dispatch to different sa handlers */
1065 switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
1066 case IB_SA_ATTR_MC_MEMBER_REC:
1067 ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
1068 break;
1069 default:
1070 break;
1071 }
1072 return ret;
1073}
1074
1075static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
1076{
1077 int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
1078
1079 return (qpn >= proxy_start && qpn <= proxy_start + 1);
1080}
1081
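
is_proxy_qp0() above, the dqpn computation in mlx4_ib_send_to_slave(), and the src_qp checks in mlx4_ib_multiplex_mad() all assume the same proxy QPN layout: each slave owns eight consecutive QPNs starting at base_proxy_sqpn + 8*slave, with bit 0 encoding port - 1 and bit 1 the QP type (0 = SMI, 1 = GSI). A sketch of the decode (BASE_PROXY_SQPN is an assumed value, for illustration only):

#include <assert.h>

#define BASE_PROXY_SQPN 0x1000  /* stands in for phys_caps.base_proxy_sqpn */

static int proxy_qpn(int slave, int port, int qpt)  /* qpt: 0=SMI, 1=GSI */
{
    return BASE_PROXY_SQPN + 8 * slave + port + 2 * qpt - 1;
}

int main(void)
{
    int qpn = proxy_qpn(3, 2, 1);       /* slave 3, port 2, GSI */

    assert(((qpn & ~0x7) - BASE_PROXY_SQPN) / 8 == 3);  /* slave decode */
    assert((qpn & 0x1) == 2 - 1);       /* bit 0 encodes port - 1 */
    assert(qpn & 0x2);                  /* bit 1 set => GSI, not QP0 */
    return 0;
}
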
1082
1083int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1084 enum ib_qp_type dest_qpt, u16 pkey_index, u32 remote_qpn,
1085 u32 qkey, struct ib_ah_attr *attr, struct ib_mad *mad)
1086{
1087 struct ib_sge list;
1088 struct ib_send_wr wr, *bad_wr;
1089 struct mlx4_ib_demux_pv_ctx *sqp_ctx;
1090 struct mlx4_ib_demux_pv_qp *sqp;
1091 struct mlx4_mad_snd_buf *sqp_mad;
1092 struct ib_ah *ah;
1093 struct ib_qp *send_qp = NULL;
1094 unsigned wire_tx_ix = 0;
1095 int ret = 0;
1096 u16 wire_pkey_ix;
1097 int src_qpnum;
1098 u8 sgid_index;
1099
1100
1101 sqp_ctx = dev->sriov.sqps[port-1];
1102
1103 /* check if proxy qp created */
1104 if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1105 return -EAGAIN;
1106
1107 /* QP0 forwarding only for Dom0 */
1108 if (dest_qpt == IB_QPT_SMI && (mlx4_master_func_num(dev->dev) != slave))
1109 return -EINVAL;
1110
1111 if (dest_qpt == IB_QPT_SMI) {
1112 src_qpnum = 0;
1113 sqp = &sqp_ctx->qp[0];
1114 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
1115 } else {
1116 src_qpnum = 1;
1117 sqp = &sqp_ctx->qp[1];
1118 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
1119 }
1120
1121 send_qp = sqp->qp;
1122
1123 /* create ah */
1124 sgid_index = attr->grh.sgid_index;
1125 attr->grh.sgid_index = 0;
1126 ah = ib_create_ah(sqp_ctx->pd, attr);
1127 if (IS_ERR(ah))
1128 return -ENOMEM;
1129 attr->grh.sgid_index = sgid_index;
1130 to_mah(ah)->av.ib.gid_index = sgid_index;
1131 /* get rid of force-loopback bit */
1132 to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
1133 spin_lock(&sqp->tx_lock);
1134 if (sqp->tx_ix_head - sqp->tx_ix_tail >=
1135 (MLX4_NUM_TUNNEL_BUFS - 1))
1136 ret = -EAGAIN;
1137 else
1138 wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
1139 spin_unlock(&sqp->tx_lock);
1140 if (ret)
1141 goto out;
1142
1143 sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1144 if (sqp->tx_ring[wire_tx_ix].ah)
1145 ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
1146 sqp->tx_ring[wire_tx_ix].ah = ah;
1147 ib_dma_sync_single_for_cpu(&dev->ib_dev,
1148 sqp->tx_ring[wire_tx_ix].buf.map,
1149 sizeof (struct mlx4_mad_snd_buf),
1150 DMA_TO_DEVICE);
1151
1152 memcpy(&sqp_mad->payload, mad, sizeof *mad);
1153
1154 ib_dma_sync_single_for_device(&dev->ib_dev,
1155 sqp->tx_ring[wire_tx_ix].buf.map,
1156 sizeof (struct mlx4_mad_snd_buf),
1157 DMA_TO_DEVICE);
1158
1159 list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
1160 list.length = sizeof (struct mlx4_mad_snd_buf);
1161 list.lkey = sqp_ctx->mr->lkey;
1162
1163 wr.wr.ud.ah = ah;
1164 wr.wr.ud.port_num = port;
1165 wr.wr.ud.pkey_index = wire_pkey_ix;
1166 wr.wr.ud.remote_qkey = qkey;
1167 wr.wr.ud.remote_qpn = remote_qpn;
1168 wr.next = NULL;
1169 wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
1170 wr.sg_list = &list;
1171 wr.num_sge = 1;
1172 wr.opcode = IB_WR_SEND;
1173 wr.send_flags = IB_SEND_SIGNALED;
1174
1175 ret = ib_post_send(send_qp, &wr, &bad_wr);
1176out:
1177 if (ret)
1178 ib_destroy_ah(ah);
1179 return ret;
1180}
1181
1182static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
1183{
1184 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1185 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1186 int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
1187 struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1188 struct mlx4_ib_ah ah;
1189 struct ib_ah_attr ah_attr;
1190 u8 *slave_id;
1191 int slave;
1192
1193 /* Get slave that sent this packet */
1194 if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
1195 wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
1196 (wc->src_qp & 0x1) != ctx->port - 1 ||
1197 wc->src_qp & 0x4) {
1198 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
1199 return;
1200 }
1201 slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
1202 if (slave != ctx->slave) {
1203 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1204 "belongs to another slave\n", wc->src_qp);
1205 return;
1206 }
1207 if (slave != mlx4_master_func_num(dev->dev) && !(wc->src_qp & 0x2)) {
1208 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
1209 "non-master trying to send QP0 packets\n", wc->src_qp);
1210 return;
1211 }
1212
1213 /* Map transaction ID */
1214 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
1215 sizeof (struct mlx4_tunnel_mad),
1216 DMA_FROM_DEVICE);
1217 switch (tunnel->mad.mad_hdr.method) {
1218 case IB_MGMT_METHOD_SET:
1219 case IB_MGMT_METHOD_GET:
1220 case IB_MGMT_METHOD_REPORT:
1221 case IB_SA_METHOD_GET_TABLE:
1222 case IB_SA_METHOD_DELETE:
1223 case IB_SA_METHOD_GET_MULTI:
1224 case IB_SA_METHOD_GET_TRACE_TBL:
1225 slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
1226 if (*slave_id) {
1227 mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
1228 "class:%d slave:%d\n", *slave_id,
1229 tunnel->mad.mad_hdr.mgmt_class, slave);
1230 return;
1231 } else
1232 *slave_id = slave;
1233 default:
1234 /* nothing */;
1235 }
1236
1237 /* Class-specific handling */
1238 switch (tunnel->mad.mad_hdr.mgmt_class) {
1239 case IB_MGMT_CLASS_SUBN_ADM:
1240 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1241 (struct ib_sa_mad *) &tunnel->mad))
1242 return;
1243 break;
1244 case IB_MGMT_CLASS_CM:
1245 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
1246 (struct ib_mad *) &tunnel->mad))
1247 return;
1248 break;
1249 case IB_MGMT_CLASS_DEVICE_MGMT:
1250 if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
1251 tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
1252 return;
1253 break;
1254 default:
1255 /* Drop unsupported classes for slaves in tunnel mode */
1256 if (slave != mlx4_master_func_num(dev->dev)) {
1257 mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
1258 "for slave:%d\n", tunnel->mad.mad_hdr.mgmt_class, slave);
1259 return;
1260 }
1261 }
1262
 1263 	/* We are using standard ib_core services to send the mad, so generate a
 1264 	 * standard address handle by decoding the tunnelled mlx4_ah fields */
1265 memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
1266 ah.ibah.device = ctx->ib_dev;
1267 mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1268 if ((ah_attr.ah_flags & IB_AH_GRH) &&
1269 (ah_attr.grh.sgid_index != slave)) {
1270 mlx4_ib_warn(ctx->ib_dev, "slave:%d accessed invalid sgid_index:%d\n",
1271 slave, ah_attr.grh.sgid_index);
1272 return;
1273 }
1274
1275 mlx4_ib_send_to_wire(dev, slave, ctx->port,
1276 is_proxy_qp0(dev, wc->src_qp, slave) ?
1277 IB_QPT_SMI : IB_QPT_GSI,
1278 be16_to_cpu(tunnel->hdr.pkey_index),
1279 be32_to_cpu(tunnel->hdr.remote_qpn),
1280 be32_to_cpu(tunnel->hdr.qkey),
1281 &ah_attr, &tunnel->mad);
1282}
1283
1284static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1285 enum ib_qp_type qp_type, int is_tun)
1286{
1287 int i;
1288 struct mlx4_ib_demux_pv_qp *tun_qp;
1289 int rx_buf_size, tx_buf_size;
1290
1291 if (qp_type > IB_QPT_GSI)
1292 return -EINVAL;
1293
1294 tun_qp = &ctx->qp[qp_type];
1295
1296 tun_qp->ring = kzalloc(sizeof (struct mlx4_ib_buf) * MLX4_NUM_TUNNEL_BUFS,
1297 GFP_KERNEL);
1298 if (!tun_qp->ring)
1299 return -ENOMEM;
1300
1301 tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1302 sizeof (struct mlx4_ib_tun_tx_buf),
1303 GFP_KERNEL);
1304 if (!tun_qp->tx_ring) {
1305 kfree(tun_qp->ring);
1306 tun_qp->ring = NULL;
1307 return -ENOMEM;
1308 }
1309
1310 if (is_tun) {
1311 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1312 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1313 } else {
1314 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1315 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1316 }
1317
1318 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1319 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1320 if (!tun_qp->ring[i].addr)
1321 goto err;
1322 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1323 tun_qp->ring[i].addr,
1324 rx_buf_size,
1325 DMA_FROM_DEVICE);
1326 }
1327
1328 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1329 tun_qp->tx_ring[i].buf.addr =
1330 kmalloc(tx_buf_size, GFP_KERNEL);
1331 if (!tun_qp->tx_ring[i].buf.addr)
1332 goto tx_err;
1333 tun_qp->tx_ring[i].buf.map =
1334 ib_dma_map_single(ctx->ib_dev,
1335 tun_qp->tx_ring[i].buf.addr,
1336 tx_buf_size,
1337 DMA_TO_DEVICE);
1338 tun_qp->tx_ring[i].ah = NULL;
1339 }
1340 spin_lock_init(&tun_qp->tx_lock);
1341 tun_qp->tx_ix_head = 0;
1342 tun_qp->tx_ix_tail = 0;
1343 tun_qp->proxy_qpt = qp_type;
1344
1345 return 0;
1346
1347tx_err:
1348 while (i > 0) {
1349 --i;
1350 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1351 tx_buf_size, DMA_TO_DEVICE);
1352 kfree(tun_qp->tx_ring[i].buf.addr);
1353 }
1354 kfree(tun_qp->tx_ring);
1355 tun_qp->tx_ring = NULL;
1356 i = MLX4_NUM_TUNNEL_BUFS;
1357err:
1358 while (i > 0) {
1359 --i;
1360 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1361 rx_buf_size, DMA_FROM_DEVICE);
1362 kfree(tun_qp->ring[i].addr);
1363 }
1364 kfree(tun_qp->ring);
1365 tun_qp->ring = NULL;
1366 return -ENOMEM;
1367}
1368
1369static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1370 enum ib_qp_type qp_type, int is_tun)
1371{
1372 int i;
1373 struct mlx4_ib_demux_pv_qp *tun_qp;
1374 int rx_buf_size, tx_buf_size;
1375
1376 if (qp_type > IB_QPT_GSI)
1377 return;
1378
1379 tun_qp = &ctx->qp[qp_type];
1380 if (is_tun) {
1381 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1382 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1383 } else {
1384 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1385 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1386 }
1387
1388
1389 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1390 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1391 rx_buf_size, DMA_FROM_DEVICE);
1392 kfree(tun_qp->ring[i].addr);
1393 }
1394
1395 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1396 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1397 tx_buf_size, DMA_TO_DEVICE);
1398 kfree(tun_qp->tx_ring[i].buf.addr);
1399 if (tun_qp->tx_ring[i].ah)
1400 ib_destroy_ah(tun_qp->tx_ring[i].ah);
1401 }
1402 kfree(tun_qp->tx_ring);
1403 kfree(tun_qp->ring);
1404}
1405
1406static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
1407{
1408 struct mlx4_ib_demux_pv_ctx *ctx;
1409 struct mlx4_ib_demux_pv_qp *tun_qp;
1410 struct ib_wc wc;
1411 int ret;
1412 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1413 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1414
1415 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1416 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1417 if (wc.status == IB_WC_SUCCESS) {
1418 switch (wc.opcode) {
1419 case IB_WC_RECV:
1420 mlx4_ib_multiplex_mad(ctx, &wc);
1421 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1422 wc.wr_id &
1423 (MLX4_NUM_TUNNEL_BUFS - 1));
1424 if (ret)
1425 pr_err("Failed reposting tunnel "
1426 "buf:%lld\n", wc.wr_id);
1427 break;
1428 case IB_WC_SEND:
1429 pr_debug("received tunnel send completion:"
1430 "wrid=0x%llx, status=0x%x\n",
1431 wc.wr_id, wc.status);
1432 ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1433 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1434 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1435 = NULL;
1436 spin_lock(&tun_qp->tx_lock);
1437 tun_qp->tx_ix_tail++;
1438 spin_unlock(&tun_qp->tx_lock);
1439
1440 break;
1441 default:
1442 break;
1443 }
1444 } else {
1445 pr_debug("mlx4_ib: completion error in tunnel: %d."
1446 " status = %d, wrid = 0x%llx\n",
1447 ctx->slave, wc.status, wc.wr_id);
1448 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1449 ib_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1450 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1451 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1452 = NULL;
1453 spin_lock(&tun_qp->tx_lock);
1454 tun_qp->tx_ix_tail++;
1455 spin_unlock(&tun_qp->tx_lock);
1456 }
1457 }
1458 }
1459}
1460
1461static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
1462{
1463 struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
1464
1465 /* It's worse than that! He's dead, Jim! */
1466 pr_err("Fatal error (%d) on a MAD QP on port %d\n",
1467 event->event, sqp->port);
1468}
1469
1470static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1471 enum ib_qp_type qp_type, int create_tun)
1472{
1473 int i, ret;
1474 struct mlx4_ib_demux_pv_qp *tun_qp;
1475 struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
1476 struct ib_qp_attr attr;
1477 int qp_attr_mask_INIT;
1478
1479 if (qp_type > IB_QPT_GSI)
1480 return -EINVAL;
1481
1482 tun_qp = &ctx->qp[qp_type];
1483
1484 memset(&qp_init_attr, 0, sizeof qp_init_attr);
1485 qp_init_attr.init_attr.send_cq = ctx->cq;
1486 qp_init_attr.init_attr.recv_cq = ctx->cq;
1487 qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
1488 qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
1489 qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
1490 qp_init_attr.init_attr.cap.max_send_sge = 1;
1491 qp_init_attr.init_attr.cap.max_recv_sge = 1;
1492 if (create_tun) {
1493 qp_init_attr.init_attr.qp_type = IB_QPT_UD;
1494 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
1495 qp_init_attr.port = ctx->port;
1496 qp_init_attr.slave = ctx->slave;
1497 qp_init_attr.proxy_qp_type = qp_type;
1498 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
1499 IB_QP_QKEY | IB_QP_PORT;
1500 } else {
1501 qp_init_attr.init_attr.qp_type = qp_type;
1502 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
1503 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
1504 }
1505 qp_init_attr.init_attr.port_num = ctx->port;
1506 qp_init_attr.init_attr.qp_context = ctx;
1507 qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
1508 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1509 if (IS_ERR(tun_qp->qp)) {
1510 ret = PTR_ERR(tun_qp->qp);
1511 tun_qp->qp = NULL;
1512 pr_err("Couldn't create %s QP (%d)\n",
1513 create_tun ? "tunnel" : "special", ret);
1514 return ret;
1515 }
1516
1517 memset(&attr, 0, sizeof attr);
1518 attr.qp_state = IB_QPS_INIT;
1519 attr.pkey_index =
1520 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1521 attr.qkey = IB_QP1_QKEY;
1522 attr.port_num = ctx->port;
1523 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1524 if (ret) {
1525 pr_err("Couldn't change %s qp state to INIT (%d)\n",
1526 create_tun ? "tunnel" : "special", ret);
1527 goto err_qp;
1528 }
1529 attr.qp_state = IB_QPS_RTR;
1530 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1531 if (ret) {
1532 pr_err("Couldn't change %s qp state to RTR (%d)\n",
1533 create_tun ? "tunnel" : "special", ret);
1534 goto err_qp;
1535 }
1536 attr.qp_state = IB_QPS_RTS;
1537 attr.sq_psn = 0;
1538 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1539 if (ret) {
1540 pr_err("Couldn't change %s qp state to RTS (%d)\n",
1541 create_tun ? "tunnel" : "special", ret);
1542 goto err_qp;
1543 }
1544
1545 for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1546 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1547 if (ret) {
1548 pr_err(" mlx4_ib_post_pv_buf error"
1549 " (err = %d, i = %d)\n", ret, i);
1550 goto err_qp;
1551 }
1552 }
1553 return 0;
1554
1555err_qp:
1556 ib_destroy_qp(tun_qp->qp);
1557 tun_qp->qp = NULL;
1558 return ret;
1559}
1560
1561/*
1562 * IB MAD completion callback for real SQPs
1563 */
1564static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1565{
1566 struct mlx4_ib_demux_pv_ctx *ctx;
1567 struct mlx4_ib_demux_pv_qp *sqp;
1568 struct ib_wc wc;
1569 struct ib_grh *grh;
1570 struct ib_mad *mad;
1571
1572 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1573 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1574
1575 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1576 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1577 if (wc.status == IB_WC_SUCCESS) {
1578 switch (wc.opcode) {
1579 case IB_WC_SEND:
1580 ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1581 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1582 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1583 = NULL;
1584 spin_lock(&sqp->tx_lock);
1585 sqp->tx_ix_tail++;
1586 spin_unlock(&sqp->tx_lock);
1587 break;
1588 case IB_WC_RECV:
1589 mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
1590 (sqp->ring[wc.wr_id &
1591 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
1592 grh = &(((struct mlx4_mad_rcv_buf *)
1593 (sqp->ring[wc.wr_id &
1594 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
1595 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
1596 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
1597 (MLX4_NUM_TUNNEL_BUFS - 1)))
1598 pr_err("Failed reposting SQP "
1599 "buf:%lld\n", wc.wr_id);
1600 break;
1601 default:
1602 BUG_ON(1);
1603 break;
1604 }
1605 } else {
1606 pr_debug("mlx4_ib: completion error in tunnel: %d."
1607 " status = %d, wrid = 0x%llx\n",
1608 ctx->slave, wc.status, wc.wr_id);
1609 if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1610 ib_destroy_ah(sqp->tx_ring[wc.wr_id &
1611 (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1612 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1613 = NULL;
1614 spin_lock(&sqp->tx_lock);
1615 sqp->tx_ix_tail++;
1616 spin_unlock(&sqp->tx_lock);
1617 }
1618 }
1619 }
1620}
1621
1622static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
1623 struct mlx4_ib_demux_pv_ctx **ret_ctx)
1624{
1625 struct mlx4_ib_demux_pv_ctx *ctx;
1626
1627 *ret_ctx = NULL;
1628 ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
1629 if (!ctx) {
1630 pr_err("failed allocating pv resource context "
1631 "for port %d, slave %d\n", port, slave);
1632 return -ENOMEM;
1633 }
1634
1635 ctx->ib_dev = &dev->ib_dev;
1636 ctx->port = port;
1637 ctx->slave = slave;
1638 *ret_ctx = ctx;
1639 return 0;
1640}
1641
1642static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
1643{
1644 if (dev->sriov.demux[port - 1].tun[slave]) {
1645 kfree(dev->sriov.demux[port - 1].tun[slave]);
1646 dev->sriov.demux[port - 1].tun[slave] = NULL;
1647 }
1648}
1649
1650static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1651 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1652{
1653 int ret, cq_size;
1654
1655 if (ctx->state != DEMUX_PV_STATE_DOWN)
1656 return -EEXIST;
1657
1658 ctx->state = DEMUX_PV_STATE_STARTING;
1659 /* have QP0 only on port owner, and only if link layer is IB */
1660 if (ctx->slave == mlx4_master_func_num(to_mdev(ctx->ib_dev)->dev) &&
1661 rdma_port_get_link_layer(ibdev, ctx->port) == IB_LINK_LAYER_INFINIBAND)
1662 ctx->has_smi = 1;
1663
1664 if (ctx->has_smi) {
1665 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
1666 if (ret) {
1667 pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
1668 goto err_out;
1669 }
1670 }
1671
1672 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
1673 if (ret) {
1674 pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
1675 goto err_out_qp0;
1676 }
1677
1678 cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
1679 if (ctx->has_smi)
1680 cq_size *= 2;
1681
1682 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
1683 NULL, ctx, cq_size, 0);
1684 if (IS_ERR(ctx->cq)) {
1685 ret = PTR_ERR(ctx->cq);
1686 pr_err("Couldn't create tunnel CQ (%d)\n", ret);
1687 goto err_buf;
1688 }
1689
1690 ctx->pd = ib_alloc_pd(ctx->ib_dev);
1691 if (IS_ERR(ctx->pd)) {
1692 ret = PTR_ERR(ctx->pd);
1693 pr_err("Couldn't create tunnel PD (%d)\n", ret);
1694 goto err_cq;
1695 }
1696
1697 ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE);
1698 if (IS_ERR(ctx->mr)) {
1699 ret = PTR_ERR(ctx->mr);
1700 pr_err("Couldn't get tunnel DMA MR (%d)\n", ret);
1701 goto err_pd;
1702 }
1703
1704 if (ctx->has_smi) {
1705 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
1706 if (ret) {
1707 pr_err("Couldn't create %s QP0 (%d)\n",
1708 create_tun ? "tunnel for" : "", ret);
1709 goto err_mr;
1710 }
1711 }
1712
1713 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
1714 if (ret) {
1715 pr_err("Couldn't create %s QP1 (%d)\n",
1716 create_tun ? "tunnel for" : "", ret);
1717 goto err_qp0;
1718 }
1719
1720 if (create_tun)
1721 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
1722 else
1723 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
1724
1725 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
1726
1727 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1728 if (ret) {
1729 pr_err("Couldn't arm tunnel cq (%d)\n", ret);
1730 goto err_wq;
1731 }
1732 ctx->state = DEMUX_PV_STATE_ACTIVE;
1733 return 0;
1734
1735err_wq:
1736 ctx->wq = NULL;
1737 ib_destroy_qp(ctx->qp[1].qp);
1738 ctx->qp[1].qp = NULL;
1739
1740
1741err_qp0:
1742 if (ctx->has_smi)
1743 ib_destroy_qp(ctx->qp[0].qp);
1744 ctx->qp[0].qp = NULL;
1745
1746err_mr:
1747 ib_dereg_mr(ctx->mr);
1748 ctx->mr = NULL;
1749
1750err_pd:
1751 ib_dealloc_pd(ctx->pd);
1752 ctx->pd = NULL;
1753
1754err_cq:
1755 ib_destroy_cq(ctx->cq);
1756 ctx->cq = NULL;
1757
1758err_buf:
1759 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
1760
1761err_out_qp0:
1762 if (ctx->has_smi)
1763 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
1764err_out:
1765 ctx->state = DEMUX_PV_STATE_DOWN;
1766 return ret;
1767}
1768
1769static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
1770 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
1771{
1772 if (!ctx)
1773 return;
1774 if (ctx->state > DEMUX_PV_STATE_DOWN) {
1775 ctx->state = DEMUX_PV_STATE_DOWNING;
1776 if (flush)
1777 flush_workqueue(ctx->wq);
1778 if (ctx->has_smi) {
1779 ib_destroy_qp(ctx->qp[0].qp);
1780 ctx->qp[0].qp = NULL;
1781 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
1782 }
1783 ib_destroy_qp(ctx->qp[1].qp);
1784 ctx->qp[1].qp = NULL;
1785 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
1786 ib_dereg_mr(ctx->mr);
1787 ctx->mr = NULL;
1788 ib_dealloc_pd(ctx->pd);
1789 ctx->pd = NULL;
1790 ib_destroy_cq(ctx->cq);
1791 ctx->cq = NULL;
1792 ctx->state = DEMUX_PV_STATE_DOWN;
1793 }
1794}
1795
1796static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
1797 int port, int do_init)
1798{
1799 int ret = 0;
1800
1801 if (!do_init) {
1802 clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
1803 /* for master, destroy real sqp resources */
1804 if (slave == mlx4_master_func_num(dev->dev))
1805 destroy_pv_resources(dev, slave, port,
1806 dev->sriov.sqps[port - 1], 1);
1807 /* destroy the tunnel qp resources */
1808 destroy_pv_resources(dev, slave, port,
1809 dev->sriov.demux[port - 1].tun[slave], 1);
1810 return 0;
1811 }
1812
1813 /* create the tunnel qp resources */
1814 ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
1815 dev->sriov.demux[port - 1].tun[slave]);
1816
1817 /* for master, create the real sqp resources */
1818 if (!ret && slave == mlx4_master_func_num(dev->dev))
1819 ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
1820 dev->sriov.sqps[port - 1]);
1821 return ret;
1822}
1823
1824void mlx4_ib_tunnels_update_work(struct work_struct *work)
1825{
1826 struct mlx4_ib_demux_work *dmxw;
1827
1828 dmxw = container_of(work, struct mlx4_ib_demux_work, work);
1829 mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
1830 dmxw->do_init);
1831 kfree(dmxw);
1832 return;
1833}
1834
1835static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
1836 struct mlx4_ib_demux_ctx *ctx,
1837 int port)
1838{
1839 char name[12];
1840 int ret = 0;
1841 int i;
1842
1843 ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
1844 sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
1845 if (!ctx->tun)
1846 return -ENOMEM;
1847
1848 ctx->dev = dev;
1849 ctx->port = port;
1850 ctx->ib_dev = &dev->ib_dev;
1851
1852 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
1853 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
1854 if (ret) {
1855 ret = -ENOMEM;
1856 goto err_mcg;
1857 }
1858 }
1859
1860 ret = mlx4_ib_mcg_port_init(ctx);
1861 if (ret) {
1862 pr_err("Failed initializing mcg para-virt (%d)\n", ret);
1863 goto err_mcg;
1864 }
1865
1866 snprintf(name, sizeof name, "mlx4_ibt%d", port);
1867 ctx->wq = create_singlethread_workqueue(name);
1868 if (!ctx->wq) {
1869 pr_err("Failed to create tunnelling WQ for port %d\n", port);
1870 ret = -ENOMEM;
1871 goto err_wq;
1872 }
1873
1874 snprintf(name, sizeof name, "mlx4_ibud%d", port);
1875 ctx->ud_wq = create_singlethread_workqueue(name);
1876 if (!ctx->ud_wq) {
1877 pr_err("Failed to create up/down WQ for port %d\n", port);
1878 ret = -ENOMEM;
1879 goto err_udwq;
1880 }
1881
1882 return 0;
1883
1884err_udwq:
1885 destroy_workqueue(ctx->wq);
1886 ctx->wq = NULL;
1887
1888err_wq:
1889 mlx4_ib_mcg_port_cleanup(ctx, 1);
1890err_mcg:
1891 for (i = 0; i < dev->dev->caps.sqp_demux; i++)
1892 free_pv_object(dev, i, port);
1893 kfree(ctx->tun);
1894 ctx->tun = NULL;
1895 return ret;
1896}
1897
1898static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
1899{
1900 if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
1901 sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
1902 flush_workqueue(sqp_ctx->wq);
1903 if (sqp_ctx->has_smi) {
1904 ib_destroy_qp(sqp_ctx->qp[0].qp);
1905 sqp_ctx->qp[0].qp = NULL;
1906 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
1907 }
1908 ib_destroy_qp(sqp_ctx->qp[1].qp);
1909 sqp_ctx->qp[1].qp = NULL;
1910 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
1911 ib_dereg_mr(sqp_ctx->mr);
1912 sqp_ctx->mr = NULL;
1913 ib_dealloc_pd(sqp_ctx->pd);
1914 sqp_ctx->pd = NULL;
1915 ib_destroy_cq(sqp_ctx->cq);
1916 sqp_ctx->cq = NULL;
1917 sqp_ctx->state = DEMUX_PV_STATE_DOWN;
1918 }
1919}
1920
1921static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
1922{
1923 int i;
1924 if (ctx) {
1925 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1926 mlx4_ib_mcg_port_cleanup(ctx, 1);
1927 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
1928 if (!ctx->tun[i])
1929 continue;
1930 if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
1931 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
1932 }
1933 flush_workqueue(ctx->wq);
1934 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
1935 destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
1936 free_pv_object(dev, i, ctx->port);
1937 }
1938 kfree(ctx->tun);
1939 destroy_workqueue(ctx->ud_wq);
1940 destroy_workqueue(ctx->wq);
1941 }
1942}
1943
1944static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
1945{
1946 int i;
1947
1948 if (!mlx4_is_master(dev->dev))
1949 return;
1950 /* initialize or tear down tunnel QPs for the master */
1951 for (i = 0; i < dev->dev->caps.num_ports; i++)
1952 mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
1953 return;
1954}
1955
1956int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
1957{
1958 int i = 0;
1959 int err;
1960
1961 if (!mlx4_is_mfunc(dev->dev))
1962 return 0;
1963
1964 dev->sriov.is_going_down = 0;
1965 spin_lock_init(&dev->sriov.going_down_lock);
1966 mlx4_ib_cm_paravirt_init(dev);
1967
1968 mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
1969
1970 if (mlx4_is_slave(dev->dev)) {
1971 mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
1972 return 0;
1973 }
1974
1975 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
1976 if (i == mlx4_master_func_num(dev->dev))
1977 mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
1978 else
1979 mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
1980 }
1981
1982 err = mlx4_ib_init_alias_guid_service(dev);
1983 if (err) {
1984 mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
1985 goto paravirt_err;
1986 }
1987 err = mlx4_ib_device_register_sysfs(dev);
1988 if (err) {
1989 mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
1990 goto sysfs_err;
1991 }
1992
1993 mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
1994 dev->dev->caps.sqp_demux);
1995 for (i = 0; i < dev->num_ports; i++) {
1996 union ib_gid gid;
1997 err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
1998 if (err)
1999 goto demux_err;
2000 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2001 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2002 &dev->sriov.sqps[i]);
2003 if (err)
2004 goto demux_err;
2005 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
2006 if (err)
2007 goto demux_err;
2008 }
2009 mlx4_ib_master_tunnels(dev, 1);
2010 return 0;
2011
2012demux_err:
2013 while (i > 0) {
2014 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2015 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2016 --i;
2017 }
2018 mlx4_ib_device_unregister_sysfs(dev);
2019
2020sysfs_err:
2021 mlx4_ib_destroy_alias_guid_service(dev);
2022
2023paravirt_err:
2024 mlx4_ib_cm_paravirt_clean(dev, -1);
2025
2026 return err;
2027}
2028
2029void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
2030{
2031 int i;
2032 unsigned long flags;
2033
2034 if (!mlx4_is_mfunc(dev->dev))
2035 return;
2036
2037 spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
2038 dev->sriov.is_going_down = 1;
2039 spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
2040 if (mlx4_is_master(dev->dev)) {
2041 for (i = 0; i < dev->num_ports; i++) {
2042 flush_workqueue(dev->sriov.demux[i].ud_wq);
2043 mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
2044 kfree(dev->sriov.sqps[i]);
2045 dev->sriov.sqps[i] = NULL;
2046 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2047 }
2048
2049 mlx4_ib_cm_paravirt_clean(dev, -1);
2050 mlx4_ib_destroy_alias_guid_service(dev);
2051 mlx4_ib_device_unregister_sysfs(dev);
2052 }
2053}