author		Haggai Eran <haggaie@mellanox.com>	2016-02-29 08:45:08 -0500
committer	Doug Ledford <dledford@redhat.com>	2016-03-01 11:04:07 -0500
commit		25361e02c44873a17e0148d9d5c42fa2e938a019 (patch)
tree		b727559092760dfd37cb4bac47583e28508c3131
parent		7722f47e71e58592a2ba4437d27c802ba1c64e08 (diff)
IB/mlx5: Generate completions in software
The GSI QP emulation requires also emulating completions for transmitted
MADs. The CQ on which these completions are generated can also be used
by the hardware, and the MAD layer is free to use any CQ of the device
for the GSI QP.

Add a method for generating software completions to each mlx5 CQ.
Software completions are polled first, and generate calls to the
completion handler callback if necessary.

Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--	drivers/infiniband/hw/mlx5/cq.c		83
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	10
2 files changed, 88 insertions(+), 5 deletions(-)
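The new mlx5_ib_generate_wc() entry point is meant to be driven by the GSI
QP emulation (wired up in a separate patch). As an illustration only -- the
caller below is hypothetical, and names such as gsi->cq, gsi->ibqp and
complete_tx_mad() are assumed rather than taken from this patch -- a send
path that completes a MAD in software could fill an ib_wc and hand it to
the destination CQ like this:

	/* Hypothetical caller sketch: report a software-completed send WR.
	 * gsi->cq stands for whichever CQ the MAD layer attached to the GSI
	 * QP. mlx5_ib_generate_wc() is safe from atomic context: it uses a
	 * GFP_ATOMIC allocation and defers the completion handler callback
	 * to a work item.
	 */
	static int complete_tx_mad(struct mlx5_ib_gsi_qp *gsi, u64 wr_id)
	{
		struct ib_wc wc = {};

		wc.wr_id  = wr_id;		/* caller cookie, returned on poll */
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_SEND;
		wc.qp     = &gsi->ibqp;

		return mlx5_ib_generate_wc(gsi->cq, &wc);
	}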
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5ece9a89f7c2..2a9ad8401750 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -568,18 +568,44 @@ repoll:
 	return 0;
 }
 
+static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+			struct ib_wc *wc)
+{
+	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+	struct mlx5_ib_wc *soft_wc, *next;
+	int npolled = 0;
+
+	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
+		if (npolled >= num_entries)
+			break;
+
+		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
+			    cq->mcq.cqn);
+
+		wc[npolled++] = soft_wc->wc;
+		list_del(&soft_wc->list);
+		kfree(soft_wc);
+	}
+
+	return npolled;
+}
+
 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
 	struct mlx5_ib_qp *cur_qp = NULL;
 	unsigned long flags;
+	int soft_polled = 0;
 	int npolled;
 	int err = 0;
 
 	spin_lock_irqsave(&cq->lock, flags);
 
-	for (npolled = 0; npolled < num_entries; npolled++) {
-		err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
+	if (unlikely(!list_empty(&cq->wc_list)))
+		soft_polled = poll_soft_wc(cq, num_entries, wc);
+
+	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
+		err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
 		if (err)
 			break;
 	}
@@ -590,7 +616,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 	spin_unlock_irqrestore(&cq->lock, flags);
 
 	if (err == 0 || err == -EAGAIN)
-		return npolled;
+		return soft_polled + npolled;
 	else
 		return err;
 }
@@ -598,16 +624,27 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+	struct mlx5_ib_cq *cq = to_mcq(ibcq);
 	void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+	unsigned long irq_flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&cq->lock, irq_flags);
+	if (cq->notify_flags != IB_CQ_NEXT_COMP)
+		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
 
-	mlx5_cq_arm(&to_mcq(ibcq)->mcq,
+	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
+		ret = 1;
+	spin_unlock_irqrestore(&cq->lock, irq_flags);
+
+	mlx5_cq_arm(&cq->mcq,
 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
 		    uar_page,
 		    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
 		    to_mcq(ibcq)->mcq.cons_index);
 
-	return 0;
+	return ret;
 }
 
 static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
@@ -760,6 +797,14 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 	mlx5_db_free(dev->mdev, &cq->db);
 }
 
+static void notify_soft_wc_handler(struct work_struct *work)
+{
+	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
+					     notify_work);
+
+	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
@@ -810,6 +855,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				       &index, &inlen);
 		if (err)
 			goto err_create;
+
+		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
 	}
 
 	cq->cqe_size = cqe_size;
@@ -835,6 +882,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 	cq->mcq.comp  = mlx5_ib_cq_comp;
 	cq->mcq.event = mlx5_ib_cq_event;
 
+	INIT_LIST_HEAD(&cq->wc_list);
+
 	if (context)
 		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
 			err = -EFAULT;
@@ -1222,3 +1271,27 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
 	cq = to_mcq(ibcq);
 	return cq->cqe_size;
 }
+
+/* Called from atomic context */
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
+{
+	struct mlx5_ib_wc *soft_wc;
+	struct mlx5_ib_cq *cq = to_mcq(ibcq);
+	unsigned long flags;
+
+	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
+	if (!soft_wc)
+		return -ENOMEM;
+
+	soft_wc->wc = *wc;
+	spin_lock_irqsave(&cq->lock, flags);
+	list_add_tail(&soft_wc->list, &cq->wc_list);
+	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
+	    wc->status != IB_WC_SUCCESS) {
+		cq->notify_flags = 0;
+		schedule_work(&cq->notify_work);
+	}
+	spin_unlock_irqrestore(&cq->lock, flags);
+
+	return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a8fc345c088a..0142efb5dd9c 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -393,6 +393,14 @@ struct mlx5_ib_cq {
 	struct ib_umem	       *resize_umem;
 	int			cqe_size;
 	u32			create_flags;
+	struct list_head	wc_list;
+	enum ib_cq_notify_flags notify_flags;
+	struct work_struct	notify_work;
+};
+
+struct mlx5_ib_wc {
+	struct ib_wc wc;
+	struct list_head list;
 };
 
 struct mlx5_ib_srq {
@@ -785,6 +793,8 @@ int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
 			  struct ib_recv_wr **bad_wr);
 void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
 
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+
 static inline void init_query_mad(struct ib_smp *mad)
 {
 	mad->base_version = 1;
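
A note on the mlx5_ib_arm_cq() change above: returning 1 when
IB_CQ_REPORT_MISSED_EVENTS is set and wc_list is non-empty follows the
ib_req_notify_cq() contract, so a software completion queued between a poll
and the re-arm is not lost. A consumer would use the standard
drain-then-rearm loop; the sketch below is illustrative only, and
handle_completions() is a hypothetical helper:

	int n;
	struct ib_wc wc[16];

	do {
		/* Drain both software (wc_list) and hardware completions. */
		while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0)
			handle_completions(wc, n);	/* hypothetical helper */
		/* A positive return means events (e.g. queued software
		 * completions) may have been missed: poll again. */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);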