path: root/drivers/infiniband/hw/cxgb4
author		Linus Torvalds <torvalds@linux-foundation.org>	2017-12-29 02:06:01 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-12-29 02:06:01 -0500
commit		19286e4a7a0ce0a7ac584be614c40513d6318ad6 (patch)
tree		10b2f6defef19db7a49a66c940d4ba53221cee95 /drivers/infiniband/hw/cxgb4
parent		5f520fc318764df800789edd202b5e3b55130613 (diff)
parent		45e6ae7ef21b907dacb18da62d5787d74a31d860 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "This is the next batch of for-rc patches from RDMA. It includes the
  fix for the ipoib regression I mentioned last time, and the result of
  a fairly major debugging effort to get iser working reliably on cxgb4
  hardware - it turns out the cxgb4 driver was not handling QP error
  flushing properly, causing iser to fail.

   - cxgb4 fix for an iser testing failure as debugged by Steve and
     Sagi. The problem was a driver bug in the handling of shutting
     down a QP.

   - Various vmw_pvrdma fixes for bogus WARN_ON, missed resource free
     on error unwind and a use after free bug

   - Improper congestion counter values on mlx5 when link aggregation
     is enabled

   - ipoib lockdep regression introduced in this merge window

   - hfi1 regression supporting the device in a VM introduced in a
     recent patch

   - Typo that breaks future uAPI compatibility in the verbs core

   - More SELinux related oops fixing

   - Fix an oops during error unwind in mlx5"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/mlx5: Fix mlx5_ib_alloc_mr error flow
  IB/core: Verify that QP is security enabled in create and destroy
  IB/uverbs: Fix command checking as part of ib_uverbs_ex_modify_qp()
  IB/mlx5: Serialize access to the VMA list
  IB/hfi: Only read capability registers if the capability exists
  IB/ipoib: Fix lockdep issue found on ipoib_ib_dev_heavy_flush
  IB/mlx5: Fix congestion counters in LAG mode
  RDMA/vmw_pvrdma: Avoid use after free due to QP/CQ/SRQ destroy
  RDMA/vmw_pvrdma: Use refcount_dec_and_test to avoid warning
  RDMA/vmw_pvrdma: Call ib_umem_release on destroy QP path
  iw_cxgb4: when flushing, complete all wrs in a chain
  iw_cxgb4: reflect the original WR opcode in drain cqes
  iw_cxgb4: Only validate the MSN for successful completions
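For context, the drain path these iw_cxgb4 fixes repair is exercised when a ULP
(iSER in the bug report) tears down a connection: once the QP is in error, every
work request the ULP posts (including chained WRs) must still generate a
completion, otherwise the drain never finishes. A minimal sketch of the ULP-side
pattern using the core verbs helpers follows; ulp_shutdown_qp() is a hypothetical
name, not code from iSER or from this series.

  /* Sketch only: illustrates the teardown pattern that relies on the
   * provider flushing all posted WRs once the QP is in error.
   */
  #include <rdma/ib_verbs.h>

  static void ulp_shutdown_qp(struct ib_qp *qp)
  {
          /*
           * ib_drain_qp() posts special drain work requests on the SQ
           * and RQ and waits for their completions, so it only returns
           * once every earlier WR has been flushed.
           */
          ib_drain_qp(qp);

          /* Safe to free the QP now that no WRs remain outstanding. */
          ib_destroy_qp(qp);
  }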
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--	drivers/infiniband/hw/cxgb4/cq.c	13
-rw-r--r--	drivers/infiniband/hw/cxgb4/iw_cxgb4.h	 2
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	72
-rw-r--r--	drivers/infiniband/hw/cxgb4/t4.h	 6
4 files changed, 78 insertions, 15 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b7bfc536e00f..6f2b26126c64 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,7 +395,7 @@ next_cqe:
 
 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
 {
-	if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
+	if (DRAIN_CQE(cqe)) {
 		WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
 		return 0;
 	}
@@ -494,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	/*
 	 * Special cqe for drain WR completions...
 	 */
-	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+	if (DRAIN_CQE(hw_cqe)) {
 		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
 		*cqe = *hw_cqe;
 		goto skip_cqe;
@@ -571,10 +571,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 			ret = -EAGAIN;
 			goto skip_cqe;
 		}
-		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+		if (unlikely(!CQE_STATUS(hw_cqe) &&
+			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
 			t4_set_wq_in_error(wq);
-			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
-			goto proc_cqe;
+			hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
 		}
 		goto proc_cqe;
 	}
@@ -748,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 			c4iw_invalidate_mr(qhp->rhp,
 					   CQE_WRID_FR_STAG(&cqe));
 			break;
-		case C4IW_DRAIN_OPCODE:
-			wc->opcode = IB_WC_SEND;
-			break;
 		default:
 			pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n",
 			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 470f97a79ebb..65dd3726ca02 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
 	return IB_QPS_ERR;
 }
 
-#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
-
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
 	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 38bddd02a943..d5c92fc520d6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
 	return 0;
 }
 
-static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+static int ib_to_fw_opcode(int ib_opcode)
+{
+	int opcode;
+
+	switch (ib_opcode) {
+	case IB_WR_SEND_WITH_INV:
+		opcode = FW_RI_SEND_WITH_INV;
+		break;
+	case IB_WR_SEND:
+		opcode = FW_RI_SEND;
+		break;
+	case IB_WR_RDMA_WRITE:
+		opcode = FW_RI_RDMA_WRITE;
+		break;
+	case IB_WR_RDMA_READ:
+	case IB_WR_RDMA_READ_WITH_INV:
+		opcode = FW_RI_READ_REQ;
+		break;
+	case IB_WR_REG_MR:
+		opcode = FW_RI_FAST_REGISTER;
+		break;
+	case IB_WR_LOCAL_INV:
+		opcode = FW_RI_LOCAL_INV;
+		break;
+	default:
+		opcode = -EINVAL;
+	}
+	return opcode;
+}
+
+static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 {
 	struct t4_cqe cqe = {};
 	struct c4iw_cq *schp;
 	unsigned long flag;
 	struct t4_cq *cq;
+	int opcode;
 
 	schp = to_c4iw_cq(qhp->ibqp.send_cq);
 	cq = &schp->cq;
 
+	opcode = ib_to_fw_opcode(wr->opcode);
+	if (opcode < 0)
+		return opcode;
+
 	cqe.u.drain_cookie = wr->wr_id;
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+				 CQE_OPCODE_V(opcode) |
 				 CQE_TYPE_V(1) |
 				 CQE_SWCQE_V(1) |
+				 CQE_DRAIN_V(1) |
 				 CQE_QPID_V(qhp->wq.sq.qid));
 
 	spin_lock_irqsave(&schp->lock, flag);
@@ -819,6 +855,23 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 					   schp->ibcq.cq_context);
 		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
 	}
+	return 0;
+}
+
+static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr,
+				 struct ib_send_wr **bad_wr)
+{
+	int ret = 0;
+
+	while (wr) {
+		ret = complete_sq_drain_wr(qhp, wr);
+		if (ret) {
+			*bad_wr = wr;
+			break;
+		}
+		wr = wr->next;
+	}
+	return ret;
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -833,9 +886,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 
 	cqe.u.drain_cookie = wr->wr_id;
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
-				 CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+				 CQE_OPCODE_V(FW_RI_SEND) |
 				 CQE_TYPE_V(0) |
 				 CQE_SWCQE_V(1) |
+				 CQE_DRAIN_V(1) |
 				 CQE_QPID_V(qhp->wq.sq.qid));
 
 	spin_lock_irqsave(&rchp->lock, flag);
@@ -852,6 +906,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 	}
 }
 
+static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+	while (wr) {
+		complete_rq_drain_wr(qhp, wr);
+		wr = wr->next;
+	}
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		   struct ib_send_wr **bad_wr)
 {
@@ -875,7 +937,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	 */
 	if (qhp->wq.flushed) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		complete_sq_drain_wr(qhp, wr);
+		err = complete_sq_drain_wrs(qhp, wr, bad_wr);
 		return err;
 	}
 	num_wrs = t4_sq_avail(&qhp->wq);
@@ -1023,7 +1085,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	 */
 	if (qhp->wq.flushed) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
-		complete_rq_drain_wr(qhp, wr);
+		complete_rq_drain_wrs(qhp, wr);
 		return err;
 	}
 	num_wrs = t4_rq_avail(&qhp->wq);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e9ea94268d51..79e8ee12c391 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -197,6 +197,11 @@ struct t4_cqe {
 #define CQE_SWCQE_G(x)    ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M)
 #define CQE_SWCQE_V(x)    ((x)<<CQE_SWCQE_S)
 
+#define CQE_DRAIN_S       10
+#define CQE_DRAIN_M       0x1
+#define CQE_DRAIN_G(x)    ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M)
+#define CQE_DRAIN_V(x)    ((x)<<CQE_DRAIN_S)
+
 #define CQE_STATUS_S      5
 #define CQE_STATUS_M      0x1F
 #define CQE_STATUS_G(x)   ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M)
@@ -213,6 +218,7 @@ struct t4_cqe {
 #define CQE_OPCODE_V(x)   ((x)<<CQE_OPCODE_S)
 
 #define SW_CQE(x)         (CQE_SWCQE_G(be32_to_cpu((x)->header)))
+#define DRAIN_CQE(x)      (CQE_DRAIN_G(be32_to_cpu((x)->header)))
 #define CQE_QPID(x)       (CQE_QPID_G(be32_to_cpu((x)->header)))
 #define CQE_TYPE(x)       (CQE_TYPE_G(be32_to_cpu((x)->header)))
 #define SQ_TYPE(x)        (CQE_TYPE((x)))