diff options
author | Alexander Schmidt <alexs@linux.vnet.ibm.com> | 2008-08-12 09:46:30 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-08-12 14:34:59 -0400 |
commit | 6773f079b72ab0200fe9afa9bb0c656a6af5400c (patch) | |
tree | 92b3b86492965367f62c37a84414b892d50504cf /drivers | |
parent | 129a10fb81309f455eeb444560ec38657d29c46f (diff) |
IB/ehca: Discard double CQE for one WR
Under rare circumstances, the ehca hardware might erroneously generate
two CQEs for the same WQE, which is not compliant with the IB spec and
will cause unpredictable errors like memory being freed twice. To
avoid this problem, the driver needs to detect the second CQE and
discard it.
For this purpose, introduce an array holding as many elements as the
SQ of the QP, called sq_map. Each sq_map entry stores a "reported"
flag for one WQE in the SQ. When a work request is posted to the SQ,
the respective "reported" flag is set to zero. After the arrival of a
CQE, the flag is set to 1, which allows detection of the occurrence of a
second CQE.
The mapping between WQE / CQE and the corresponding sq_map element is
implemented by replacing the lowest 16 bits of the wr_id with the
index in the queue map. The original 16 bits are stored in the sq_map
entry and are restored when the CQE is passed to the application.
Signed-off-by: Alexander Schmidt <alexs@linux.vnet.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/infiniband/hw/ehca/ehca_classes.h | 9 | ||||
-rw-r--r-- | drivers/infiniband/hw/ehca/ehca_qes.h | 1 | ||||
-rw-r--r-- | drivers/infiniband/hw/ehca/ehca_qp.c | 34 | ||||
-rw-r--r-- | drivers/infiniband/hw/ehca/ehca_reqs.c | 54 |
4 files changed, 78 insertions, 20 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h index 0b0618edd645..1ab919f836a8 100644 --- a/drivers/infiniband/hw/ehca/ehca_classes.h +++ b/drivers/infiniband/hw/ehca/ehca_classes.h | |||
@@ -156,6 +156,14 @@ struct ehca_mod_qp_parm { | |||
156 | 156 | ||
157 | #define EHCA_MOD_QP_PARM_MAX 4 | 157 | #define EHCA_MOD_QP_PARM_MAX 4 |
158 | 158 | ||
159 | #define QMAP_IDX_MASK 0xFFFFULL | ||
160 | |||
161 | /* struct for tracking if cqes have been reported to the application */ | ||
162 | struct ehca_qmap_entry { | ||
163 | u16 app_wr_id; | ||
164 | u16 reported; | ||
165 | }; | ||
166 | |||
159 | struct ehca_qp { | 167 | struct ehca_qp { |
160 | union { | 168 | union { |
161 | struct ib_qp ib_qp; | 169 | struct ib_qp ib_qp; |
@@ -165,6 +173,7 @@ struct ehca_qp { | |||
165 | enum ehca_ext_qp_type ext_type; | 173 | enum ehca_ext_qp_type ext_type; |
166 | enum ib_qp_state state; | 174 | enum ib_qp_state state; |
167 | struct ipz_queue ipz_squeue; | 175 | struct ipz_queue ipz_squeue; |
176 | struct ehca_qmap_entry *sq_map; | ||
168 | struct ipz_queue ipz_rqueue; | 177 | struct ipz_queue ipz_rqueue; |
169 | struct h_galpas galpas; | 178 | struct h_galpas galpas; |
170 | u32 qkey; | 179 | u32 qkey; |
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/infiniband/hw/ehca/ehca_qes.h index 818803057ebf..5d28e3e98a20 100644 --- a/drivers/infiniband/hw/ehca/ehca_qes.h +++ b/drivers/infiniband/hw/ehca/ehca_qes.h | |||
@@ -213,6 +213,7 @@ struct ehca_wqe { | |||
213 | #define WC_STATUS_ERROR_BIT 0x80000000 | 213 | #define WC_STATUS_ERROR_BIT 0x80000000 |
214 | #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 | 214 | #define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800 |
215 | #define WC_STATUS_PURGE_BIT 0x10 | 215 | #define WC_STATUS_PURGE_BIT 0x10 |
216 | #define WC_SEND_RECEIVE_BIT 0x80 | ||
216 | 217 | ||
217 | struct ehca_cqe { | 218 | struct ehca_cqe { |
218 | u64 work_request_id; | 219 | u64 work_request_id; |
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index c58fd4eead18..b6bcee036734 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c | |||
@@ -412,6 +412,7 @@ static struct ehca_qp *internal_create_qp( | |||
412 | struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, | 412 | struct ehca_shca *shca = container_of(pd->device, struct ehca_shca, |
413 | ib_device); | 413 | ib_device); |
414 | struct ib_ucontext *context = NULL; | 414 | struct ib_ucontext *context = NULL; |
415 | u32 nr_qes; | ||
415 | u64 h_ret; | 416 | u64 h_ret; |
416 | int is_llqp = 0, has_srq = 0; | 417 | int is_llqp = 0, has_srq = 0; |
417 | int qp_type, max_send_sge, max_recv_sge, ret; | 418 | int qp_type, max_send_sge, max_recv_sge, ret; |
@@ -715,6 +716,15 @@ static struct ehca_qp *internal_create_qp( | |||
715 | "and pages ret=%i", ret); | 716 | "and pages ret=%i", ret); |
716 | goto create_qp_exit2; | 717 | goto create_qp_exit2; |
717 | } | 718 | } |
719 | nr_qes = my_qp->ipz_squeue.queue_length / | ||
720 | my_qp->ipz_squeue.qe_size; | ||
721 | my_qp->sq_map = vmalloc(nr_qes * | ||
722 | sizeof(struct ehca_qmap_entry)); | ||
723 | if (!my_qp->sq_map) { | ||
724 | ehca_err(pd->device, "Couldn't allocate squeue " | ||
725 | "map ret=%i", ret); | ||
726 | goto create_qp_exit3; | ||
727 | } | ||
718 | } | 728 | } |
719 | 729 | ||
720 | if (HAS_RQ(my_qp)) { | 730 | if (HAS_RQ(my_qp)) { |
@@ -724,7 +734,7 @@ static struct ehca_qp *internal_create_qp( | |||
724 | if (ret) { | 734 | if (ret) { |
725 | ehca_err(pd->device, "Couldn't initialize rqueue " | 735 | ehca_err(pd->device, "Couldn't initialize rqueue " |
726 | "and pages ret=%i", ret); | 736 | "and pages ret=%i", ret); |
727 | goto create_qp_exit3; | 737 | goto create_qp_exit4; |
728 | } | 738 | } |
729 | } | 739 | } |
730 | 740 | ||
@@ -770,7 +780,7 @@ static struct ehca_qp *internal_create_qp( | |||
770 | if (!my_qp->mod_qp_parm) { | 780 | if (!my_qp->mod_qp_parm) { |
771 | ehca_err(pd->device, | 781 | ehca_err(pd->device, |
772 | "Could not alloc mod_qp_parm"); | 782 | "Could not alloc mod_qp_parm"); |
773 | goto create_qp_exit4; | 783 | goto create_qp_exit5; |
774 | } | 784 | } |
775 | } | 785 | } |
776 | } | 786 | } |
@@ -780,7 +790,7 @@ static struct ehca_qp *internal_create_qp( | |||
780 | h_ret = ehca_define_sqp(shca, my_qp, init_attr); | 790 | h_ret = ehca_define_sqp(shca, my_qp, init_attr); |
781 | if (h_ret != H_SUCCESS) { | 791 | if (h_ret != H_SUCCESS) { |
782 | ret = ehca2ib_return_code(h_ret); | 792 | ret = ehca2ib_return_code(h_ret); |
783 | goto create_qp_exit5; | 793 | goto create_qp_exit6; |
784 | } | 794 | } |
785 | } | 795 | } |
786 | 796 | ||
@@ -789,7 +799,7 @@ static struct ehca_qp *internal_create_qp( | |||
789 | if (ret) { | 799 | if (ret) { |
790 | ehca_err(pd->device, | 800 | ehca_err(pd->device, |
791 | "Couldn't assign qp to send_cq ret=%i", ret); | 801 | "Couldn't assign qp to send_cq ret=%i", ret); |
792 | goto create_qp_exit5; | 802 | goto create_qp_exit6; |
793 | } | 803 | } |
794 | } | 804 | } |
795 | 805 | ||
@@ -815,22 +825,26 @@ static struct ehca_qp *internal_create_qp( | |||
815 | if (ib_copy_to_udata(udata, &resp, sizeof resp)) { | 825 | if (ib_copy_to_udata(udata, &resp, sizeof resp)) { |
816 | ehca_err(pd->device, "Copy to udata failed"); | 826 | ehca_err(pd->device, "Copy to udata failed"); |
817 | ret = -EINVAL; | 827 | ret = -EINVAL; |
818 | goto create_qp_exit6; | 828 | goto create_qp_exit7; |
819 | } | 829 | } |
820 | } | 830 | } |
821 | 831 | ||
822 | return my_qp; | 832 | return my_qp; |
823 | 833 | ||
824 | create_qp_exit6: | 834 | create_qp_exit7: |
825 | ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num); | 835 | ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num); |
826 | 836 | ||
827 | create_qp_exit5: | 837 | create_qp_exit6: |
828 | kfree(my_qp->mod_qp_parm); | 838 | kfree(my_qp->mod_qp_parm); |
829 | 839 | ||
830 | create_qp_exit4: | 840 | create_qp_exit5: |
831 | if (HAS_RQ(my_qp)) | 841 | if (HAS_RQ(my_qp)) |
832 | ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); | 842 | ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); |
833 | 843 | ||
844 | create_qp_exit4: | ||
845 | if (HAS_SQ(my_qp)) | ||
846 | vfree(my_qp->sq_map); | ||
847 | |||
834 | create_qp_exit3: | 848 | create_qp_exit3: |
835 | if (HAS_SQ(my_qp)) | 849 | if (HAS_SQ(my_qp)) |
836 | ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); | 850 | ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); |
@@ -1979,8 +1993,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp, | |||
1979 | 1993 | ||
1980 | if (HAS_RQ(my_qp)) | 1994 | if (HAS_RQ(my_qp)) |
1981 | ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); | 1995 | ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); |
1982 | if (HAS_SQ(my_qp)) | 1996 | if (HAS_SQ(my_qp)) { |
1983 | ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); | 1997 | ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); |
1998 | vfree(my_qp->sq_map); | ||
1999 | } | ||
1984 | kmem_cache_free(qp_cache, my_qp); | 2000 | kmem_cache_free(qp_cache, my_qp); |
1985 | atomic_dec(&shca->num_qps); | 2001 | atomic_dec(&shca->num_qps); |
1986 | return 0; | 2002 | return 0; |
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c index cea3eba9c83e..4426d82fe798 100644 --- a/drivers/infiniband/hw/ehca/ehca_reqs.c +++ b/drivers/infiniband/hw/ehca/ehca_reqs.c | |||
@@ -139,6 +139,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr) | |||
139 | static inline int ehca_write_swqe(struct ehca_qp *qp, | 139 | static inline int ehca_write_swqe(struct ehca_qp *qp, |
140 | struct ehca_wqe *wqe_p, | 140 | struct ehca_wqe *wqe_p, |
141 | const struct ib_send_wr *send_wr, | 141 | const struct ib_send_wr *send_wr, |
142 | u32 sq_map_idx, | ||
142 | int hidden) | 143 | int hidden) |
143 | { | 144 | { |
144 | u32 idx; | 145 | u32 idx; |
@@ -157,7 +158,11 @@ static inline int ehca_write_swqe(struct ehca_qp *qp, | |||
157 | /* clear wqe header until sglist */ | 158 | /* clear wqe header until sglist */ |
158 | memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); | 159 | memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list)); |
159 | 160 | ||
160 | wqe_p->work_request_id = send_wr->wr_id; | 161 | wqe_p->work_request_id = send_wr->wr_id & ~QMAP_IDX_MASK; |
162 | wqe_p->work_request_id |= sq_map_idx & QMAP_IDX_MASK; | ||
163 | |||
164 | qp->sq_map[sq_map_idx].app_wr_id = send_wr->wr_id & QMAP_IDX_MASK; | ||
165 | qp->sq_map[sq_map_idx].reported = 0; | ||
161 | 166 | ||
162 | switch (send_wr->opcode) { | 167 | switch (send_wr->opcode) { |
163 | case IB_WR_SEND: | 168 | case IB_WR_SEND: |
@@ -381,6 +386,7 @@ static inline int post_one_send(struct ehca_qp *my_qp, | |||
381 | { | 386 | { |
382 | struct ehca_wqe *wqe_p; | 387 | struct ehca_wqe *wqe_p; |
383 | int ret; | 388 | int ret; |
389 | u32 sq_map_idx; | ||
384 | u64 start_offset = my_qp->ipz_squeue.current_q_offset; | 390 | u64 start_offset = my_qp->ipz_squeue.current_q_offset; |
385 | 391 | ||
386 | /* get pointer next to free WQE */ | 392 | /* get pointer next to free WQE */ |
@@ -393,8 +399,15 @@ static inline int post_one_send(struct ehca_qp *my_qp, | |||
393 | "qp_num=%x", my_qp->ib_qp.qp_num); | 399 | "qp_num=%x", my_qp->ib_qp.qp_num); |
394 | return -ENOMEM; | 400 | return -ENOMEM; |
395 | } | 401 | } |
402 | |||
403 | /* | ||
404 | * Get the index of the WQE in the send queue. The same index is used | ||
405 | * for writing into the sq_map. | ||
406 | */ | ||
407 | sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size; | ||
408 | |||
396 | /* write a SEND WQE into the QUEUE */ | 409 | /* write a SEND WQE into the QUEUE */ |
397 | ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden); | 410 | ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden); |
398 | /* | 411 | /* |
399 | * if something failed, | 412 | * if something failed, |
400 | * reset the free entry pointer to the start value | 413 | * reset the free entry pointer to the start value |
@@ -654,8 +667,34 @@ repoll: | |||
654 | my_cq, my_cq->cq_number); | 667 | my_cq, my_cq->cq_number); |
655 | } | 668 | } |
656 | 669 | ||
657 | /* we got a completion! */ | 670 | read_lock(&ehca_qp_idr_lock); |
658 | wc->wr_id = cqe->work_request_id; | 671 | my_qp = idr_find(&ehca_qp_idr, cqe->qp_token); |
672 | read_unlock(&ehca_qp_idr_lock); | ||
673 | if (!my_qp) | ||
674 | goto repoll; | ||
675 | wc->qp = &my_qp->ib_qp; | ||
676 | |||
677 | if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT)) { | ||
678 | struct ehca_qmap_entry *qmap_entry; | ||
679 | /* | ||
680 | * We got a send completion and need to restore the original | ||
681 | * wr_id. | ||
682 | */ | ||
683 | qmap_entry = &my_qp->sq_map[cqe->work_request_id & | ||
684 | QMAP_IDX_MASK]; | ||
685 | |||
686 | if (qmap_entry->reported) { | ||
687 | ehca_warn(cq->device, "Double cqe on qp_num=%#x", | ||
688 | my_qp->real_qp_num); | ||
689 | /* found a double cqe, discard it and read next one */ | ||
690 | goto repoll; | ||
691 | } | ||
692 | wc->wr_id = cqe->work_request_id & ~QMAP_IDX_MASK; | ||
693 | wc->wr_id |= qmap_entry->app_wr_id; | ||
694 | qmap_entry->reported = 1; | ||
695 | } else | ||
696 | /* We got a receive completion. */ | ||
697 | wc->wr_id = cqe->work_request_id; | ||
659 | 698 | ||
660 | /* eval ib_wc_opcode */ | 699 | /* eval ib_wc_opcode */ |
661 | wc->opcode = ib_wc_opcode[cqe->optype]-1; | 700 | wc->opcode = ib_wc_opcode[cqe->optype]-1; |
@@ -678,13 +717,6 @@ repoll: | |||
678 | } else | 717 | } else |
679 | wc->status = IB_WC_SUCCESS; | 718 | wc->status = IB_WC_SUCCESS; |
680 | 719 | ||
681 | read_lock(&ehca_qp_idr_lock); | ||
682 | my_qp = idr_find(&ehca_qp_idr, cqe->qp_token); | ||
683 | read_unlock(&ehca_qp_idr_lock); | ||
684 | if (!my_qp) | ||
685 | goto repoll; | ||
686 | wc->qp = &my_qp->ib_qp; | ||
687 | |||
688 | wc->byte_len = cqe->nr_bytes_transferred; | 720 | wc->byte_len = cqe->nr_bytes_transferred; |
689 | wc->pkey_index = cqe->pkey_index; | 721 | wc->pkey_index = cqe->pkey_index; |
690 | wc->slid = cqe->rlid; | 722 | wc->slid = cqe->rlid; |