author     Stefan Roscher <ossrosch@linux.vnet.ibm.com>  2008-12-01 13:05:50 -0500
committer  Roland Dreier <rolandd@cisco.com>             2008-12-01 13:05:50 -0500
commit     7ec4f4634a4326c1f8fd172c80c8f59c9b3e90a4
tree       56daee13483f7c73cca3b160100d1b0cfb244843
parent     6b1f9d647e848060d34c3db408413989f1e460ba
IB/ehca: Fix problem with generated flush work completions
This fix enables the ehca device driver to generate flush work completions even if the application doesn't request completions for all work requests. The current implementation of ehca generates flush work completions for the wrong work requests if an application uses non-signaled work completions.

Signed-off-by: Stefan Roscher <stefan.roscher@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
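For context, the failure mode involves an application that posts most of its send work requests unsignaled, so only a fraction of the wqes ever produce a CQE. Below is a minimal userspace sketch of that posting pattern (plain libibverbs, not part of this patch; the function name and the every-16th signaling interval are invented for illustration, and the QP is assumed to have been created with sq_sig_all = 0):

#include <infiniband/verbs.h>

/* Post n send WRs, requesting a CQE only for every 16th one. With the
 * old driver logic, a QP error after such a burst could generate flush
 * completions for the wrong work requests, because the driver assumed
 * every wqe between two completions had a matching CQE. */
static int post_mostly_unsignaled(struct ibv_qp *qp, struct ibv_sge *sge, int n)
{
	struct ibv_send_wr wr = {0}, *bad_wr;
	int i, ret;

	wr.opcode = IBV_WR_SEND;
	wr.sg_list = sge;
	wr.num_sge = 1;

	for (i = 0; i < n; i++) {
		wr.wr_id = i;
		wr.send_flags = (i % 16 == 15) ? IBV_SEND_SIGNALED : 0;
		ret = ibv_post_send(qp, &wr, &bad_wr);
		if (ret)
			return ret;
	}
	return 0;
}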
Diffstat (limited to 'drivers/infiniband/hw')
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h |  4
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c      | 26
 -rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c    | 51
 3 files changed, 53 insertions(+), 28 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 4df887af66a5..7fc35cf0cddf 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -163,7 +163,8 @@ struct ehca_mod_qp_parm {
 /* struct for tracking if cqes have been reported to the application */
 struct ehca_qmap_entry {
 	u16 app_wr_id;
-	u16 reported;
+	u8 reported;
+	u8 cqe_req;
 };
 
 struct ehca_queue_map {
@@ -171,6 +172,7 @@ struct ehca_queue_map {
 	unsigned int entries;
 	unsigned int tail;
 	unsigned int left_to_poll;
+	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
 };
 
 struct ehca_qp {
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 9e05ee2db39b..cadbf0cdd910 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -435,9 +435,13 @@ static void reset_queue_map(struct ehca_queue_map *qmap)
 {
 	int i;
 
-	qmap->tail = 0;
-	for (i = 0; i < qmap->entries; i++)
+	qmap->tail = qmap->entries - 1;
+	qmap->left_to_poll = 0;
+	qmap->next_wqe_idx = 0;
+	for (i = 0; i < qmap->entries; i++) {
 		qmap->map[i].reported = 1;
+		qmap->map[i].cqe_req = 0;
+	}
 }
 
 /*
@@ -1121,6 +1125,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 	void *wqe_v;
 	u64 q_ofs;
 	u32 wqe_idx;
+	unsigned int tail_idx;
 
 	/* convert real to abs address */
 	wqe_p = wqe_p & (~(1UL << 63));
@@ -1133,12 +1138,17 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 		return -EFAULT;
 	}
 
+	tail_idx = (qmap->tail + 1) % qmap->entries;
 	wqe_idx = q_ofs / ipz_queue->qe_size;
-	if (wqe_idx < qmap->tail)
-		qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
-	else
-		qmap->left_to_poll = wqe_idx - qmap->tail;
 
+	/* check all processed wqes, whether a cqe is requested or not */
+	while (tail_idx != wqe_idx) {
+		if (qmap->map[tail_idx].cqe_req)
+			qmap->left_to_poll++;
+		tail_idx = (tail_idx + 1) % qmap->entries;
+	}
+	/* save index in queue, where we have to start flushing */
+	qmap->next_wqe_idx = wqe_idx;
 	return 0;
 }
 
@@ -1185,10 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
 	} else {
 		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
 		my_qp->sq_map.left_to_poll = 0;
+		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
+							my_qp->sq_map.entries;
 		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
 
 		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
 		my_qp->rq_map.left_to_poll = 0;
+		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
+							my_qp->rq_map.entries;
 		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
 	}
 
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 64928079eafa..00a648f4316c 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -179,6 +179,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 	qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
 	qmap_entry->reported = 0;
+	qmap_entry->cqe_req = 0;
 
 	switch (send_wr->opcode) {
 	case IB_WR_SEND:
@@ -203,8 +204,10 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
 	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
 	    qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-	    && !hidden)
+	    && !hidden) {
 		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
+		qmap_entry->cqe_req = 1;
+	}
 
 	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
 	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
@@ -569,6 +572,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
 		qmap_entry = &my_qp->rq_map.map[rq_map_idx];
 		qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
 		qmap_entry->reported = 0;
+		qmap_entry->cqe_req = 1;
 
 		wqe_cnt++;
 	} /* eof for cur_recv_wr */
@@ -706,27 +710,34 @@ repoll:
 		goto repoll;
 	wc->qp = &my_qp->ib_qp;
 
+	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
+	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
+		/* We got a send completion. */
+		qmap = &my_qp->sq_map;
+	else
+		/* We got a receive completion. */
+		qmap = &my_qp->rq_map;
+
+	/* advance the tail pointer */
+	qmap->tail = qmap_tail_idx;
+
 	if (is_error) {
 		/*
 		 * set left_to_poll to 0 because in error state, we will not
 		 * get any additional CQEs
 		 */
-		ehca_add_to_err_list(my_qp, 1);
+		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
+							my_qp->sq_map.entries;
 		my_qp->sq_map.left_to_poll = 0;
+		ehca_add_to_err_list(my_qp, 1);
 
+		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
+							my_qp->rq_map.entries;
+		my_qp->rq_map.left_to_poll = 0;
 		if (HAS_RQ(my_qp))
 			ehca_add_to_err_list(my_qp, 0);
-		my_qp->rq_map.left_to_poll = 0;
 	}
 
-	qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
-	if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
-		/* We got a send completion. */
-		qmap = &my_qp->sq_map;
-	else
-		/* We got a receive completion. */
-		qmap = &my_qp->rq_map;
-
 	qmap_entry = &qmap->map[qmap_tail_idx];
 	if (qmap_entry->reported) {
 		ehca_warn(cq->device, "Double cqe on qp_num=%#x",
@@ -738,10 +749,6 @@ repoll:
 	wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
 	qmap_entry->reported = 1;
 
-	/* this is a proper completion, we need to advance the tail pointer */
-	if (++qmap->tail == qmap->entries)
-		qmap->tail = 0;
-
 	/* if left_to_poll is decremented to 0, add the QP to the error list */
 	if (qmap->left_to_poll > 0) {
 		qmap->left_to_poll--;
@@ -805,13 +812,14 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
805 else 812 else
806 qmap = &my_qp->rq_map; 813 qmap = &my_qp->rq_map;
807 814
808 qmap_entry = &qmap->map[qmap->tail]; 815 qmap_entry = &qmap->map[qmap->next_wqe_idx];
809 816
810 while ((nr < num_entries) && (qmap_entry->reported == 0)) { 817 while ((nr < num_entries) && (qmap_entry->reported == 0)) {
811 /* generate flush CQE */ 818 /* generate flush CQE */
819
812 memset(wc, 0, sizeof(*wc)); 820 memset(wc, 0, sizeof(*wc));
813 821
814 offset = qmap->tail * ipz_queue->qe_size; 822 offset = qmap->next_wqe_idx * ipz_queue->qe_size;
815 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); 823 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
816 if (!wqe) { 824 if (!wqe) {
817 ehca_err(cq->device, "Invalid wqe offset=%#lx on " 825 ehca_err(cq->device, "Invalid wqe offset=%#lx on "
@@ -850,11 +858,12 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 
 		wc->qp = &my_qp->ib_qp;
 
-		/* mark as reported and advance tail pointer */
+		/* mark as reported and advance next_wqe pointer */
 		qmap_entry->reported = 1;
-		if (++qmap->tail == qmap->entries)
-			qmap->tail = 0;
-		qmap_entry = &qmap->map[qmap->tail];
+		qmap->next_wqe_idx++;
+		if (qmap->next_wqe_idx == qmap->entries)
+			qmap->next_wqe_idx = 0;
+		qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
 		wc++; nr++;
 	}
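To see what the new bookkeeping computes, here is a small self-contained sketch (ordinary userspace C, not driver code; the structs are reduced to the fields used here, and QMAP_ENTRIES plus the example indices are invented for the demo) of the cqe_req-aware counting that calc_left_cqes() now performs:

#include <stdio.h>

#define QMAP_ENTRIES 8

struct qmap_entry { int cqe_req; };

struct queue_map {
	struct qmap_entry map[QMAP_ENTRIES];
	unsigned int entries;
	unsigned int tail;          /* index of the last completed wqe */
	unsigned int left_to_poll;
	unsigned int next_wqe_idx;  /* first wqe to be flushed */
};

/* Count only the wqes between the last completed one (tail) and the
 * failing one (wqe_idx) that actually requested a CQE; flushing will
 * start at wqe_idx. */
static void calc_left_cqes(struct queue_map *qmap, unsigned int wqe_idx)
{
	unsigned int tail_idx = (qmap->tail + 1) % qmap->entries;

	while (tail_idx != wqe_idx) {
		if (qmap->map[tail_idx].cqe_req)
			qmap->left_to_poll++;
		tail_idx = (tail_idx + 1) % qmap->entries;
	}
	qmap->next_wqe_idx = wqe_idx;
}

int main(void)
{
	struct queue_map qmap = { .entries = QMAP_ENTRIES, .tail = 7 };

	/* wqes 0 and 2 were signaled, wqe 1 was not */
	qmap.map[0].cqe_req = 1;
	qmap.map[2].cqe_req = 1;

	calc_left_cqes(&qmap, 3);  /* hardware stopped at wqe 3 */
	printf("left_to_poll=%u next_wqe_idx=%u\n",
	       qmap.left_to_poll, qmap.next_wqe_idx);  /* prints 2 and 3 */
	return 0;
}

With the old code, left_to_poll would have been computed as the raw index distance (3 wqes), overcounting the unsignaled wqe 1 and later misaligning the generated flush completions.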