 drivers/infiniband/hw/ehca/ehca_classes.h |  7 +++++++
 drivers/infiniband/hw/ehca/ehca_qp.c      | 12 ++++++------
 drivers/infiniband/hw/ehca/ehca_reqs.c    | 13 ++++++-------
 3 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 7fc35cf0cddf..c825142a2fb7 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -175,6 +175,13 @@ struct ehca_queue_map {
 	unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
 };
 
+/* function to calculate the next index for the qmap */
+static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
+{
+	unsigned int temp = cur_index + 1;
+	return (temp == limit) ? 0 : temp;
+}
+
 struct ehca_qp {
 	union {
 		struct ib_qp ib_qp;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index cadbf0cdd910..f161cf173dbe 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1138,14 +1138,14 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
 		return -EFAULT;
 	}
 
-	tail_idx = (qmap->tail + 1) % qmap->entries;
+	tail_idx = next_index(qmap->tail, qmap->entries);
 	wqe_idx = q_ofs / ipz_queue->qe_size;
 
 	/* check all processed wqes, whether a cqe is requested or not */
 	while (tail_idx != wqe_idx) {
 		if (qmap->map[tail_idx].cqe_req)
 			qmap->left_to_poll++;
-		tail_idx = (tail_idx + 1) % qmap->entries;
+		tail_idx = next_index(tail_idx, qmap->entries);
 	}
 	/* save index in queue, where we have to start flushing */
 	qmap->next_wqe_idx = wqe_idx;
@@ -1195,14 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
 	} else {
 		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
 		my_qp->sq_map.left_to_poll = 0;
-		my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
-						my_qp->sq_map.entries;
+		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
+							my_qp->sq_map.entries);
 		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
 
 		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
 		my_qp->rq_map.left_to_poll = 0;
-		my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
-						my_qp->rq_map.entries;
+		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
+							my_qp->rq_map.entries);
 		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
 	}
 
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 00a648f4316c..c7112686782f 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -726,13 +726,13 @@ repoll:
 	 * set left_to_poll to 0 because in error state, we will not
 	 * get any additional CQEs
 	 */
-	my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
-					my_qp->sq_map.entries;
+	my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
+						my_qp->sq_map.entries);
 	my_qp->sq_map.left_to_poll = 0;
 	ehca_add_to_err_list(my_qp, 1);
 
-	my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
-					my_qp->rq_map.entries;
+	my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
+						my_qp->rq_map.entries);
 	my_qp->rq_map.left_to_poll = 0;
 	if (HAS_RQ(my_qp))
 		ehca_add_to_err_list(my_qp, 0);
@@ -860,9 +860,8 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
 
 		/* mark as reported and advance next_wqe pointer */
 		qmap_entry->reported = 1;
-		qmap->next_wqe_idx++;
-		if (qmap->next_wqe_idx == qmap->entries)
-			qmap->next_wqe_idx = 0;
+		qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
+						qmap->entries);
 		qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
 		wc++; nr++;
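
The change is purely mechanical: every open-coded "(index + 1) % entries" wrap-around, and the increment-then-compare variant in generate_flush_cqes(), is folded into the new next_index() helper, which wraps with a compare instead of a division. A minimal standalone sketch (not part of the patch; the queue depth of 8 is an arbitrary value assumed for the demo) showing that the helper produces the same index sequence as the modulo form:

/*
 * Standalone demo, not kernel code: verifies that next_index() is
 * equivalent to the old "(idx + 1) % entries" arithmetic for every
 * index of a small queue map.
 */
#include <assert.h>
#include <stdio.h>

static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
{
	unsigned int temp = cur_index + 1;
	return (temp == limit) ? 0 : temp;
}

int main(void)
{
	const unsigned int entries = 8;	/* assumed demo queue depth */
	unsigned int idx;

	/* both forms advance 0..6 by one and wrap 7 back to 0 */
	for (idx = 0; idx < entries; idx++)
		assert(next_index(idx, entries) == (idx + 1) % entries);

	printf("next_index() matches the modulo form for all %u indices\n",
	       entries);
	return 0;
}

Since qmap->entries is a runtime value rather than a compile-time power of two, the compare-and-reset form avoids an integer division on every queue-index advance while leaving the wrap-around behaviour unchanged.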
