author     Ralph Campbell <ralph.campbell@qlogic.com>   2007-07-06 15:48:23 -0400
committer  Roland Dreier <rolandd@cisco.com>            2007-07-09 23:12:26 -0400
commit     4fc570bcbe77f823aae183dd824869f79e74cc97
tree       3955946608a973b06db8b7e5b3ec319354a6fa73 /drivers/infiniband
parent     06cc85086e6896939f8c68f8518224748f6b0b2f
IB/ipath: Add barrier before updating WC head in shared memory
Add memory barriers to make sure the CPU doesn't reorder accesses to the
shared queues: user programs can be polling on the head index update, so
the queue entry must be written before the head index is updated, and on
the reader side the entry must be read only after the head index has been
read.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
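The ordering this patch enforces is the classic producer/consumer handoff for a ring buffer shared through memory (here, queues that can be mmap()ed into user space): the producer fills the queue entry, issues a write barrier, and only then publishes the new head index; the consumer loads the head index, issues a read barrier, and only then reads the entry. Below is a minimal sketch of that pairing in kernel-style C. The ring/ring_entry types and RING_SIZE are illustrative only, not the actual ipath structures.

/*
 * Sketch of the barrier pairing the patch introduces.  The types and
 * names here are hypothetical; only the smp_wmb()/smp_rmb() placement
 * mirrors the driver code.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/system.h>		/* smp_wmb()/smp_rmb() in kernels of this era */

#define RING_SIZE 64

struct ring_entry {
	u32 data;
};

struct ring {
	u32 head;			/* written by producer, read by consumer */
	u32 tail;			/* written by consumer, read by producer */
	struct ring_entry queue[RING_SIZE];
};

/* Producer side -- mirrors ipath_cq_enter()/ipath_post_receive(). */
static int ring_produce(struct ring *r, u32 val)
{
	u32 head = r->head;
	u32 next = (head + 1 == RING_SIZE) ? 0 : head + 1;

	if (next == r->tail)
		return -ENOSPC;		/* full; the driver flags an overflow instead */
	r->queue[head].data = val;
	/* Make sure the queue entry is written before the head index. */
	smp_wmb();
	r->head = next;
	return 0;
}

/* Consumer side -- mirrors ipath_poll_cq()/ipath_get_rwqe(). */
static int ring_consume(struct ring *r, u32 *val)
{
	u32 tail = r->tail;

	if (tail == r->head)
		return 0;		/* empty: nothing published yet */
	/* Make sure the entry is read only after the head index is read. */
	smp_rmb();
	*val = r->queue[tail].data;
	if (++tail >= RING_SIZE)
		tail = 0;
	r->tail = tail;
	return 1;
}

The barriers only work as a pair: the smp_wmb() in the producers (ipath_cq_enter(), ipath_post_srq_receive(), ipath_post_receive()) is matched by an smp_rmb() in every consumer path (ipath_poll_cq(), ipath_get_rwqe(), ipath_ud_loopback()).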
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_cq.c     | 5 ++++-
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c    | 2 ++
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_srq.c    | 2 ++
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ud.c     | 2 ++
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c  | 2 ++
5 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 9014ef63eedc..a6f04d27ec57 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -90,6 +90,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
 	wc->queue[head].sl = entry->sl;
 	wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
 	wc->queue[head].port_num = entry->port_num;
+	/* Make sure queue entry is written before the head index. */
+	smp_wmb();
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -139,7 +141,8 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 
 		if (tail == wc->head)
 			break;
-
+		/* Make sure entry is read after head index is read. */
+		smp_rmb();
 		qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
 				      wc->queue[tail].qp_num);
 		entry->qp = &qp->ibqp;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 854deb56ac02..85256747d8a1 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -194,6 +194,8 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 		ret = 0;
 		goto bail;
 	}
+	/* Make sure entry is read after head index is read. */
+	smp_rmb();
 	wqe = get_rwqe_ptr(rq, tail);
 	if (++tail >= rq->size)
 		tail = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 14cbbd633d34..40c36ec19016 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -80,6 +80,8 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
 		wqe->num_sge = wr->num_sge;
 		for (i = 0; i < wr->num_sge; i++)
 			wqe->sg_list[i] = wr->sg_list[i];
+		/* Make sure queue entry is written before the head index. */
+		smp_wmb();
 		wq->head = next;
 		spin_unlock_irqrestore(&srq->rq.lock, flags);
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 38ba771b3efe..f9a3338a5fb7 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -176,6 +176,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
 		dev->n_pkt_drops++;
 		goto bail_sge;
 	}
+	/* Make sure entry is read after head index is read. */
+	smp_rmb();
 	wqe = get_rwqe_ptr(rq, tail);
 	if (++tail >= rq->size)
 		tail = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 0aecded6af86..c76ea0e0b024 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -327,6 +327,8 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		wqe->num_sge = wr->num_sge;
 		for (i = 0; i < wr->num_sge; i++)
 			wqe->sg_list[i] = wr->sg_list[i];
+		/* Make sure queue entry is written before the head index. */
+		smp_wmb();
 		wq->head = next;
 		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 	}
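Because user programs can poll the head index of the mapped queue directly (as the commit message notes), the kernel-side smp_wmb() is only half of the contract: a user-space poller needs a matching read barrier between loading the head index and loading the entry. The snippet below is an assumed illustration of such a poller, not code from libipathverbs; the mapped_cq layout and field names are hypothetical, and __sync_synchronize() stands in for whatever barrier the real library uses.

/*
 * Hypothetical user-space poller for an mmap()ed completion queue.
 * Layout and names are assumptions for illustration only.
 */
#include <stdint.h>

struct mapped_cq {
	uint32_t head;			/* advanced by the kernel after smp_wmb() */
	uint32_t tail;			/* advanced by the user-space consumer */
	struct {
		uint64_t wr_id;
		uint32_t qp_num;
	} queue[];			/* entries follow the indices in the mapping */
};

/* Returns 1 and fills *wr_id/*qpn if an entry was consumed, 0 if empty. */
static int poll_one(volatile struct mapped_cq *cq, uint32_t cq_size,
		    uint64_t *wr_id, uint32_t *qpn)
{
	uint32_t tail = cq->tail;

	if (tail == cq->head)
		return 0;			/* nothing published yet */
	/* Pair with the kernel's smp_wmb(): read head before the entry. */
	__sync_synchronize();
	*wr_id = cq->queue[tail].wr_id;
	*qpn   = cq->queue[tail].qp_num;
	if (++tail >= cq_size)
		tail = 0;
	cq->tail = tail;			/* hand the slot back to the kernel */
	return 1;
}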