author	Bryan O'Sullivan <bos@pathscale.com>	2006-07-01 07:36:10 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-01 12:56:01 -0400
commit	12eef41f8b72b6e11e36b48c78849c17e49781c8 (patch)
tree	3a8bd77d77c7542e19d200d3abf25fc07f0d8f51 /drivers/infiniband/hw/ipath/ipath_ruc.c
parent	fba75200ad92892bf32d8d6f1443c6f1e4f48676 (diff)
[PATCH] IB/ipath: RC receive interrupt performance changes
This patch separates the QP state used for sending and receiving RC packets so
that most of the processing in the receive interrupt handler can be done
without holding locks. ACK packets are now sent without requiring
synchronization with the send tasklet.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
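The heart of the change is where the r_rq lock is taken: before this patch,
callers on the receive path took qp->r_rq.lock with spin_lock_irqsave() and
held it across ipath_get_rwqe(); after it, ipath_get_rwqe() acquires and
releases the lock itself. Below is a minimal, self-contained sketch of that
pattern; the struct layout, the GCC __sync spinlock stand-in, and all names
are illustrative, not the kernel's (the real code uses spinlock_t with
spin_lock_irqsave()/spin_unlock_irqrestore()).

/*
 * Minimal sketch (not kernel code): after the patch, the callee takes
 * the queue lock itself instead of requiring the caller to hold it.
 */
#include <stdbool.h>

struct rwq {				/* illustrative receive work queue */
	volatile int lock;
	unsigned head;			/* producer index */
	unsigned tail;			/* consumer index */
	unsigned size;
};

static void rwq_lock(struct rwq *rq)
{
	while (__sync_lock_test_and_set(&rq->lock, 1))
		;			/* spin until the lock is free */
}

static void rwq_unlock(struct rwq *rq)
{
	__sync_lock_release(&rq->lock);
}

/* Return false if the queue is empty, true if one entry was consumed. */
static bool rwq_get(struct rwq *rq, unsigned *idx)
{
	bool ret = true;

	rwq_lock(rq);			/* lock taken here, not by the caller */
	if (rq->tail == rq->head) {
		ret = false;
		goto done;
	}
	*idx = rq->tail;
	if (++rq->tail >= rq->size)
		rq->tail = 0;
done:
	rwq_unlock(rq);			/* single unlock point on every path */
	return ret;
}

With locking moved into the callee, the IB_WR_SEND and
IB_WR_RDMA_WRITE_WITH_IMM cases in the diff below can drop their explicit
lock/unlock pairs around the call.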
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_ruc.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_ruc.c	37
1 file changed, 17 insertions(+), 20 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 9a456a7ce352..99c0652d49dc 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -113,20 +113,23 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
  *
  * Return 0 if no RWQE is available, otherwise return 1.
  *
- * Called at interrupt level with the QP r_rq.lock held.
+ * Can be called from interrupt level.
  */
 int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 {
+	unsigned long flags;
 	struct ipath_rq *rq;
 	struct ipath_srq *srq;
 	struct ipath_rwqe *wqe;
-	int ret;
+	int ret = 1;
 
 	if (!qp->ibqp.srq) {
 		rq = &qp->r_rq;
+		spin_lock_irqsave(&rq->lock, flags);
+
 		if (unlikely(rq->tail == rq->head)) {
 			ret = 0;
-			goto bail;
+			goto done;
 		}
 		wqe = get_rwqe_ptr(rq, rq->tail);
 		qp->r_wr_id = wqe->wr_id;
@@ -138,17 +141,16 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 		}
 		if (++rq->tail >= rq->size)
 			rq->tail = 0;
-		ret = 1;
-		goto bail;
+		goto done;
 	}
 
 	srq = to_isrq(qp->ibqp.srq);
 	rq = &srq->rq;
-	spin_lock(&rq->lock);
+	spin_lock_irqsave(&rq->lock, flags);
+
 	if (unlikely(rq->tail == rq->head)) {
-		spin_unlock(&rq->lock);
 		ret = 0;
-		goto bail;
+		goto done;
 	}
 	wqe = get_rwqe_ptr(rq, rq->tail);
 	qp->r_wr_id = wqe->wr_id;
@@ -170,18 +172,18 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 		n = rq->head - rq->tail;
 		if (n < srq->limit) {
 			srq->limit = 0;
-			spin_unlock(&rq->lock);
+			spin_unlock_irqrestore(&rq->lock, flags);
 			ev.device = qp->ibqp.device;
 			ev.element.srq = qp->ibqp.srq;
 			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
 			srq->ibsrq.event_handler(&ev,
 						 srq->ibsrq.srq_context);
-		} else
-			spin_unlock(&rq->lock);
-	} else
-		spin_unlock(&rq->lock);
-	ret = 1;
+			goto bail;
+		}
+	}
 
+done:
+	spin_unlock_irqrestore(&rq->lock, flags);
 bail:
 	return ret;
 }
@@ -248,10 +250,8 @@ again:
 		wc.imm_data = wqe->wr.imm_data;
 		/* FALLTHROUGH */
 	case IB_WR_SEND:
-		spin_lock_irqsave(&qp->r_rq.lock, flags);
 		if (!ipath_get_rwqe(qp, 0)) {
 		rnr_nak:
-			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 			/* Handle RNR NAK */
 			if (qp->ibqp.qp_type == IB_QPT_UC)
 				goto send_comp;
@@ -263,20 +263,17 @@ again:
 			sqp->s_rnr_retry--;
 			dev->n_rnr_naks++;
 			sqp->s_rnr_timeout =
-				ib_ipath_rnr_table[sqp->s_min_rnr_timer];
+				ib_ipath_rnr_table[sqp->r_min_rnr_timer];
 			ipath_insert_rnr_queue(sqp);
 			goto done;
 		}
-		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 		break;
 
 	case IB_WR_RDMA_WRITE_WITH_IMM:
 		wc.wc_flags = IB_WC_WITH_IMM;
 		wc.imm_data = wqe->wr.imm_data;
-		spin_lock_irqsave(&qp->r_rq.lock, flags);
 		if (!ipath_get_rwqe(qp, 1))
 			goto rnr_nak;
-		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
 		/* FALLTHROUGH */
 	case IB_WR_RDMA_WRITE:
 		if (wqe->length == 0)
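One detail of the rewritten ipath_get_rwqe() above deserves a note: it ends
with two labels. Paths that still hold the lock jump to done:, which unlocks
and falls through to the return; the SRQ-limit path unlocks early, calls the
event handler without the lock held, and jumps past the unlock to bail:. A
self-contained sketch of that exit-label pattern, with illustrative names and
the same GCC __sync spinlock stand-in as above:

/*
 * Sketch of the dual exit labels (all names illustrative): "done"
 * unlocks and falls through; "bail" is for paths that have already
 * dropped the lock, e.g. to call an event handler unlocked.
 */
struct sq {
	volatile int lock;
	unsigned head, tail, size;
	unsigned limit;			/* 0 = limit event disarmed */
	void (*event_handler)(void *ctx);
	void *ctx;
};

static void sq_lock(struct sq *q)
{
	while (__sync_lock_test_and_set(&q->lock, 1))
		;
}

static void sq_unlock(struct sq *q)
{
	__sync_lock_release(&q->lock);
}

static int sq_consume(struct sq *q)
{
	int ret = 1;

	sq_lock(q);
	if (q->tail == q->head) {	/* empty: fail, but still unlock */
		ret = 0;
		goto done;
	}
	if (++q->tail >= q->size)	/* consume one entry */
		q->tail = 0;
	if (q->limit) {
		unsigned n = q->head - q->tail;

		if ((int)n < 0)
			n += q->size;	/* index wrapped around */
		if (n < q->limit) {
			q->limit = 0;			/* disarm the event */
			sq_unlock(q);			/* drop the lock... */
			q->event_handler(q->ctx);	/* ...run unlocked */
			goto bail;			/* skip second unlock */
		}
	}
done:
	sq_unlock(q);
bail:
	return ret;
}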