author     Ralph Campbell <ralph.campbell@qlogic.com>   2006-09-22 18:22:26 -0400
committer  Roland Dreier <rolandd@cisco.com>            2006-09-22 18:22:26 -0400
commit     373d9915803aebbbf7fd3841efd9dac31c32e148 (patch)
tree       bd0b05d5e78eab4471bc4d623a880013693077ea /drivers/infiniband/hw/ipath/ipath_ruc.c
parent     9bc57e2d19db4da81c1150120658cc3658a99ed4 (diff)
IB/ipath: Performance improvements via mmap of queues
Improve performance of userspace post receive, post SRQ receive, and
poll CQ operations for ipath by allowing userspace to directly mmap()
receive queues and completion queues. This eliminates the copying
between userspace and the kernel in the data path.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
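
To make the mmap() scheme concrete, here is a rough sketch of what a userspace consumer of a queue shared this way might look like. The device path, mapping offset, and structure layout below are hypothetical stand-ins for illustration, not the actual ipath userspace ABI:

#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Hypothetical layout of a receive queue shared with the kernel.  The
 * real ipath structures and their mmap offsets are defined by the
 * driver's ABI; this only shows the shape of the idea.
 */
struct shared_rwq {
        uint32_t head;          /* advanced by the producer (poster) */
        uint32_t tail;          /* advanced by the consumer */
        uint32_t size;          /* number of entries in the ring */
        /* work queue entries follow the header in the mapping */
};

int main(void)
{
        /* Hypothetical device node; not the real ipath interface. */
        int fd = open("/dev/ipath_example", O_RDWR);
        if (fd < 0)
                return 1;

        struct shared_rwq *wq = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                     MAP_SHARED, fd, 0);
        if (wq == MAP_FAILED) {
                close(fd);
                return 1;
        }

        /*
         * Entries are consumed directly from the shared mapping: once
         * the queue memory is mapped, no per-message system call and no
         * kernel copy is needed in the data path.
         */
        uint32_t tail = wq->tail;
        while (tail != wq->head) {
                /* ... process the entry at index 'tail' ... */
                if (++tail >= wq->size)
                        tail = 0;
        }
        wq->tail = tail;        /* publish progress back to the producer */

        munmap(wq, 4096);
        close(fd);
        return 0;
}

The point of the design is visible in the loop: draining the queue touches only shared memory, which is why the copying between userspace and the kernel disappears from the data path.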
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_ruc.c')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c  138
1 file changed, 93 insertions(+), 45 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 772bc59fb85c..dd09420d677d 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -32,7 +32,7 @@
  */

 #include "ipath_verbs.h"
-#include "ipath_common.h"
+#include "ipath_kernel.h"

 /*
  * Convert the AETH RNR timeout code into the number of milliseconds.
@@ -106,6 +106,54 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
         spin_unlock_irqrestore(&dev->pending_lock, flags);
 }

+static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
+{
+        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+        int user = to_ipd(qp->ibqp.pd)->user;
+        int i, j, ret;
+        struct ib_wc wc;
+
+        qp->r_len = 0;
+        for (i = j = 0; i < wqe->num_sge; i++) {
+                if (wqe->sg_list[i].length == 0)
+                        continue;
+                /* Check LKEY */
+                if ((user && wqe->sg_list[i].lkey == 0) ||
+                    !ipath_lkey_ok(&dev->lk_table,
+                                   &qp->r_sg_list[j], &wqe->sg_list[i],
+                                   IB_ACCESS_LOCAL_WRITE))
+                        goto bad_lkey;
+                qp->r_len += wqe->sg_list[i].length;
+                j++;
+        }
+        qp->r_sge.sge = qp->r_sg_list[0];
+        qp->r_sge.sg_list = qp->r_sg_list + 1;
+        qp->r_sge.num_sge = j;
+        ret = 1;
+        goto bail;
+
+bad_lkey:
+        wc.wr_id = wqe->wr_id;
+        wc.status = IB_WC_LOC_PROT_ERR;
+        wc.opcode = IB_WC_RECV;
+        wc.vendor_err = 0;
+        wc.byte_len = 0;
+        wc.imm_data = 0;
+        wc.qp_num = qp->ibqp.qp_num;
+        wc.src_qp = 0;
+        wc.wc_flags = 0;
+        wc.pkey_index = 0;
+        wc.slid = 0;
+        wc.sl = 0;
+        wc.dlid_path_bits = 0;
+        wc.port_num = 0;
+        /* Signal solicited completion event. */
+        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+        ret = 0;
+bail:
+        return ret;
+}
+
 /**
  * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
  * @qp: the QP
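
The init_sge() helper added above checks each scatter/gather entry's LKEY before the receive is used and, on failure, reports the bad WQE to the application by posting a completion with IB_WC_LOC_PROT_ERR rather than silently dropping it. A minimal standalone sketch of that validate-or-complete-with-error shape, using simplified stand-in types rather than the kernel's own (the real code also resolves the memory region and accumulates the receive length):

#include <stdint.h>

/* Simplified stand-ins for the kernel's SGE/WQE/completion types. */
struct sge { uint32_t lkey; uint32_t length; };
struct rwqe { uint64_t wr_id; int num_sge; struct sge sg_list[4]; };
enum wc_status { WC_SUCCESS, WC_LOC_PROT_ERR };
struct wc { uint64_t wr_id; enum wc_status status; };

/* Hypothetical lkey check; the kernel's ipath_lkey_ok() also looks up
 * the memory region and verifies access rights. */
static int lkey_ok(uint32_t lkey)
{
        return lkey != 0;
}

/* Returns 1 if all non-empty SGEs validate; otherwise fills in an
 * error completion for the WQE and returns 0, mirroring init_sge(). */
static int validate_sges(const struct rwqe *wqe, struct wc *cq_entry)
{
        for (int i = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;       /* skip empty entries */
                if (!lkey_ok(wqe->sg_list[i].lkey)) {
                        cq_entry->wr_id = wqe->wr_id;
                        cq_entry->status = WC_LOC_PROT_ERR;
                        return 0;
                }
        }
        cq_entry->wr_id = wqe->wr_id;
        cq_entry->status = WC_SUCCESS;
        return 1;
}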
@@ -119,71 +167,71 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
 {
         unsigned long flags;
         struct ipath_rq *rq;
+        struct ipath_rwq *wq;
         struct ipath_srq *srq;
         struct ipath_rwqe *wqe;
-        int ret = 1;
+        void (*handler)(struct ib_event *, void *);
+        u32 tail;
+        int ret;

-        if (!qp->ibqp.srq) {
+        if (qp->ibqp.srq) {
+                srq = to_isrq(qp->ibqp.srq);
+                handler = srq->ibsrq.event_handler;
+                rq = &srq->rq;
+        } else {
+                srq = NULL;
+                handler = NULL;
                 rq = &qp->r_rq;
-                spin_lock_irqsave(&rq->lock, flags);
-
-                if (unlikely(rq->tail == rq->head)) {
-                        ret = 0;
-                        goto done;
-                }
-                wqe = get_rwqe_ptr(rq, rq->tail);
-                qp->r_wr_id = wqe->wr_id;
-                if (!wr_id_only) {
-                        qp->r_sge.sge = wqe->sg_list[0];
-                        qp->r_sge.sg_list = wqe->sg_list + 1;
-                        qp->r_sge.num_sge = wqe->num_sge;
-                        qp->r_len = wqe->length;
-                }
-                if (++rq->tail >= rq->size)
-                        rq->tail = 0;
-                goto done;
         }

-        srq = to_isrq(qp->ibqp.srq);
-        rq = &srq->rq;
         spin_lock_irqsave(&rq->lock, flags);
-
-        if (unlikely(rq->tail == rq->head)) {
-                ret = 0;
-                goto done;
-        }
-        wqe = get_rwqe_ptr(rq, rq->tail);
+        wq = rq->wq;
+        tail = wq->tail;
+        /* Validate tail before using it since it is user writable. */
+        if (tail >= rq->size)
+                tail = 0;
+        do {
+                if (unlikely(tail == wq->head)) {
+                        spin_unlock_irqrestore(&rq->lock, flags);
+                        ret = 0;
+                        goto bail;
+                }
+                wqe = get_rwqe_ptr(rq, tail);
+                if (++tail >= rq->size)
+                        tail = 0;
+        } while (!wr_id_only && !init_sge(qp, wqe));
         qp->r_wr_id = wqe->wr_id;
-        if (!wr_id_only) {
-                qp->r_sge.sge = wqe->sg_list[0];
-                qp->r_sge.sg_list = wqe->sg_list + 1;
-                qp->r_sge.num_sge = wqe->num_sge;
-                qp->r_len = wqe->length;
-        }
-        if (++rq->tail >= rq->size)
-                rq->tail = 0;
-        if (srq->ibsrq.event_handler) {
-                struct ib_event ev;
+        wq->tail = tail;
+
+        ret = 1;
+        if (handler) {
                 u32 n;

-                if (rq->head < rq->tail)
-                        n = rq->size + rq->head - rq->tail;
+                /*
+                 * validate head pointer value and compute
+                 * the number of remaining WQEs.
+                 */
+                n = wq->head;
+                if (n >= rq->size)
+                        n = 0;
+                if (n < tail)
+                        n += rq->size - tail;
                 else
-                        n = rq->head - rq->tail;
+                        n -= tail;
                 if (n < srq->limit) {
+                        struct ib_event ev;
+
                         srq->limit = 0;
                         spin_unlock_irqrestore(&rq->lock, flags);
                         ev.device = qp->ibqp.device;
                         ev.element.srq = qp->ibqp.srq;
                         ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-                        srq->ibsrq.event_handler(&ev,
-                                                 srq->ibsrq.srq_context);
+                        handler(&ev, srq->ibsrq.srq_context);
                         goto bail;
                 }
         }
-
-done:
         spin_unlock_irqrestore(&rq->lock, flags);
+
 bail:
         return ret;
 }
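
One pattern worth calling out in the new ipath_get_rwqe(): because wq->head and wq->tail now live in memory that userspace can write through the mapping, each index is clamped into [0, rq->size) before it is used (the "Validate tail" check above, and the head validation in the SRQ limit path). A self-contained sketch of that defensive computation, with simplified stand-in types rather than the kernel's:

#include <stdint.h>

/* Stand-in for a queue whose indices live in shared, user-writable
 * memory; 'size' stays in kernel-private, trusted state. */
struct shared_queue {
        uint32_t head;          /* user writable via the mmap'd page */
        uint32_t tail;          /* user writable via the mmap'd page */
        uint32_t size;          /* trusted copy kept by the kernel */
};

/*
 * Number of entries available to consume.  Both indices are clamped
 * into [0, size) before any arithmetic, mirroring the checks on
 * wq->tail and wq->head in ipath_get_rwqe() above.
 */
static uint32_t queue_avail(const struct shared_queue *q)
{
        uint32_t head = q->head;
        uint32_t tail = q->tail;

        if (head >= q->size)    /* never trust a user-writable index */
                head = 0;
        if (tail >= q->size)
                tail = 0;

        return (head >= tail) ? head - tail : q->size + head - tail;
}

Since the indices are read under the queue lock, one clamp per read suffices; the worst a misbehaving process can do is corrupt its own queue accounting, not make the kernel index outside the ring.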