Diffstat (limited to 'drivers/infiniband/hw/qib/qib_srq.c')
-rw-r--r--	drivers/infiniband/hw/qib/qib_srq.c	375
1 file changed, 375 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c
new file mode 100644
index 000000000000..c3ec8efc2ed8
--- /dev/null
+++ b/drivers/infiniband/hw/qib/qib_srq.c
@@ -0,0 +1,375 @@
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qib_verbs.h"

/**
 * qib_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR that caused a failure is returned here
 *
 * This may be called from interrupt context.
 */
int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			 struct ib_recv_wr **bad_wr)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct qib_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
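		/*
		 * One slot is left unused so that head == tail always
		 * means "empty"; the ring is full when advancing head
		 * would collide with tail.
		 */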
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}

/**
 * qib_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libibverbs when creating a user SRQ
 */
struct ib_srq *qib_create_srq(struct ib_pd *ibpd,
			      struct ib_srq_init_attr *srq_init_attr,
			      struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibpd->device);
	struct qib_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->attr.max_sge == 0 ||
	    srq_init_attr->attr.max_sge > ib_qib_max_srq_sges ||
	    srq_init_attr->attr.max_wr == 0 ||
	    srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
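	/*
	 * Size the ring with one slot beyond max_wr so that a full ring
	 * (head one step behind tail) is distinguishable from an empty
	 * one (head == tail).
	 */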
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct qib_rwqe);
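	/*
	 * The allocation is a struct qib_rwq header followed by size
	 * fixed-stride entries, each a struct qib_rwqe plus max_sge
	 * scatter/gather entries; get_rwqe_ptr() relies on this layout.
	 */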
	srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz;

		srq->ip =
		    qib_create_mmap_info(dev, s, ibpd->uobject->context,
					 srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
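	/*
	 * srq_limit arms the low-watermark event: once the number of
	 * posted RWQEs drops below this value, an
	 * IB_EVENT_SRQ_LIMIT_REACHED asynchronous event is generated.
	 */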
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_qib_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

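	/*
	 * Queue the mmap info on the device's pending list so that
	 * qib_mmap() can match it when userspace maps the offset
	 * returned above.
	 */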
	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}

/**
 * qib_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for libibverbs
 */
int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		   enum ib_srq_attr_mask attr_mask,
		   struct ib_udata *udata)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct qib_rwq *owq;
		struct qib_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_qib_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct qib_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/*
		 * Check that we can write the offset to mmap: the user
		 * passes the address of its output buffer in the input
		 * data, and a zero offset is written there up front so
		 * the buffer is validated before the ring is swapped.
		 */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * Validate head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		tail = owq->tail;
		if (head >= srq->rq.size || tail >= srq->rq.size) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
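		/*
		 * n now counts the WQEs outstanding in the old ring; the
		 * new ring must be strictly larger so it can hold all of
		 * them plus the unused sentinel slot.
		 */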
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct qib_rwqe *wqe;
			int i;

			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct qib_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
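		/* The copied WQEs now occupy slots 0..n-1 of the new ring. */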
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct qib_mmap_info *ip = srq->ip;
			struct qib_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct qib_rwq) + size * sz;

			qib_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See qib_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			/*
			 * Put user mapping info onto the pending list
			 * unless it already is on the list.
			 */
			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}

int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct qib_srq *srq = to_isrq(ibsrq);

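	/*
	 * Report the usable depth; one ring slot is reserved to tell a
	 * full ring apart from an empty one.
	 */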
	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * qib_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int qib_destroy_srq(struct ib_srq *ibsrq)
{
	struct qib_srq *srq = to_isrq(ibsrq);
	struct qib_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
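	/*
	 * For a user SRQ the ring is freed by qib_release_mmap_info()
	 * once the last reference (including any live mmap) is dropped;
	 * kernel SRQs free the ring directly.
	 */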
	if (srq->ip)
		kref_put(&srq->ip->ref, qib_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}