author    Roland Dreier <rolandd@cisco.com>  2007-05-08 21:00:38 -0400
committer Roland Dreier <rolandd@cisco.com>  2007-05-08 21:00:38 -0400
commit    225c7b1feef1b41170f7037a5b10a65cd8a42c54 (patch)
tree      702a0a2cbba7f1c5b2949d236b4463d486204fdc /drivers/infiniband/hw/mlx4/srq.c
parent    1bf66a30421ca772820f489d88c16d0c430d6a67 (diff)
IB/mlx4: Add a driver for Mellanox ConnectX InfiniBand adapters
Add an InfiniBand driver for Mellanox ConnectX adapters.  Because
these adapters can also be used as ethernet NICs and Fibre Channel
HBAs, the driver is split into two modules:

  mlx4_core: Handles low-level things like device initialization and
    processing firmware commands.  Also controls resource allocation
    so that the InfiniBand, ethernet and FC functions can share a
    device without stepping on each other.

  mlx4_ib: Handles InfiniBand-specific things; plugs into the
    InfiniBand midlayer.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
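For context, a minimal sketch of how a kernel consumer might exercise the SRQ verbs that mlx4_ib plugs into the midlayer, since this file implements the SRQ side of that plumbing. This is not part of the patch: example_create_srq, the source of the pd argument, and the attribute values are illustrative assumptions; only ib_create_srq() and struct ib_srq_init_attr are taken from the stock ib_verbs.h API of this kernel generation.

#include <rdma/ib_verbs.h>

/*
 * Illustrative only: a ULP asks the midlayer for an SRQ; for an mlx4
 * device the midlayer dispatches to mlx4_ib_create_srq() below.
 */
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
        struct ib_srq_init_attr init_attr = {
                .attr = {
                        .max_wr  = 256, /* the driver rounds this up to a power of two */
                        .max_sge = 4,
                },
        };

        return ib_create_srq(pd, &init_attr);
}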
Diffstat (limited to 'drivers/infiniband/hw/mlx4/srq.c')
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c | 334
1 file changed, 334 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
new file mode 100644
index 00000000000..42ab4a801d6
--- /dev/null
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -0,0 +1,334 @@
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/qp.h>
#include <linux/mlx4/srq.h>

#include "mlx4_ib.h"
#include "user.h"

static void *get_wqe(struct mlx4_ib_srq *srq, int n)
{
        int offset = n << srq->msrq.wqe_shift;

        if (srq->buf.nbufs == 1)
                return srq->buf.u.direct.buf + offset;
        else
                return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf +
                        (offset & (PAGE_SIZE - 1));
}

static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;

        if (ibsrq->event_handler) {
                event.device      = ibsrq->device;
                event.element.srq = ibsrq;
                switch (type) {
                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        break;
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        event.event = IB_EVENT_SRQ_ERR;
                        break;
                default:
                        printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
                               "on SRQ %06x\n", type, srq->srqn);
                        return;
                }

                ibsrq->event_handler(&event, ibsrq->srq_context);
        }
}

struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                                  struct ib_srq_init_attr *init_attr,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_srq *srq;
        struct mlx4_wqe_srq_next_seg *next;
        int desc_size;
        int buf_size;
        int err;
        int i;

        /* Sanity check SRQ size before proceeding */
        if (init_attr->attr.max_wr >= dev->dev->caps.max_srq_wqes ||
            init_attr->attr.max_sge > dev->dev->caps.max_srq_sge)
                return ERR_PTR(-EINVAL);

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        mutex_init(&srq->mutex);
        spin_lock_init(&srq->lock);
        srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
        srq->msrq.max_gs = init_attr->attr.max_sge;

        desc_size = max(32UL,
                        roundup_pow_of_two(sizeof (struct mlx4_wqe_srq_next_seg) +
                                           srq->msrq.max_gs *
                                           sizeof (struct mlx4_wqe_data_seg)));
        srq->msrq.wqe_shift = ilog2(desc_size);

        buf_size = srq->msrq.max * desc_size;

        if (pd->uobject) {
                struct mlx4_ib_create_srq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_srq;
                }

                srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
                                        buf_size, 0);
                if (IS_ERR(srq->umem)) {
                        err = PTR_ERR(srq->umem);
                        goto err_srq;
                }

                err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),
                                    ilog2(srq->umem->page_size), &srq->mtt);
                if (err)
                        goto err_buf;

                err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);
                if (err)
                        goto err_mtt;

                err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
                                          ucmd.db_addr, &srq->db);
                if (err)
                        goto err_mtt;
        } else {
                err = mlx4_ib_db_alloc(dev, &srq->db, 0);
                if (err)
                        goto err_srq;

                *srq->db.db = 0;

                if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
                        err = -ENOMEM;
                        goto err_db;
                }

                srq->head    = 0;
                srq->tail    = srq->msrq.max - 1;
                srq->wqe_ctr = 0;

                for (i = 0; i < srq->msrq.max; ++i) {
                        next = get_wqe(srq, i);
                        next->next_wqe_index =
                                cpu_to_be16((i + 1) & (srq->msrq.max - 1));
                }

                err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
                                    &srq->mtt);
                if (err)
                        goto err_buf;

                err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
                if (err)
                        goto err_mtt;

                srq->wrid = kmalloc(srq->msrq.max * sizeof (u64), GFP_KERNEL);
                if (!srq->wrid) {
                        err = -ENOMEM;
                        goto err_mtt;
                }
        }

        err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,
                             srq->db.dma, &srq->msrq);
        if (err)
                goto err_wrid;

        srq->msrq.event = mlx4_ib_srq_event;

        if (pd->uobject)
                if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_wrid;
                }

        init_attr->attr.max_wr = srq->msrq.max - 1;

        return &srq->ibsrq;

err_wrid:
        if (pd->uobject)
                mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
        else
                kfree(srq->wrid);

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &srq->mtt);

err_buf:
        if (pd->uobject)
                ib_umem_release(srq->umem);
        else
                mlx4_buf_free(dev->dev, buf_size, &srq->buf);

err_db:
        if (!pd->uobject)
                mlx4_ib_db_free(dev, &srq->db);

err_srq:
        kfree(srq);

        return ERR_PTR(err);
}

int mlx4_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibsrq->device);
        struct mlx4_ib_srq *srq = to_msrq(ibsrq);
        int ret;

        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                if (attr->srq_limit >= srq->msrq.max)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mlx4_srq_arm(dev->dev, &srq->msrq, attr->srq_limit);
                mutex_unlock(&srq->mutex);

                if (ret)
                        return ret;
        }

        return 0;
}

int mlx4_ib_destroy_srq(struct ib_srq *srq)
{
        struct mlx4_ib_dev *dev = to_mdev(srq->device);
        struct mlx4_ib_srq *msrq = to_msrq(srq);

        mlx4_srq_free(dev->dev, &msrq->msrq);
        mlx4_mtt_cleanup(dev->dev, &msrq->mtt);

        if (srq->uobject) {
                mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
                ib_umem_release(msrq->umem);
        } else {
                kfree(msrq->wrid);
                mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
                              &msrq->buf);
                mlx4_ib_db_free(dev, &msrq->db);
        }

        kfree(msrq);

        return 0;
}

void mlx4_ib_free_srq_wqe(struct mlx4_ib_srq *srq, int wqe_index)
{
        struct mlx4_wqe_srq_next_seg *next;

        /* always called with interrupts disabled. */
        spin_lock(&srq->lock);

        next = get_wqe(srq, srq->tail);
        next->next_wqe_index = cpu_to_be16(wqe_index);
        srq->tail = wqe_index;

        spin_unlock(&srq->lock);
}

int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                          struct ib_recv_wr **bad_wr)
{
        struct mlx4_ib_srq *srq = to_msrq(ibsrq);
        struct mlx4_wqe_srq_next_seg *next;
        struct mlx4_wqe_data_seg *scat;
        unsigned long flags;
        int err = 0;
        int nreq;
        int i;

        spin_lock_irqsave(&srq->lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                srq->wrid[srq->head] = wr->wr_id;

                next = get_wqe(srq, srq->head);
                srq->head = be16_to_cpu(next->next_wqe_index);
                scat = (struct mlx4_wqe_data_seg *) (next + 1);

                for (i = 0; i < wr->num_sge; ++i) {
                        scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
                        scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
                        scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
                }

                if (i < srq->msrq.max_gs) {
                        scat[i].byte_count = 0;
                        scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
                        scat[i].addr       = 0;
                }
        }

        if (likely(nreq)) {
                srq->wqe_ctr += nreq;

                /*
                 * Make sure that descriptors are written before
                 * doorbell record.
                 */
                wmb();

                *srq->db.db = cpu_to_be32(srq->wqe_ctr);
        }

        spin_unlock_irqrestore(&srq->lock, flags);

        return err;
}