Diffstat (limited to 'drivers/infiniband/hw/cxgb3/iwch_provider.c')
 drivers/infiniband/hw/cxgb3/iwch_provider.c | 1203 ++++++++++++++++++++++++++
 1 file changed, 1203 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
new file mode 100644
index 00000000000..6861087d776
--- /dev/null
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -0,0 +1,1203 @@
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"

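/*
 * Address handles, multicast groups, and MADs are InfiniBand constructs
 * with no iWARP equivalent, so the verbs below are stubbed out with
 * -ENOSYS.
 */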
static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __FUNCTION__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *	TERMINATE,
		 *	incoming RDMA WRITE Failures
		 *	incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
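	/*
	 * Hardware queue sizes are powers of two; only the log2 of the
	 * size is stored, so ring pointers can be reduced to indices by
	 * masking.
	 */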
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

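	/*
	 * For user-mode CQs, hand back a pseudo mmap offset (the key).
	 * Userspace passes it to mmap(), and iwch_mmap() matches it via
	 * remove_mmap() to find the queue memory to map.
	 */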
	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}

static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current WCE count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __FUNCTION__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}

static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if (notify == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
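	/*
	 * User-mode CQs keep their read pointer in user memory; snapshot
	 * it under the lock so the ARM reflects the CQEs the user has
	 * already polled.
	 */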
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	return err;
}

static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	/* Save the address before freeing the entry that described it. */
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
	kfree(mhp);
	return 0;
}

static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					    struct ib_phys_buf *buffer_list,
					    int num_phys_buf,
					    int acc,
					    u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	acc = iwch_convert_access(acc);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	/* NOTE: TPT perms are backwards from BIND WR perms! */
	mhp->attr.perms = (acc & 0x1) << 3;
	mhp->attr.perms |= (acc & 0x2) << 1;
	mhp->attr.perms |= (acc & 0x4) >> 1;
	mhp->attr.perms |= (acc & 0x8) >> 3;

	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
	kfree(page_list);
	if (ret) {
		goto err;
	}
	return &mhp->ibmr;
err:
	kfree(mhp);
	return ERR_PTR(ret);
}

static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 *iova_start)
{
	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	int new_acc;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	new_acc = mhp->attr.perms;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_convert_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
	kfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = acc;
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}

static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
				      int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	shift = ffs(region->page_size) - 1;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	n = 0;
	list_for_each_entry(chunk, &region->chunk_list, list)
		n += chunk->nents;

	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err;
	}

	acc = iwch_convert_access(acc);

	i = n = 0;

	list_for_each_entry(chunk, &region->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					region->page_size * k);
			}
		}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = (acc & 0x1) << 3;
	mhp->attr.perms |= (acc & 0x2) << 1;
	mhp->attr.perms |= (acc & 0x4) >> 1;
	mhp->attr.perms |= (acc & 0x8) >> 3;
	mhp->attr.va_fbo = region->virt_base;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) region->length;
	mhp->attr.pbl_size = i;
	err = iwch_register_mem(rhp, php, mhp, shift, pages);
	kfree(pages);
	if (err)
		goto err;

	if (udata && t3b_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err:
	kfree(mhp);
	return ERR_PTR(err);
}

static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}

static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

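/*
 * QP destroy forces the QP into ERROR so the hardware flushes any
 * outstanding work, then waits for the CM reference (qhp->ep) and all
 * other references to drain before tearing down the WQ.
 */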
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
		attrs.next_state = IWCH_QP_STATE_ERROR;
		iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	}
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two. However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}
	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time. The CM sets them via a QP modify.
	 * Need to fix... I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);
	insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);

	if (udata) {
		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
	     __FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

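/*
 * iWARP devices have no P_Key or GID tables; a single pkey of 0 is
 * reported, and the GID is synthesized from the port's MAC address.
 */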
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = ~0ull;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	props->max_mtu = IB_MTU_4096;
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = IB_PORT_ACTIVE;
	props->phys_state = 0;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->qkey_viol_cntr = 0;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	PDBG("%s class dev 0x%p\n", __FUNCTION__, dev);
	return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
		       dev->rdev.rnic_info.pdev->device);
}

static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct class_device_attribute *iwch_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type,
	&class_device_attr_board_id
};

int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags =
	    (IB_DEVICE_ZERO_STAG |
	     IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.class_dev.dev = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;

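	/*
	 * The iw_cm_verbs table hooks this device into the iWARP
	 * connection manager.
	 */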
	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;
	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ibdev.class_dev,
					       iwch_class_attributes[i]);
		if (ret) {
			goto bail2;
		}
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		class_device_remove_file(&dev->ibdev.class_dev,
					 iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	return;
}