author    Harish Chegondi <harish.chegondi@intel.com>    2016-01-22 16:07:36 -0500
committer Doug Ledford <dledford@redhat.com>             2016-03-10 20:37:29 -0500
commit    4bb88e5f84326ff6343bc64a33040850f45b44d8 (patch)
tree      330775eea434d3f690cbd512807b84bc2e6fc590 /drivers/infiniband/hw/qib
parent    5196aa96e18a7b3ccbf5ec4705fe7981aee03771 (diff)

IB/qib: Remove completion queue data structures and functions from qib

Use the completion queue functionality provided by rdmavt.

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
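The conversion is mechanical at every completion call site: qib's private qib_cq_enter() is replaced by the entry point rdmavt exports. A minimal before/after sketch, using only names taken verbatim from the hunks below (kernel context assumed, not a standalone example):

	/* Before: completion generated by qib's own CQ code (qib_cq.c, deleted below). */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);

	/* After: the equivalent helper provided by rdmavt via <rdma/rdmavt_cq.h>. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);

On the registration side, the CQ verbs (create_cq, destroy_cq, resize_cq, poll_cq, req_notify_cq) are set to NULL so rdmavt supplies them, RVT_FLAG_CQ_INIT_DRIVER is dropped, and dparms.node plus dparms.cq_name are filled in so that the per-device completion worker formerly created by qib_cq_init() is owned by rdmavt instead.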
Diffstat (limited to 'drivers/infiniband/hw/qib')
-rw-r--r--   drivers/infiniband/hw/qib/Makefile       2
-rw-r--r--   drivers/infiniband/hw/qib/qib.h          2
-rw-r--r--   drivers/infiniband/hw/qib/qib_cq.c     545
-rw-r--r--   drivers/infiniband/hw/qib/qib_init.c     3
-rw-r--r--   drivers/infiniband/hw/qib/qib_qp.c       4
-rw-r--r--   drivers/infiniband/hw/qib/qib_rc.c       6
-rw-r--r--   drivers/infiniband/hw/qib/qib_ruc.c      8
-rw-r--r--   drivers/infiniband/hw/qib/qib_uc.c       2
-rw-r--r--   drivers/infiniband/hw/qib/qib_ud.c       4
-rw-r--r--   drivers/infiniband/hw/qib/qib_verbs.c   17
-rw-r--r--   drivers/infiniband/hw/qib/qib_verbs.h   62
11 files changed, 24 insertions, 631 deletions
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile
index 45db4fc21c93..d78f6889f82f 100644
--- a/drivers/infiniband/hw/qib/Makefile
+++ b/drivers/infiniband/hw/qib/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o
 
-ib_qib-y := qib_cq.o qib_diag.o qib_driver.o qib_eeprom.o \
+ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \
 	qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \
 	qib_mad.o qib_pcie.o qib_pio_copy.o \
 	qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 29cbe67f39d9..ccadece9bb13 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1097,8 +1097,6 @@ struct qib_devdata {
 	u16 psxmitwait_check_rate;
 	/* high volume overflow errors defered to tasklet */
 	struct tasklet_struct error_tasklet;
-	/* per device cq worker */
-	struct kthread_worker *worker;
 
 	int assigned_node_id; /* NUMA node closest to HCA */
 };
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
deleted file mode 100644
index 094f69495dec..000000000000
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ /dev/null
@@ -1,545 +0,0 @@
1/*
2 * Copyright (c) 2013 Intel Corporation. All rights reserved.
3 * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
4 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/err.h>
36#include <linux/slab.h>
37#include <linux/vmalloc.h>
38#include <linux/kthread.h>
39
40#include "qib_verbs.h"
41#include "qib.h"
42
43/**
44 * qib_cq_enter - add a new entry to the completion queue
45 * @cq: completion queue
46 * @entry: work completion entry to add
47 * @sig: true if @entry is a solicitated entry
48 *
49 * This may be called with qp->s_lock held.
50 */
51void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
52{
53 struct qib_cq_wc *wc;
54 unsigned long flags;
55 u32 head;
56 u32 next;
57
58 spin_lock_irqsave(&cq->lock, flags);
59
60 /*
61 * Note that the head pointer might be writable by user processes.
62 * Take care to verify it is a sane value.
63 */
64 wc = cq->queue;
65 head = wc->head;
66 if (head >= (unsigned) cq->ibcq.cqe) {
67 head = cq->ibcq.cqe;
68 next = 0;
69 } else
70 next = head + 1;
71 if (unlikely(next == wc->tail)) {
72 spin_unlock_irqrestore(&cq->lock, flags);
73 if (cq->ibcq.event_handler) {
74 struct ib_event ev;
75
76 ev.device = cq->ibcq.device;
77 ev.element.cq = &cq->ibcq;
78 ev.event = IB_EVENT_CQ_ERR;
79 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
80 }
81 return;
82 }
83 if (cq->ip) {
84 wc->uqueue[head].wr_id = entry->wr_id;
85 wc->uqueue[head].status = entry->status;
86 wc->uqueue[head].opcode = entry->opcode;
87 wc->uqueue[head].vendor_err = entry->vendor_err;
88 wc->uqueue[head].byte_len = entry->byte_len;
89 wc->uqueue[head].ex.imm_data =
90 (__u32 __force)entry->ex.imm_data;
91 wc->uqueue[head].qp_num = entry->qp->qp_num;
92 wc->uqueue[head].src_qp = entry->src_qp;
93 wc->uqueue[head].wc_flags = entry->wc_flags;
94 wc->uqueue[head].pkey_index = entry->pkey_index;
95 wc->uqueue[head].slid = entry->slid;
96 wc->uqueue[head].sl = entry->sl;
97 wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
98 wc->uqueue[head].port_num = entry->port_num;
99 /* Make sure entry is written before the head index. */
100 smp_wmb();
101 } else
102 wc->kqueue[head] = *entry;
103 wc->head = next;
104
105 if (cq->notify == IB_CQ_NEXT_COMP ||
106 (cq->notify == IB_CQ_SOLICITED &&
107 (solicited || entry->status != IB_WC_SUCCESS))) {
108 struct kthread_worker *worker;
109 /*
110 * This will cause send_complete() to be called in
111 * another thread.
112 */
113 smp_rmb();
114 worker = cq->dd->worker;
115 if (likely(worker)) {
116 cq->notify = IB_CQ_NONE;
117 cq->triggered++;
118 queue_kthread_work(worker, &cq->comptask);
119 }
120 }
121
122 spin_unlock_irqrestore(&cq->lock, flags);
123}
124
125/**
126 * qib_poll_cq - poll for work completion entries
127 * @ibcq: the completion queue to poll
128 * @num_entries: the maximum number of entries to return
129 * @entry: pointer to array where work completions are placed
130 *
131 * Returns the number of completion entries polled.
132 *
133 * This may be called from interrupt context. Also called by ib_poll_cq()
134 * in the generic verbs code.
135 */
136int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
137{
138 struct qib_cq *cq = to_icq(ibcq);
139 struct qib_cq_wc *wc;
140 unsigned long flags;
141 int npolled;
142 u32 tail;
143
144 /* The kernel can only poll a kernel completion queue */
145 if (cq->ip) {
146 npolled = -EINVAL;
147 goto bail;
148 }
149
150 spin_lock_irqsave(&cq->lock, flags);
151
152 wc = cq->queue;
153 tail = wc->tail;
154 if (tail > (u32) cq->ibcq.cqe)
155 tail = (u32) cq->ibcq.cqe;
156 for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
157 if (tail == wc->head)
158 break;
159 /* The kernel doesn't need a RMB since it has the lock. */
160 *entry = wc->kqueue[tail];
161 if (tail >= cq->ibcq.cqe)
162 tail = 0;
163 else
164 tail++;
165 }
166 wc->tail = tail;
167
168 spin_unlock_irqrestore(&cq->lock, flags);
169
170bail:
171 return npolled;
172}
173
174static void send_complete(struct kthread_work *work)
175{
176 struct qib_cq *cq = container_of(work, struct qib_cq, comptask);
177
178 /*
179 * The completion handler will most likely rearm the notification
180 * and poll for all pending entries. If a new completion entry
181 * is added while we are in this routine, queue_work()
182 * won't call us again until we return so we check triggered to
183 * see if we need to call the handler again.
184 */
185 for (;;) {
186 u8 triggered = cq->triggered;
187
188 /*
189 * IPoIB connected mode assumes the callback is from a
190 * soft IRQ. We simulate this by blocking "bottom halves".
191 * See the implementation for ipoib_cm_handle_tx_wc(),
192 * netif_tx_lock_bh() and netif_tx_lock().
193 */
194 local_bh_disable();
195 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
196 local_bh_enable();
197
198 if (cq->triggered == triggered)
199 return;
200 }
201}
202
203/**
204 * qib_create_cq - create a completion queue
205 * @ibdev: the device this completion queue is attached to
206 * @attr: creation attributes
207 * @context: unused by the QLogic_IB driver
208 * @udata: user data for libibverbs.so
209 *
210 * Returns a pointer to the completion queue or negative errno values
211 * for failure.
212 *
213 * Called by ib_create_cq() in the generic verbs code.
214 */
215struct ib_cq *qib_create_cq(struct ib_device *ibdev,
216 const struct ib_cq_init_attr *attr,
217 struct ib_ucontext *context,
218 struct ib_udata *udata)
219{
220 int entries = attr->cqe;
221 struct qib_ibdev *dev = to_idev(ibdev);
222 struct qib_cq *cq;
223 struct qib_cq_wc *wc;
224 struct ib_cq *ret;
225 u32 sz;
226
227 if (attr->flags)
228 return ERR_PTR(-EINVAL);
229
230 if (entries < 1 || entries > ib_qib_max_cqes) {
231 ret = ERR_PTR(-EINVAL);
232 goto done;
233 }
234
235 /* Allocate the completion queue structure. */
236 cq = kmalloc(sizeof(*cq), GFP_KERNEL);
237 if (!cq) {
238 ret = ERR_PTR(-ENOMEM);
239 goto done;
240 }
241
242 /*
243 * Allocate the completion queue entries and head/tail pointers.
244 * This is allocated separately so that it can be resized and
245 * also mapped into user space.
246 * We need to use vmalloc() in order to support mmap and large
247 * numbers of entries.
248 */
249 sz = sizeof(*wc);
250 if (udata && udata->outlen >= sizeof(__u64))
251 sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
252 else
253 sz += sizeof(struct ib_wc) * (entries + 1);
254 wc = vmalloc_user(sz);
255 if (!wc) {
256 ret = ERR_PTR(-ENOMEM);
257 goto bail_cq;
258 }
259
260 /*
261 * Return the address of the WC as the offset to mmap.
262 * See qib_mmap() for details.
263 */
264 if (udata && udata->outlen >= sizeof(__u64)) {
265 int err;
266
267 cq->ip = rvt_create_mmap_info(&dev->rdi, sz, context, wc);
268 if (!cq->ip) {
269 ret = ERR_PTR(-ENOMEM);
270 goto bail_wc;
271 }
272
273 err = ib_copy_to_udata(udata, &cq->ip->offset,
274 sizeof(cq->ip->offset));
275 if (err) {
276 ret = ERR_PTR(err);
277 goto bail_ip;
278 }
279 } else
280 cq->ip = NULL;
281
282 spin_lock(&dev->n_cqs_lock);
283 if (dev->n_cqs_allocated == ib_qib_max_cqs) {
284 spin_unlock(&dev->n_cqs_lock);
285 ret = ERR_PTR(-ENOMEM);
286 goto bail_ip;
287 }
288
289 dev->n_cqs_allocated++;
290 spin_unlock(&dev->n_cqs_lock);
291
292 if (cq->ip) {
293 spin_lock_irq(&dev->rdi.pending_lock);
294 list_add(&cq->ip->pending_mmaps, &dev->rdi.pending_mmaps);
295 spin_unlock_irq(&dev->rdi.pending_lock);
296 }
297
298 /*
299 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
300 * The number of entries should be >= the number requested or return
301 * an error.
302 */
303 cq->dd = dd_from_dev(dev);
304 cq->ibcq.cqe = entries;
305 cq->notify = IB_CQ_NONE;
306 cq->triggered = 0;
307 spin_lock_init(&cq->lock);
308 init_kthread_work(&cq->comptask, send_complete);
309 wc->head = 0;
310 wc->tail = 0;
311 cq->queue = wc;
312
313 ret = &cq->ibcq;
314
315 goto done;
316
317bail_ip:
318 kfree(cq->ip);
319bail_wc:
320 vfree(wc);
321bail_cq:
322 kfree(cq);
323done:
324 return ret;
325}
326
327/**
328 * qib_destroy_cq - destroy a completion queue
329 * @ibcq: the completion queue to destroy.
330 *
331 * Returns 0 for success.
332 *
333 * Called by ib_destroy_cq() in the generic verbs code.
334 */
335int qib_destroy_cq(struct ib_cq *ibcq)
336{
337 struct qib_ibdev *dev = to_idev(ibcq->device);
338 struct qib_cq *cq = to_icq(ibcq);
339
340 flush_kthread_work(&cq->comptask);
341 spin_lock(&dev->n_cqs_lock);
342 dev->n_cqs_allocated--;
343 spin_unlock(&dev->n_cqs_lock);
344 if (cq->ip)
345 kref_put(&cq->ip->ref, rvt_release_mmap_info);
346 else
347 vfree(cq->queue);
348 kfree(cq);
349
350 return 0;
351}
352
353/**
354 * qib_req_notify_cq - change the notification type for a completion queue
355 * @ibcq: the completion queue
356 * @notify_flags: the type of notification to request
357 *
358 * Returns 0 for success.
359 *
360 * This may be called from interrupt context. Also called by
361 * ib_req_notify_cq() in the generic verbs code.
362 */
363int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
364{
365 struct qib_cq *cq = to_icq(ibcq);
366 unsigned long flags;
367 int ret = 0;
368
369 spin_lock_irqsave(&cq->lock, flags);
370 /*
371 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
372 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
373 */
374 if (cq->notify != IB_CQ_NEXT_COMP)
375 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
376
377 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
378 cq->queue->head != cq->queue->tail)
379 ret = 1;
380
381 spin_unlock_irqrestore(&cq->lock, flags);
382
383 return ret;
384}
385
386/**
387 * qib_resize_cq - change the size of the CQ
388 * @ibcq: the completion queue
389 *
390 * Returns 0 for success.
391 */
392int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
393{
394 struct qib_cq *cq = to_icq(ibcq);
395 struct qib_cq_wc *old_wc;
396 struct qib_cq_wc *wc;
397 u32 head, tail, n;
398 int ret;
399 u32 sz;
400
401 if (cqe < 1 || cqe > ib_qib_max_cqes) {
402 ret = -EINVAL;
403 goto bail;
404 }
405
406 /*
407 * Need to use vmalloc() if we want to support large #s of entries.
408 */
409 sz = sizeof(*wc);
410 if (udata && udata->outlen >= sizeof(__u64))
411 sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
412 else
413 sz += sizeof(struct ib_wc) * (cqe + 1);
414 wc = vmalloc_user(sz);
415 if (!wc) {
416 ret = -ENOMEM;
417 goto bail;
418 }
419
420 /* Check that we can write the offset to mmap. */
421 if (udata && udata->outlen >= sizeof(__u64)) {
422 __u64 offset = 0;
423
424 ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
425 if (ret)
426 goto bail_free;
427 }
428
429 spin_lock_irq(&cq->lock);
430 /*
431 * Make sure head and tail are sane since they
432 * might be user writable.
433 */
434 old_wc = cq->queue;
435 head = old_wc->head;
436 if (head > (u32) cq->ibcq.cqe)
437 head = (u32) cq->ibcq.cqe;
438 tail = old_wc->tail;
439 if (tail > (u32) cq->ibcq.cqe)
440 tail = (u32) cq->ibcq.cqe;
441 if (head < tail)
442 n = cq->ibcq.cqe + 1 + head - tail;
443 else
444 n = head - tail;
445 if (unlikely((u32)cqe < n)) {
446 ret = -EINVAL;
447 goto bail_unlock;
448 }
449 for (n = 0; tail != head; n++) {
450 if (cq->ip)
451 wc->uqueue[n] = old_wc->uqueue[tail];
452 else
453 wc->kqueue[n] = old_wc->kqueue[tail];
454 if (tail == (u32) cq->ibcq.cqe)
455 tail = 0;
456 else
457 tail++;
458 }
459 cq->ibcq.cqe = cqe;
460 wc->head = n;
461 wc->tail = 0;
462 cq->queue = wc;
463 spin_unlock_irq(&cq->lock);
464
465 vfree(old_wc);
466
467 if (cq->ip) {
468 struct qib_ibdev *dev = to_idev(ibcq->device);
469 struct rvt_mmap_info *ip = cq->ip;
470
471 rvt_update_mmap_info(&dev->rdi, ip, sz, wc);
472
473 /*
474 * Return the offset to mmap.
475 * See qib_mmap() for details.
476 */
477 if (udata && udata->outlen >= sizeof(__u64)) {
478 ret = ib_copy_to_udata(udata, &ip->offset,
479 sizeof(ip->offset));
480 if (ret)
481 goto bail;
482 }
483
484 spin_lock_irq(&dev->rdi.pending_lock);
485 if (list_empty(&ip->pending_mmaps))
486 list_add(&ip->pending_mmaps, &dev->rdi.pending_mmaps);
487 spin_unlock_irq(&dev->rdi.pending_lock);
488 }
489
490 ret = 0;
491 goto bail;
492
493bail_unlock:
494 spin_unlock_irq(&cq->lock);
495bail_free:
496 vfree(wc);
497bail:
498 return ret;
499}
500
501int qib_cq_init(struct qib_devdata *dd)
502{
503 int ret = 0;
504 int cpu;
505 struct task_struct *task;
506
507 if (dd->worker)
508 return 0;
509 dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
510 if (!dd->worker)
511 return -ENOMEM;
512 init_kthread_worker(dd->worker);
513 task = kthread_create_on_node(
514 kthread_worker_fn,
515 dd->worker,
516 dd->assigned_node_id,
517 "qib_cq%d", dd->unit);
518 if (IS_ERR(task))
519 goto task_fail;
520 cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id));
521 kthread_bind(task, cpu);
522 wake_up_process(task);
523out:
524 return ret;
525task_fail:
526 ret = PTR_ERR(task);
527 kfree(dd->worker);
528 dd->worker = NULL;
529 goto out;
530}
531
532void qib_cq_exit(struct qib_devdata *dd)
533{
534 struct kthread_worker *worker;
535
536 worker = dd->worker;
537 if (!worker)
538 return;
539 /* blocks future queuing from send_complete() */
540 dd->worker = NULL;
541 smp_wmb();
542 flush_kthread_worker(worker);
543 kthread_stop(worker->task);
544 kfree(worker);
545}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index a3c74bb4df62..3f062f0dd9d8 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -457,8 +457,6 @@ static int loadtime_init(struct qib_devdata *dd)
 	init_timer(&dd->intrchk_timer);
 	dd->intrchk_timer.function = verify_interrupt;
 	dd->intrchk_timer.data = (unsigned long) dd;
-
-	ret = qib_cq_init(dd);
 done:
 	return ret;
 }
@@ -1435,7 +1433,6 @@ static void cleanup_device_data(struct qib_devdata *dd)
 	}
 	kfree(tmp);
 	kfree(dd->boardname);
-	qib_cq_exit(dd);
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 83dec693845e..6e5a05e35fef 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -473,7 +473,7 @@ int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
 		wc.wr_id = qp->r_wr_id;
 		wc.status = err;
-		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
 	}
 	wc.status = IB_WC_WR_FLUSH_ERR;
 
@@ -496,7 +496,7 @@ int qib_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
 		wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
 		if (++tail >= qp->r_rq.size)
 			tail = 0;
-		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
 	}
 	wq->tail = tail;
 
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e118004fab17..8be5d45107ab 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1026,7 +1026,7 @@ void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr)
 			wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
 			wc.byte_len = wqe->length;
 			wc.qp = &qp->ibqp;
-			qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
 		}
 		if (++qp->s_last >= qp->s_size)
 			qp->s_last = 0;
@@ -1082,7 +1082,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 		wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
 		wc.byte_len = wqe->length;
 		wc.qp = &qp->ibqp;
-		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
+		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0);
 	}
 	if (++qp->s_last >= qp->s_size)
 		qp->s_last = 0;
@@ -2048,7 +2048,7 @@ send_last:
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
 		/* Signal completion event if the solicited bit is set. */
-		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
 			     (ohdr->bth[0] &
 			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
 		break;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index f7b3bb794d1b..80f113078720 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -120,7 +120,7 @@ bad_lkey:
 	wc.opcode = IB_WC_RECV;
 	wc.qp = &qp->ibqp;
 	/* Signal solicited completion event. */
-	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
 	ret = 0;
 bail:
 	return ret;
@@ -563,8 +563,8 @@ again:
 	wc.sl = qp->remote_ah_attr.sl;
 	wc.port_num = 1;
 	/* Signal completion event if the solicited bit is set. */
-	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-		     wqe->wr.send_flags & IB_SEND_SOLICITED);
+	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+		     wqe->wr.send_flags & IB_SEND_SOLICITED);
 
 send_comp:
 	spin_lock_irqsave(&sqp->s_lock, flags);
@@ -806,7 +806,7 @@ void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
 		wc.qp = &qp->ibqp;
 		if (status == IB_WC_SUCCESS)
 			wc.byte_len = wqe->length;
-		qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
+		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
 			     status != IB_WC_SUCCESS);
 	}
 
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index deceb459e990..caf0191651a9 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -415,7 +415,7 @@ last_imm:
 		wc.dlid_path_bits = 0;
 		wc.port_num = 0;
 		/* Signal completion event if the solicited bit is set. */
-		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
 			     (ohdr->bth[0] &
 			      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
 		break;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 76f854e7aee8..abca52782a4f 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -217,7 +217,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
 	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
 	wc.port_num = qp->port_num;
 	/* Signal completion event if the solicited bit is set. */
-	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
 		     swqe->wr.send_flags & IB_SEND_SOLICITED);
 	ibp->rvp.n_loop_pkts++;
 bail_unlock:
@@ -583,7 +583,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
 		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
 	wc.port_num = qp->port_num;
 	/* Signal completion event if the solicited bit is set. */
-	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
+	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
 		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
 	return;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index a1815028ef72..a27166b67a78 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1909,7 +1909,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
 		init_ibport(ppd + i);
 
 	/* Only need to initialize non-zero fields. */
-	spin_lock_init(&dev->n_cqs_lock);
 	spin_lock_init(&dev->n_qps_lock);
 	spin_lock_init(&dev->n_srqs_lock);
 	spin_lock_init(&dev->n_mcast_grps_lock);
@@ -2021,11 +2020,11 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	ibdev->post_send = qib_post_send;
 	ibdev->post_recv = qib_post_receive;
 	ibdev->post_srq_recv = qib_post_srq_receive;
-	ibdev->create_cq = qib_create_cq;
-	ibdev->destroy_cq = qib_destroy_cq;
-	ibdev->resize_cq = qib_resize_cq;
-	ibdev->poll_cq = qib_poll_cq;
-	ibdev->req_notify_cq = qib_req_notify_cq;
+	ibdev->create_cq = NULL;
+	ibdev->destroy_cq = NULL;
+	ibdev->resize_cq = NULL;
+	ibdev->poll_cq = NULL;
+	ibdev->req_notify_cq = NULL;
 	ibdev->get_dma_mr = NULL;
 	ibdev->reg_user_mr = NULL;
 	ibdev->dereg_mr = NULL;
@@ -2059,7 +2058,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps;
 	dd->verbs_dev.rdi.driver_f.notify_qp_reset = notify_qp_reset;
 
-	dd->verbs_dev.rdi.flags = RVT_FLAG_CQ_INIT_DRIVER;
+	dd->verbs_dev.rdi.flags = 0;
 
 	dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size;
 	dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size;
@@ -2070,6 +2069,10 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	dd->verbs_dev.rdi.dparms.qos_shift = 1;
 	dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
 	dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd);
+	dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
+	snprintf(dd->verbs_dev.rdi.dparms.cq_name,
+		 sizeof(dd->verbs_dev.rdi.dparms.cq_name),
+		 "qib_cq%d", dd->unit);
 
 	qib_fill_device_attr(dd);
 
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 3383d565b8db..818ac8717386 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -46,6 +46,7 @@
 #include <rdma/ib_pack.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/rdma_vt.h>
+#include <rdma/rdmavt_cq.h>
 
 struct qib_ctxtdata;
 struct qib_pportdata;
@@ -61,12 +62,6 @@ struct qib_verbs_txreq;
  */
 #define QIB_UVERBS_ABI_VERSION 2
 
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1)
-
 #define IB_SEQ_NAK (3 << 29)
 
 /* AETH NAK opcode values */
@@ -220,35 +215,6 @@ struct qib_mcast {
 };
 
 /*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct qib_cq_wc {
-	u32 head; /* index of next entry to fill */
-	u32 tail; /* index of next ib_poll_cq() entry */
-	union {
-		/* these are actually size ibcq.cqe + 1 */
-		struct ib_uverbs_wc uqueue[0];
-		struct ib_wc kqueue[0];
-	};
-};
-
-/*
- * The completion queue structure.
- */
-struct qib_cq {
-	struct ib_cq ibcq;
-	struct kthread_work comptask;
-	struct qib_devdata *dd;
-	spinlock_t lock; /* protect changes in this struct */
-	u8 notify;
-	u8 triggered;
-	struct qib_cq_wc *queue;
-	struct rvt_mmap_info *ip;
-};
-
-/*
  * qib specific data structure that will be hidden from rvt after the queue pair
  * is made common.
  */
@@ -345,8 +311,6 @@ struct qib_ibdev {
 	u32 n_piowait;
 	u32 n_txwait;
 
-	u32 n_cqs_allocated; /* number of CQs allocated for device */
-	spinlock_t n_cqs_lock;
 	u32 n_qps_allocated; /* number of QPs allocated for device */
 	spinlock_t n_qps_lock;
 	u32 n_srqs_allocated; /* number of SRQs allocated for device */
@@ -375,11 +339,6 @@ struct qib_verbs_counters {
 	u32 vl15_dropped;
 };
 
-static inline struct qib_cq *to_icq(struct ib_cq *ibcq)
-{
-	return container_of(ibcq, struct qib_cq, ibcq);
-}
-
 static inline struct rvt_qp *to_iqp(struct ib_qp *ibqp)
 {
 	return container_of(ibqp, struct rvt_qp, ibqp);
@@ -545,25 +504,6 @@ int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
 
 int qib_destroy_srq(struct ib_srq *ibsrq);
 
-int qib_cq_init(struct qib_devdata *dd);
-
-void qib_cq_exit(struct qib_devdata *dd);
-
-void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig);
-
-int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *qib_create_cq(struct ib_device *ibdev,
-			    const struct ib_cq_init_attr *attr,
-			    struct ib_ucontext *context,
-			    struct ib_udata *udata);
-
-int qib_destroy_cq(struct ib_cq *ibcq);
-
-int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-
-int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
 void mr_rcu_callback(struct rcu_head *list);
 
 static inline void qib_put_ss(struct rvt_sge_state *ss)