diff options
| author | Jubin John <jubin.john@intel.com> | 2016-02-03 17:20:02 -0500 |
|---|---|---|
| committer | Doug Ledford <dledford@redhat.com> | 2016-03-10 20:37:33 -0500 |
| commit | fd0bf5bedfbd898bddc9ea8e646b4cb3779ec9ab (patch) | |
| tree | 8c3d9fc3480a8a0fb3ab7a2a168f2ea9a0b452bf /drivers/infiniband/hw/qib | |
| parent | b7b3cf44647cab47f6b7d8f10bfdc92cafbb952f (diff) | |
IB/qib: Remove srq functionality
srq functionality is now in rdmavt. Remove it from the qib driver.
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Jubin John <jubin.john@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/hw/qib')
| -rw-r--r-- | drivers/infiniband/hw/qib/Makefile | 2 | ||||
| -rw-r--r-- | drivers/infiniband/hw/qib/qib_srq.c | 380 | ||||
| -rw-r--r-- | drivers/infiniband/hw/qib/qib_verbs.c | 6 | ||||
| -rw-r--r-- | drivers/infiniband/hw/qib/qib_verbs.h | 17 |
4 files changed, 1 insertion(+), 404 deletions(-)
diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile index 8d5e36bb3942..79ebd79e8405 100644 --- a/drivers/infiniband/hw/qib/Makefile +++ b/drivers/infiniband/hw/qib/Makefile | |||
| @@ -3,7 +3,7 @@ obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o | |||
| 3 | ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \ | 3 | ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \ |
| 4 | qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \ | 4 | qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \ |
| 5 | qib_mad.o qib_pcie.o qib_pio_copy.o \ | 5 | qib_mad.o qib_pcie.o qib_pio_copy.o \ |
| 6 | qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \ | 6 | qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o \ |
| 7 | qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \ | 7 | qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \ |
| 8 | qib_user_pages.o qib_user_sdma.o qib_iba7220.o \ | 8 | qib_user_pages.o qib_user_sdma.o qib_iba7220.o \ |
| 9 | qib_sd7220.o qib_iba7322.o qib_verbs.o | 9 | qib_sd7220.o qib_iba7322.o qib_verbs.o |
diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c deleted file mode 100644 index dff8808dbeb3..000000000000 --- a/drivers/infiniband/hw/qib/qib_srq.c +++ /dev/null | |||
| @@ -1,380 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | ||
| 3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
| 4 | * | ||
| 5 | * This software is available to you under a choice of one of two | ||
| 6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
| 7 | * General Public License (GPL) Version 2, available from the file | ||
| 8 | * COPYING in the main directory of this source tree, or the | ||
| 9 | * OpenIB.org BSD license below: | ||
| 10 | * | ||
| 11 | * Redistribution and use in source and binary forms, with or | ||
| 12 | * without modification, are permitted provided that the following | ||
| 13 | * conditions are met: | ||
| 14 | * | ||
| 15 | * - Redistributions of source code must retain the above | ||
| 16 | * copyright notice, this list of conditions and the following | ||
| 17 | * disclaimer. | ||
| 18 | * | ||
| 19 | * - Redistributions in binary form must reproduce the above | ||
| 20 | * copyright notice, this list of conditions and the following | ||
| 21 | * disclaimer in the documentation and/or other materials | ||
| 22 | * provided with the distribution. | ||
| 23 | * | ||
| 24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
| 26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
| 28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
| 29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
| 30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
| 31 | * SOFTWARE. | ||
| 32 | */ | ||
| 33 | |||
| 34 | #include <linux/err.h> | ||
| 35 | #include <linux/slab.h> | ||
| 36 | #include <linux/vmalloc.h> | ||
| 37 | |||
| 38 | #include "qib_verbs.h" | ||
| 39 | |||
| 40 | /** | ||
| 41 | * qib_post_srq_receive - post a receive on a shared receive queue | ||
| 42 | * @ibsrq: the SRQ to post the receive on | ||
| 43 | * @wr: the list of work requests to post | ||
| 44 | * @bad_wr: A pointer to the first WR to cause a problem is put here | ||
| 45 | * | ||
| 46 | * This may be called from interrupt context. | ||
| 47 | */ | ||
| 48 | int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||
| 49 | struct ib_recv_wr **bad_wr) | ||
| 50 | { | ||
| 51 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); | ||
| 52 | struct rvt_rwq *wq; | ||
| 53 | unsigned long flags; | ||
| 54 | int ret; | ||
| 55 | |||
| 56 | for (; wr; wr = wr->next) { | ||
| 57 | struct rvt_rwqe *wqe; | ||
| 58 | u32 next; | ||
| 59 | int i; | ||
| 60 | |||
| 61 | if ((unsigned) wr->num_sge > srq->rq.max_sge) { | ||
| 62 | *bad_wr = wr; | ||
| 63 | ret = -EINVAL; | ||
| 64 | goto bail; | ||
| 65 | } | ||
| 66 | |||
| 67 | spin_lock_irqsave(&srq->rq.lock, flags); | ||
| 68 | wq = srq->rq.wq; | ||
| 69 | next = wq->head + 1; | ||
| 70 | if (next >= srq->rq.size) | ||
| 71 | next = 0; | ||
| 72 | if (next == wq->tail) { | ||
| 73 | spin_unlock_irqrestore(&srq->rq.lock, flags); | ||
| 74 | *bad_wr = wr; | ||
| 75 | ret = -ENOMEM; | ||
| 76 | goto bail; | ||
| 77 | } | ||
| 78 | |||
| 79 | wqe = get_rwqe_ptr(&srq->rq, wq->head); | ||
| 80 | wqe->wr_id = wr->wr_id; | ||
| 81 | wqe->num_sge = wr->num_sge; | ||
| 82 | for (i = 0; i < wr->num_sge; i++) | ||
| 83 | wqe->sg_list[i] = wr->sg_list[i]; | ||
| 84 | /* Make sure queue entry is written before the head index. */ | ||
| 85 | smp_wmb(); | ||
| 86 | wq->head = next; | ||
| 87 | spin_unlock_irqrestore(&srq->rq.lock, flags); | ||
| 88 | } | ||
| 89 | ret = 0; | ||
| 90 | |||
| 91 | bail: | ||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | |||
| 95 | /** | ||
| 96 | * qib_create_srq - create a shared receive queue | ||
| 97 | * @ibpd: the protection domain of the SRQ to create | ||
| 98 | * @srq_init_attr: the attributes of the SRQ | ||
| 99 | * @udata: data from libibverbs when creating a user SRQ | ||
| 100 | */ | ||
| 101 | struct ib_srq *qib_create_srq(struct ib_pd *ibpd, | ||
| 102 | struct ib_srq_init_attr *srq_init_attr, | ||
| 103 | struct ib_udata *udata) | ||
| 104 | { | ||
| 105 | struct qib_ibdev *dev = to_idev(ibpd->device); | ||
| 106 | struct rvt_srq *srq; | ||
| 107 | u32 sz; | ||
| 108 | struct ib_srq *ret; | ||
| 109 | |||
| 110 | if (srq_init_attr->srq_type != IB_SRQT_BASIC) { | ||
| 111 | ret = ERR_PTR(-ENOSYS); | ||
| 112 | goto done; | ||
| 113 | } | ||
| 114 | |||
| 115 | if (srq_init_attr->attr.max_sge == 0 || | ||
| 116 | srq_init_attr->attr.max_sge > ib_qib_max_srq_sges || | ||
| 117 | srq_init_attr->attr.max_wr == 0 || | ||
| 118 | srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) { | ||
| 119 | ret = ERR_PTR(-EINVAL); | ||
| 120 | goto done; | ||
| 121 | } | ||
| 122 | |||
| 123 | srq = kmalloc(sizeof(*srq), GFP_KERNEL); | ||
| 124 | if (!srq) { | ||
| 125 | ret = ERR_PTR(-ENOMEM); | ||
| 126 | goto done; | ||
| 127 | } | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Need to use vmalloc() if we want to support large #s of entries. | ||
| 131 | */ | ||
| 132 | srq->rq.size = srq_init_attr->attr.max_wr + 1; | ||
| 133 | srq->rq.max_sge = srq_init_attr->attr.max_sge; | ||
| 134 | sz = sizeof(struct ib_sge) * srq->rq.max_sge + | ||
| 135 | sizeof(struct rvt_rwqe); | ||
| 136 | srq->rq.wq = vmalloc_user(sizeof(struct rvt_rwq) + srq->rq.size * sz); | ||
| 137 | if (!srq->rq.wq) { | ||
| 138 | ret = ERR_PTR(-ENOMEM); | ||
| 139 | goto bail_srq; | ||
| 140 | } | ||
| 141 | |||
| 142 | /* | ||
| 143 | * Return the address of the RWQ as the offset to mmap. | ||
| 144 | * See qib_mmap() for details. | ||
| 145 | */ | ||
| 146 | if (udata && udata->outlen >= sizeof(__u64)) { | ||
| 147 | int err; | ||
| 148 | u32 s = sizeof(struct rvt_rwq) + srq->rq.size * sz; | ||
| 149 | |||
| 150 | srq->ip = | ||
| 151 | rvt_create_mmap_info(&dev->rdi, s, ibpd->uobject->context, | ||
| 152 | srq->rq.wq); | ||
| 153 | if (!srq->ip) { | ||
| 154 | ret = ERR_PTR(-ENOMEM); | ||
| 155 | goto bail_wq; | ||
| 156 | } | ||
| 157 | |||
| 158 | err = ib_copy_to_udata(udata, &srq->ip->offset, | ||
| 159 | sizeof(srq->ip->offset)); | ||
| 160 | if (err) { | ||
| 161 | ret = ERR_PTR(err); | ||
| 162 | goto bail_ip; | ||
| 163 | } | ||
| 164 | } else | ||
| 165 | srq->ip = NULL; | ||
| 166 | |||
| 167 | /* | ||
| 168 | * ib_create_srq() will initialize srq->ibsrq. | ||
| 169 | */ | ||
| 170 | spin_lock_init(&srq->rq.lock); | ||
| 171 | srq->rq.wq->head = 0; | ||
| 172 | srq->rq.wq->tail = 0; | ||
| 173 | srq->limit = srq_init_attr->attr.srq_limit; | ||
| 174 | |||
| 175 | spin_lock(&dev->n_srqs_lock); | ||
| 176 | if (dev->n_srqs_allocated == ib_qib_max_srqs) { | ||
| 177 | spin_unlock(&dev->n_srqs_lock); | ||
| 178 | ret = ERR_PTR(-ENOMEM); | ||
| 179 | goto bail_ip; | ||
| 180 | } | ||
| 181 | |||
| 182 | dev->n_srqs_allocated++; | ||
| 183 | spin_unlock(&dev->n_srqs_lock); | ||
| 184 | |||
| 185 | if (srq->ip) { | ||
| 186 | spin_lock_irq(&dev->rdi.pending_lock); | ||
| 187 | list_add(&srq->ip->pending_mmaps, &dev->rdi.pending_mmaps); | ||
| 188 | spin_unlock_irq(&dev->rdi.pending_lock); | ||
| 189 | } | ||
| 190 | |||
| 191 | ret = &srq->ibsrq; | ||
| 192 | goto done; | ||
| 193 | |||
| 194 | bail_ip: | ||
| 195 | kfree(srq->ip); | ||
| 196 | bail_wq: | ||
| 197 | vfree(srq->rq.wq); | ||
| 198 | bail_srq: | ||
| 199 | kfree(srq); | ||
| 200 | done: | ||
| 201 | return ret; | ||
| 202 | } | ||
| 203 | |||
| 204 | /** | ||
| 205 | * qib_modify_srq - modify a shared receive queue | ||
| 206 | * @ibsrq: the SRQ to modify | ||
| 207 | * @attr: the new attributes of the SRQ | ||
| 208 | * @attr_mask: indicates which attributes to modify | ||
| 209 | * @udata: user data for libibverbs.so | ||
| 210 | */ | ||
| 211 | int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | ||
| 212 | enum ib_srq_attr_mask attr_mask, | ||
| 213 | struct ib_udata *udata) | ||
| 214 | { | ||
| 215 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); | ||
| 216 | struct rvt_rwq *wq; | ||
| 217 | int ret = 0; | ||
| 218 | |||
| 219 | if (attr_mask & IB_SRQ_MAX_WR) { | ||
| 220 | struct rvt_rwq *owq; | ||
| 221 | struct rvt_rwqe *p; | ||
| 222 | u32 sz, size, n, head, tail; | ||
| 223 | |||
| 224 | /* Check that the requested sizes are below the limits. */ | ||
| 225 | if ((attr->max_wr > ib_qib_max_srq_wrs) || | ||
| 226 | ((attr_mask & IB_SRQ_LIMIT) ? | ||
| 227 | attr->srq_limit : srq->limit) > attr->max_wr) { | ||
| 228 | ret = -EINVAL; | ||
| 229 | goto bail; | ||
| 230 | } | ||
| 231 | |||
| 232 | sz = sizeof(struct rvt_rwqe) + | ||
| 233 | srq->rq.max_sge * sizeof(struct ib_sge); | ||
| 234 | size = attr->max_wr + 1; | ||
| 235 | wq = vmalloc_user(sizeof(struct rvt_rwq) + size * sz); | ||
| 236 | if (!wq) { | ||
| 237 | ret = -ENOMEM; | ||
| 238 | goto bail; | ||
| 239 | } | ||
| 240 | |||
| 241 | /* Check that we can write the offset to mmap. */ | ||
| 242 | if (udata && udata->inlen >= sizeof(__u64)) { | ||
| 243 | __u64 offset_addr; | ||
| 244 | __u64 offset = 0; | ||
| 245 | |||
| 246 | ret = ib_copy_from_udata(&offset_addr, udata, | ||
| 247 | sizeof(offset_addr)); | ||
| 248 | if (ret) | ||
| 249 | goto bail_free; | ||
| 250 | udata->outbuf = | ||
| 251 | (void __user *) (unsigned long) offset_addr; | ||
| 252 | ret = ib_copy_to_udata(udata, &offset, | ||
| 253 | sizeof(offset)); | ||
| 254 | if (ret) | ||
| 255 | goto bail_free; | ||
| 256 | } | ||
| 257 | |||
| 258 | spin_lock_irq(&srq->rq.lock); | ||
| 259 | /* | ||
| 260 | * validate head and tail pointer values and compute | ||
| 261 | * the number of remaining WQEs. | ||
| 262 | */ | ||
| 263 | owq = srq->rq.wq; | ||
| 264 | head = owq->head; | ||
| 265 | tail = owq->tail; | ||
| 266 | if (head >= srq->rq.size || tail >= srq->rq.size) { | ||
| 267 | ret = -EINVAL; | ||
| 268 | goto bail_unlock; | ||
| 269 | } | ||
| 270 | n = head; | ||
| 271 | if (n < tail) | ||
| 272 | n += srq->rq.size - tail; | ||
| 273 | else | ||
| 274 | n -= tail; | ||
| 275 | if (size <= n) { | ||
| 276 | ret = -EINVAL; | ||
| 277 | goto bail_unlock; | ||
| 278 | } | ||
| 279 | n = 0; | ||
| 280 | p = wq->wq; | ||
| 281 | while (tail != head) { | ||
| 282 | struct rvt_rwqe *wqe; | ||
| 283 | int i; | ||
| 284 | |||
| 285 | wqe = get_rwqe_ptr(&srq->rq, tail); | ||
| 286 | p->wr_id = wqe->wr_id; | ||
| 287 | p->num_sge = wqe->num_sge; | ||
| 288 | for (i = 0; i < wqe->num_sge; i++) | ||
| 289 | p->sg_list[i] = wqe->sg_list[i]; | ||
| 290 | n++; | ||
| 291 | p = (struct rvt_rwqe *)((char *)p + sz); | ||
| 292 | if (++tail >= srq->rq.size) | ||
| 293 | tail = 0; | ||
| 294 | } | ||
| 295 | srq->rq.wq = wq; | ||
| 296 | srq->rq.size = size; | ||
| 297 | wq->head = n; | ||
| 298 | wq->tail = 0; | ||
| 299 | if (attr_mask & IB_SRQ_LIMIT) | ||
| 300 | srq->limit = attr->srq_limit; | ||
| 301 | spin_unlock_irq(&srq->rq.lock); | ||
| 302 | |||
| 303 | vfree(owq); | ||
| 304 | |||
| 305 | if (srq->ip) { | ||
| 306 | struct rvt_mmap_info *ip = srq->ip; | ||
| 307 | struct qib_ibdev *dev = to_idev(srq->ibsrq.device); | ||
| 308 | u32 s = sizeof(struct rvt_rwq) + size * sz; | ||
| 309 | |||
| 310 | rvt_update_mmap_info(&dev->rdi, ip, s, wq); | ||
| 311 | |||
| 312 | /* | ||
| 313 | * Return the offset to mmap. | ||
| 314 | * See qib_mmap() for details. | ||
| 315 | */ | ||
| 316 | if (udata && udata->inlen >= sizeof(__u64)) { | ||
| 317 | ret = ib_copy_to_udata(udata, &ip->offset, | ||
| 318 | sizeof(ip->offset)); | ||
| 319 | if (ret) | ||
| 320 | goto bail; | ||
| 321 | } | ||
| 322 | |||
| 323 | /* | ||
| 324 | * Put user mapping info onto the pending list | ||
| 325 | * unless it already is on the list. | ||
| 326 | */ | ||
| 327 | spin_lock_irq(&dev->rdi.pending_lock); | ||
| 328 | if (list_empty(&ip->pending_mmaps)) | ||
| 329 | list_add(&ip->pending_mmaps, | ||
| 330 | &dev->rdi.pending_mmaps); | ||
| 331 | spin_unlock_irq(&dev->rdi.pending_lock); | ||
| 332 | } | ||
| 333 | } else if (attr_mask & IB_SRQ_LIMIT) { | ||
| 334 | spin_lock_irq(&srq->rq.lock); | ||
| 335 | if (attr->srq_limit >= srq->rq.size) | ||
| 336 | ret = -EINVAL; | ||
| 337 | else | ||
| 338 | srq->limit = attr->srq_limit; | ||
| 339 | spin_unlock_irq(&srq->rq.lock); | ||
| 340 | } | ||
| 341 | goto bail; | ||
| 342 | |||
| 343 | bail_unlock: | ||
| 344 | spin_unlock_irq(&srq->rq.lock); | ||
| 345 | bail_free: | ||
| 346 | vfree(wq); | ||
| 347 | bail: | ||
| 348 | return ret; | ||
| 349 | } | ||
| 350 | |||
| 351 | int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) | ||
| 352 | { | ||
| 353 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); | ||
| 354 | |||
| 355 | attr->max_wr = srq->rq.size - 1; | ||
| 356 | attr->max_sge = srq->rq.max_sge; | ||
| 357 | attr->srq_limit = srq->limit; | ||
| 358 | return 0; | ||
| 359 | } | ||
| 360 | |||
| 361 | /** | ||
| 362 | * qib_destroy_srq - destroy a shared receive queue | ||
| 363 | * @ibsrq: the SRQ to destroy | ||
| 364 | */ | ||
| 365 | int qib_destroy_srq(struct ib_srq *ibsrq) | ||
| 366 | { | ||
| 367 | struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq); | ||
| 368 | struct qib_ibdev *dev = to_idev(ibsrq->device); | ||
| 369 | |||
| 370 | spin_lock(&dev->n_srqs_lock); | ||
| 371 | dev->n_srqs_allocated--; | ||
| 372 | spin_unlock(&dev->n_srqs_lock); | ||
| 373 | if (srq->ip) | ||
| 374 | kref_put(&srq->ip->ref, rvt_release_mmap_info); | ||
| 375 | else | ||
| 376 | vfree(srq->rq.wq); | ||
| 377 | kfree(srq); | ||
| 378 | |||
| 379 | return 0; | ||
| 380 | } | ||
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 8b97ca1787f6..3785a526f2e9 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
| @@ -1656,7 +1656,6 @@ int qib_register_ib_device(struct qib_devdata *dd) | |||
| 1656 | 1656 | ||
| 1657 | /* Only need to initialize non-zero fields. */ | 1657 | /* Only need to initialize non-zero fields. */ |
| 1658 | spin_lock_init(&dev->n_qps_lock); | 1658 | spin_lock_init(&dev->n_qps_lock); |
| 1659 | spin_lock_init(&dev->n_srqs_lock); | ||
| 1660 | init_timer(&dev->mem_timer); | 1659 | init_timer(&dev->mem_timer); |
| 1661 | dev->mem_timer.function = mem_timer; | 1660 | dev->mem_timer.function = mem_timer; |
| 1662 | dev->mem_timer.data = (unsigned long) dev; | 1661 | dev->mem_timer.data = (unsigned long) dev; |
| @@ -1754,17 +1753,12 @@ int qib_register_ib_device(struct qib_devdata *dd) | |||
| 1754 | ibdev->destroy_ah = NULL; | 1753 | ibdev->destroy_ah = NULL; |
| 1755 | ibdev->modify_ah = NULL; | 1754 | ibdev->modify_ah = NULL; |
| 1756 | ibdev->query_ah = NULL; | 1755 | ibdev->query_ah = NULL; |
| 1757 | ibdev->create_srq = qib_create_srq; | ||
| 1758 | ibdev->modify_srq = qib_modify_srq; | ||
| 1759 | ibdev->query_srq = qib_query_srq; | ||
| 1760 | ibdev->destroy_srq = qib_destroy_srq; | ||
| 1761 | ibdev->create_qp = NULL; | 1756 | ibdev->create_qp = NULL; |
| 1762 | ibdev->modify_qp = qib_modify_qp; | 1757 | ibdev->modify_qp = qib_modify_qp; |
| 1763 | ibdev->query_qp = NULL; | 1758 | ibdev->query_qp = NULL; |
| 1764 | ibdev->destroy_qp = qib_destroy_qp; | 1759 | ibdev->destroy_qp = qib_destroy_qp; |
| 1765 | ibdev->post_send = NULL; | 1760 | ibdev->post_send = NULL; |
| 1766 | ibdev->post_recv = NULL; | 1761 | ibdev->post_recv = NULL; |
| 1767 | ibdev->post_srq_recv = qib_post_srq_receive; | ||
| 1768 | ibdev->create_cq = NULL; | 1762 | ibdev->create_cq = NULL; |
| 1769 | ibdev->destroy_cq = NULL; | 1763 | ibdev->destroy_cq = NULL; |
| 1770 | ibdev->resize_cq = NULL; | 1764 | ibdev->resize_cq = NULL; |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 34f778424f61..a7e3c7111e14 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h | |||
| @@ -270,8 +270,6 @@ struct qib_ibdev { | |||
| 270 | 270 | ||
| 271 | u32 n_qps_allocated; /* number of QPs allocated for device */ | 271 | u32 n_qps_allocated; /* number of QPs allocated for device */ |
| 272 | spinlock_t n_qps_lock; | 272 | spinlock_t n_qps_lock; |
| 273 | u32 n_srqs_allocated; /* number of SRQs allocated for device */ | ||
| 274 | spinlock_t n_srqs_lock; | ||
| 275 | #ifdef CONFIG_DEBUG_FS | 273 | #ifdef CONFIG_DEBUG_FS |
| 276 | /* per HCA debugfs */ | 274 | /* per HCA debugfs */ |
| 277 | struct dentry *qib_ibdev_dbg; | 275 | struct dentry *qib_ibdev_dbg; |
| @@ -428,21 +426,6 @@ int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr); | |||
| 428 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | 426 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, |
| 429 | int has_grh, void *data, u32 tlen, struct rvt_qp *qp); | 427 | int has_grh, void *data, u32 tlen, struct rvt_qp *qp); |
| 430 | 428 | ||
| 431 | int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, | ||
| 432 | struct ib_recv_wr **bad_wr); | ||
| 433 | |||
| 434 | struct ib_srq *qib_create_srq(struct ib_pd *ibpd, | ||
| 435 | struct ib_srq_init_attr *srq_init_attr, | ||
| 436 | struct ib_udata *udata); | ||
| 437 | |||
| 438 | int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, | ||
| 439 | enum ib_srq_attr_mask attr_mask, | ||
| 440 | struct ib_udata *udata); | ||
| 441 | |||
| 442 | int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); | ||
| 443 | |||
| 444 | int qib_destroy_srq(struct ib_srq *ibsrq); | ||
| 445 | |||
| 446 | void mr_rcu_callback(struct rcu_head *list); | 429 | void mr_rcu_callback(struct rcu_head *list); |
| 447 | 430 | ||
| 448 | static inline void qib_put_ss(struct rvt_sge_state *ss) | 431 | static inline void qib_put_ss(struct rvt_sge_state *ss) |
