author	Roland Dreier <rolandd@cisco.com>	2005-07-07 20:57:11 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-07 21:23:48 -0400
commit	b5e81bf5e7084796d93167f438ec073e59aca9ed (patch)
tree	573883691b631eb5df77411a442bf82b279833a7 /drivers/infiniband
parent	e2773c062e41f710d8ef1e8a790c7e558aff663d (diff)
[PATCH] IB uverbs: update kernel midlayer for new API
Update kernel InfiniBand midlayer to compile against the updated API for low-level drivers. This just amounts to passing NULL for all userspace-related parameters, and setting userspace-related structure members to NULL.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/verbs.c	32
1 file changed, 20 insertions(+), 12 deletions(-)
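The diff below is easier to read with the shape of the updated driver API in mind. Here is a minimal sketch of the provider method prototypes the midlayer now calls; the struct ib_ucontext and struct ib_udata parameter types are assumed from the companion low-level driver API patch in this series and do not appear in this diff itself:

	/* Provider entry points after this series. The context/udata
	 * parameter types are assumptions taken from the companion API
	 * patch, not from this diff. */
	struct ib_pd *(*alloc_pd)(struct ib_device *device,
				  struct ib_ucontext *context,
				  struct ib_udata *udata);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
				   struct ib_qp_init_attr *qp_init_attr,
				   struct ib_udata *udata);
	struct ib_cq *(*create_cq)(struct ib_device *device, int cqe,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);

Kernel-internal callers such as verbs.c pass NULL for every userspace-related argument and set the new uobject member of each returned object to NULL, which is exactly what each hunk below does.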
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 7c08ed0cd7dd..2516f9646515 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -4,6 +4,7 @@
  * Copyright (c) 2004 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -47,10 +48,11 @@ struct ib_pd *ib_alloc_pd(struct ib_device *device)
 {
 	struct ib_pd *pd;
 
-	pd = device->alloc_pd(device);
+	pd = device->alloc_pd(device, NULL, NULL);
 
 	if (!IS_ERR(pd)) {
 		pd->device = device;
+		pd->uobject = NULL;
 		atomic_set(&pd->usecnt, 0);
 	}
 
@@ -76,8 +78,9 @@ struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
 	ah = pd->device->create_ah(pd, ah_attr);
 
 	if (!IS_ERR(ah)) {
 		ah->device = pd->device;
 		ah->pd = pd;
+		ah->uobject = NULL;
 		atomic_inc(&pd->usecnt);
 	}
 
@@ -122,7 +125,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 {
 	struct ib_qp *qp;
 
-	qp = pd->device->create_qp(pd, qp_init_attr);
+	qp = pd->device->create_qp(pd, qp_init_attr, NULL);
 
 	if (!IS_ERR(qp)) {
 		qp->device = pd->device;
@@ -130,6 +133,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 		qp->send_cq = qp_init_attr->send_cq;
 		qp->recv_cq = qp_init_attr->recv_cq;
 		qp->srq = qp_init_attr->srq;
+		qp->uobject = NULL;
 		qp->event_handler = qp_init_attr->event_handler;
 		qp->qp_context = qp_init_attr->qp_context;
 		qp->qp_type = qp_init_attr->qp_type;
@@ -197,10 +201,11 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
 {
 	struct ib_cq *cq;
 
-	cq = device->create_cq(device, cqe);
+	cq = device->create_cq(device, cqe, NULL, NULL);
 
 	if (!IS_ERR(cq)) {
 		cq->device = device;
+		cq->uobject = NULL;
 		cq->comp_handler = comp_handler;
 		cq->event_handler = event_handler;
 		cq->cq_context = cq_context;
@@ -245,8 +250,9 @@ struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
 	mr = pd->device->get_dma_mr(pd, mr_access_flags);
 
 	if (!IS_ERR(mr)) {
 		mr->device = pd->device;
 		mr->pd = pd;
+		mr->uobject = NULL;
 		atomic_inc(&pd->usecnt);
 		atomic_set(&mr->usecnt, 0);
 	}
@@ -267,8 +273,9 @@ struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
 				     mr_access_flags, iova_start);
 
 	if (!IS_ERR(mr)) {
 		mr->device = pd->device;
 		mr->pd = pd;
+		mr->uobject = NULL;
 		atomic_inc(&pd->usecnt);
 		atomic_set(&mr->usecnt, 0);
 	}
@@ -344,8 +351,9 @@ struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
 
 	mw = pd->device->alloc_mw(pd);
 	if (!IS_ERR(mw)) {
 		mw->device = pd->device;
 		mw->pd = pd;
+		mw->uobject = NULL;
 		atomic_inc(&pd->usecnt);
 	}
 