author		Tom Tucker <tom@opengridcomputing.com>		2006-08-03 17:02:40 -0400
committer	Roland Dreier <rolandd@cisco.com>		2006-09-22 18:22:46 -0400
commit		922a8e9fb2e0711212badce47a41137e2ca04cb3
tree		54af57ac9f2ddcaf0e6fdead4d9175eecd9e06e2 /drivers
parent		3cd965646b7cb75ae84dd0daf6258adf20e4f169
RDMA: iWARP Connection Manager.
Add an iWARP Connection Manager (CM), which abstracts connection
management for iWARP devices (RNICs). It is a logical instance of the
xx_cm where xx is the transport type (ib or iw). The symbols exported
are used by the transport independent rdma_cm module, and are
available also for transport dependent ULPs.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers')
 -rw-r--r--	drivers/infiniband/core/iwcm.c	1019
 -rw-r--r--	drivers/infiniband/core/iwcm.h	  62
 2 files changed, 1081 insertions, 0 deletions
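
The exported entry points below (iw_create_cm_id, iw_cm_connect, iw_cm_disconnect, iw_destroy_cm_id) are what the transport-independent rdma_cm and ULPs call. As an illustration of the active-side flow this patch implements, here is a minimal ULP sketch; it is not part of the patch, the function names and the QPN/ord/ird values are hypothetical, and it assumes iw_cm_conn_param carries the qpn/ord/ird fields declared in rdma/iw_cm.h:

/* Illustrative active-side ULP usage -- not part of this patch. */
#include <linux/err.h>
#include <rdma/iw_cm.h>

static int my_cm_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	switch (event->event) {
	case IW_CM_EVENT_CONNECT_REPLY:
		/* A non-zero return asks the IWCM to destroy this cm_id */
		return event->status == IW_CM_EVENT_STATUS_ACCEPTED ?
			0 : -ECONNREFUSED;
	default:
		/* ESTABLISHED/DISCONNECT/CLOSE: nothing to do here */
		return 0;
	}
}

static int my_active_connect(struct ib_device *device, u32 qpn)
{
	struct iw_cm_conn_param param = {
		.qpn = qpn,	/* QP created by the ULP beforehand */
		.ord = 1,	/* placeholder RDMA read depths */
		.ird = 1,
	};
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(device, my_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	/* the caller fills cm_id->local_addr/remote_addr (sockaddr_in) */
	ret = iw_cm_connect(cm_id, &param);	/* CM_ID <-- CONN_SENT */
	if (ret)
		iw_destroy_cm_id(cm_id);	/* no events will follow */
	return ret;
}

Note that iw_cm_connect pre-allocates the four work elements an active id can need (CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE), so the provider's interrupt-context upcalls never have to allocate.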
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
new file mode 100644
index 000000000000..c3fb304a4e86
--- /dev/null
+++ b/drivers/infiniband/core/iwcm.c
@@ -0,0 +1,1019 @@
/*
 * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_addr.h>

#include "iwcm.h"

MODULE_AUTHOR("Tom Tucker");
MODULE_DESCRIPTION("iWARP CM");
MODULE_LICENSE("Dual BSD/GPL");

static struct workqueue_struct *iwcm_wq;
struct iwcm_work {
	struct work_struct work;
	struct iwcm_id_private *cm_id;
	struct list_head list;
	struct iw_cm_event event;
	struct list_head free_list;
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements.  The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id.  If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed.  cm_event_handler() returns -ENOMEM in this case.  It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method.  This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_entry(cm_id_priv->work_free_list.next, struct iwcm_work,
			  free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
		kfree(list_entry(e, struct iwcm_work, free_list));
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;

	BUG_ON(!list_empty(&cm_id_priv->work_free_list));
	while (count--) {
		work = kmalloc(sizeof(struct iwcm_work), GFP_KERNEL);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests in the
 * cm_id_priv so the low level driver doesn't have to.  Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iwcm_id_private *cm_id_priv,
			     struct iw_cm_event *event)
{
	void *p;

	p = kmalloc(event->private_data_len, GFP_ATOMIC);
	if (!p)
		return -ENOMEM;
	memcpy(p, event->private_data, event->private_data_len);
	event->private_data = p;
	return 0;
}

/*
 * Release a reference on cm_id.  If the last reference is being removed
 * and iw_destroy_cm_id is waiting, wake up the waiting thread.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	int ret = 0;

	BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
	if (atomic_dec_and_test(&cm_id_priv->refcount)) {
		BUG_ON(!list_empty(&cm_id_priv->work_list));
		if (waitqueue_active(&cm_id_priv->destroy_comp.wait)) {
			BUG_ON(cm_id_priv->state != IW_CM_STATE_DESTROYING);
			BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY,
					&cm_id_priv->flags));
			ret = 1;
		}
		complete(&cm_id_priv->destroy_comp);
	}

	return ret;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	atomic_inc(&cm_id_priv->refcount);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	iwcm_deref_id(cm_id_priv);
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = kzalloc(sizeof(*cm_id_priv), GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	spin_lock_init(&cm_id_priv->lock);
	atomic_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
EXPORT_SYMBOL(iw_create_cm_id);


static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return -EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state.  It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	BUG_ON(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed.  Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns -EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for a user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = -EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = -EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_disconnect);

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	wait_event(cm_id_priv->connect_wait,
		   !test_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags));

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* destroy the listening endpoint */
		ret = cm_id->device->iwcm->destroy_listen(cm_id);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		BUG();
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread.  The function will wait for all
 * references to be released on the cm_id and then kfree the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	BUG_ON(test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags));

	destroy_cm_id(cm_id);

	wait_for_completion(&cm_id_priv->destroy_comp);

	dealloc_work_entries(cm_id_priv);

	kfree(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests.  Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}
EXPORT_SYMBOL(iw_cm_listen);

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request.  No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}
EXPORT_SYMBOL(iw_cm_reject);

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event.  Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	unsigned long flags;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_accept);

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event.  iw_cm_disconnect and iw_destroy_cm_id will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret = 0;
	unsigned long flags;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	set_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
		return -EINVAL;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		return -EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (ret) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
		wake_up_all(&cm_id_priv->connect_wait);
	}

	return ret;
}
EXPORT_SYMBOL(iw_cm_connect);

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request.  The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event.  The
 * listen_cm_id contains the client cm_handler, context and
 * device.  These are copied when the cm_id is cloned.  The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	unsigned long flags;
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	BUG_ON(iw_event->status);

	/*
	 * We could be destroying the listening id.  If so, ignore this
	 * upcall.
	 */
	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		return;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->local_addr = iw_event->local_addr;
	cm_id->remote_addr = iw_event->remote_addr;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		return;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
		destroy_cm_id(cm_id);
		if (atomic_read(&cm_id_priv->refcount) == 0)
			kfree(cm_id);
	}

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * FPDU mode.
 *
 * This event can only be received in the CONN_RECV state.  If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event.  If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect.  Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the ESTABLISHED event to
 * post its requests to the server.  This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_cm_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
		cm_id_priv->id.local_addr = iw_event->local_addr;
		cm_id_priv->id.remote_addr = iw_event->remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	if (iw_event->private_data_len)
		kfree(iw_event->private_data);

	/* Wake up waiters on connect complete */
	wake_up_all(&cm_id_priv->connect_wait);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state.  Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&cm_id_priv->lock, flags);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		BUG();
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		BUG();
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id.  If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed.  This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(void *arg)
{
	struct iwcm_work *work = arg;
	struct iwcm_work lwork;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int empty;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	empty = list_empty(&cm_id_priv->work_list);
	while (!empty) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct iwcm_work, list);
		list_del_init(&work->list);
		empty = list_empty(&cm_id_priv->work_list);
		lwork = *work;
		put_work(work);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		/*
		 * Use the local copy: once put_work() has returned the
		 * element to the free list, *work may be reused by a
		 * concurrent event upcall.
		 */
		ret = process_event(cm_id_priv, &lwork.event);
		if (ret) {
			set_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
			destroy_cm_id(&cm_id_priv->id);
		}
		BUG_ON(atomic_read(&cm_id_priv->refcount) == 0);
		if (iwcm_deref_id(cm_id_priv))
			return;

		if (atomic_read(&cm_id_priv->refcount) == 0 &&
		    test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
			dealloc_work_entries(cm_id_priv);
			kfree(cm_id_priv);
			return;
		}
		spin_lock_irqsave(&cm_id_priv->lock, flags);
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
}

/*
 * This function is called in interrupt context.  Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block.  Events are queued to a per-CM_ID
 * work_list.  If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id.  Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * deleted.
 *
 * Returns:
 *	      0	- the event was handled.
 *	-ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(cm_id_priv, &work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_inc(&cm_id_priv->refcount);
	if (list_empty(&cm_id_priv->work_list)) {
		list_add_tail(&work->list, &cm_id_priv->work_list);
		queue_work(iwcm_wq, &work->work);
	} else
		list_add_tail(&work->list, &cm_id_priv->work_list);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(iw_cm_init_qp_attr);

static int __init iw_cm_init(void)
{
	iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
	if (!iwcm_wq)
		return -ENOMEM;

	return 0;
}

static void __exit iw_cm_cleanup(void)
{
	destroy_workqueue(iwcm_wq);
}

module_init(iw_cm_init);
module_exit(iw_cm_cleanup);
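
For completeness, the other half of this interface is the provider side: an RNIC driver reports asynchronous events by calling the cm_id's event_handler hook, which iw_create_cm_id points at cm_event_handler above. A minimal sketch, assuming a driver-defined helper name (my_rnic_post_close is hypothetical); the only IWCM call used is the hook itself:

/* Illustrative provider upcall -- not part of this patch. */
#include <linux/string.h>
#include <rdma/iw_cm.h>

static void my_rnic_post_close(struct iw_cm_id *cm_id)
{
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;

	/*
	 * Legal from interrupt context: cm_event_handler() only takes
	 * a spinlock, copies the event, and queues cm_work_handler on
	 * iwcm_wq.  A -ENOMEM return means this cm_id's pre-allocated
	 * work elements are exhausted.
	 */
	if (cm_id->event_handler(cm_id, &event))
		printk(KERN_WARNING "iwcm: dropped CLOSE event\n");
}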
diff --git a/drivers/infiniband/core/iwcm.h b/drivers/infiniband/core/iwcm.h
new file mode 100644
index 000000000000..3f6cc82564c8
--- /dev/null
+++ b/drivers/infiniband/core/iwcm.h
@@ -0,0 +1,62 @@
/*
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef IWCM_H
#define IWCM_H

enum iw_cm_state {
	IW_CM_STATE_IDLE,		/* unbound, inactive */
	IW_CM_STATE_LISTEN,		/* listen waiting for connect */
	IW_CM_STATE_CONN_RECV,		/* inbound waiting for user accept */
	IW_CM_STATE_CONN_SENT,		/* outbound waiting for peer accept */
	IW_CM_STATE_ESTABLISHED,	/* established */
	IW_CM_STATE_CLOSING,		/* disconnect */
	IW_CM_STATE_DESTROYING		/* object being deleted */
};

struct iwcm_id_private {
	struct iw_cm_id id;
	enum iw_cm_state state;
	unsigned long flags;
	struct ib_qp *qp;
	struct completion destroy_comp;
	wait_queue_head_t connect_wait;
	struct list_head work_list;
	spinlock_t lock;
	atomic_t refcount;
	struct list_head work_free_list;
};

#define IWCM_F_CALLBACK_DESTROY		1
#define IWCM_F_CONNECT_WAIT		2

#endif /* IWCM_H */
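
Tying the state enum back to the passive-side flow in iwcm.c: the listening id stays in LISTEN while each inbound request spawns a child id in CONN_RECV, and the handler the child inherits then accepts or rejects. A hedged sketch of such a handler, not part of the patch; MY_QPN and the ord/ird values are placeholders, and the iw_cm_conn_param fields are assumed from rdma/iw_cm.h:

/* Illustrative passive-side handler -- not part of this patch. */
#include <rdma/iw_cm.h>

#define MY_QPN 1	/* placeholder: QPN of a QP set up by the ULP */

static int my_listen_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	struct iw_cm_conn_param param = {
		.qpn = MY_QPN,
		.ord = 1,	/* placeholder RDMA read depths */
		.ird = 1,
	};

	if (event->event == IW_CM_EVENT_CONNECT_REQUEST)
		/*
		 * cm_id is the child cloned by cm_conn_req_handler;
		 * iw_cm_reject(cm_id, NULL, 0) would refuse instead,
		 * and a non-zero return makes the IWCM destroy the child.
		 */
		return iw_cm_accept(cm_id, &param);
	return 0;
}

The parent would be created with iw_create_cm_id(device, my_listen_handler, ctx) and armed with iw_cm_listen(parent, backlog); note that the backlog value also sizes the listening id's pre-allocated iwcm_work pool, as described at the top of iwcm.c.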