-rw-r--r--	drivers/infiniband/core/Makefile	5
-rw-r--r--	drivers/infiniband/core/cm.c	3324
-rw-r--r--	drivers/infiniband/core/cm_msgs.h	819
3 files changed, 4147 insertions(+), 1 deletion(-)
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 96b8eba95849..216cb281abdd 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,6 +1,7 @@
 EXTRA_CFLAGS += -Idrivers/infiniband/include
 
-obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o ib_umad.o
+obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
+	ib_cm.o ib_umad.o
 obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o
 
 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
@@ -10,6 +11,8 @@ ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
 
 ib_sa-y := sa_query.o
 
+ib_cm-y := cm.o
+
 ib_umad-y := user_mad.o
 
 ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_mem.o
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
new file mode 100644
index 000000000000..403ed125d8f4
--- /dev/null
+++ b/drivers/infiniband/core/cm.c
@@ -0,0 +1,3324 @@
1/*
2 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
36 */
37#include <linux/dma-mapping.h>
38#include <linux/err.h>
39#include <linux/idr.h>
40#include <linux/interrupt.h>
41#include <linux/pci.h>
42#include <linux/rbtree.h>
43#include <linux/spinlock.h>
44#include <linux/workqueue.h>
45
46#include <ib_cache.h>
47#include <ib_cm.h>
48#include "cm_msgs.h"
49
50MODULE_AUTHOR("Sean Hefty");
51MODULE_DESCRIPTION("InfiniBand CM");
52MODULE_LICENSE("Dual BSD/GPL");
53
54static void cm_add_one(struct ib_device *device);
55static void cm_remove_one(struct ib_device *device);
56
57static struct ib_client cm_client = {
58 .name = "cm",
59 .add = cm_add_one,
60 .remove = cm_remove_one
61};
62
63static struct ib_cm {
64 spinlock_t lock;
65 struct list_head device_list;
66 rwlock_t device_lock;
67 struct rb_root listen_service_table;
68 u64 listen_service_id;
69 /* struct rb_root peer_service_table; todo: fix peer to peer */
70 struct rb_root remote_qp_table;
71 struct rb_root remote_id_table;
72 struct rb_root remote_sidr_table;
73 struct idr local_id_table;
74 struct workqueue_struct *wq;
75} cm;
76
77struct cm_port {
78 struct cm_device *cm_dev;
79 struct ib_mad_agent *mad_agent;
80 u8 port_num;
81};
82
83struct cm_device {
84 struct list_head list;
85 struct ib_device *device;
86 u64 ca_guid;
87 struct cm_port port[0];
88};
89
90struct cm_av {
91 struct cm_port *port;
92 union ib_gid dgid;
93 struct ib_ah_attr ah_attr;
94 u16 pkey_index;
95 u8 packet_life_time;
96};
97
98struct cm_work {
99 struct work_struct work;
100 struct list_head list;
101 struct cm_port *port;
102 struct ib_mad_recv_wc *mad_recv_wc; /* Received MADs */
103 u32 local_id; /* Established / timewait */
104 u32 remote_id;
105 struct ib_cm_event cm_event;
106 struct ib_sa_path_rec path[0];
107};
108
109struct cm_timewait_info {
110 struct cm_work work; /* Must be first. */
111 struct rb_node remote_qp_node;
112 struct rb_node remote_id_node;
113 u64 remote_ca_guid;
114 u32 remote_qpn;
115 u8 inserted_remote_qp;
116 u8 inserted_remote_id;
117};
118
119struct cm_id_private {
120 struct ib_cm_id id;
121
122 struct rb_node service_node;
123 struct rb_node sidr_id_node;
124 spinlock_t lock;
125 wait_queue_head_t wait;
126 atomic_t refcount;
127
128 struct ib_mad_send_buf *msg;
129 struct cm_timewait_info *timewait_info;
130 /* todo: use alternate port on send failure */
131 struct cm_av av;
132 struct cm_av alt_av;
133
134 void *private_data;
135 u64 tid;
136 u32 local_qpn;
137 u32 remote_qpn;
138 u32 sq_psn;
139 u32 rq_psn;
140 int timeout_ms;
141 enum ib_mtu path_mtu;
142 u8 private_data_len;
143 u8 max_cm_retries;
144 u8 peer_to_peer;
145 u8 responder_resources;
146 u8 initiator_depth;
147 u8 local_ack_timeout;
148 u8 retry_count;
149 u8 rnr_retry_count;
150 u8 service_timeout;
151
152 struct list_head work_list;
153 atomic_t work_count;
154};
155
156static void cm_work_handler(void *data);
157
158static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
159{
160 if (atomic_dec_and_test(&cm_id_priv->refcount))
161 wake_up(&cm_id_priv->wait);
162}
163
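/*
 * Editor's note: cm_deref_id() pairs with the wait_event() in
 * ib_destroy_cm_id() below.  The destroying thread drops its own
 * reference and then sleeps until every outstanding reference (messages
 * in flight, queued work items) has been released and the wake_up()
 * above fires.
 */
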
164static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
165 struct ib_mad_send_buf **msg)
166{
167 struct ib_mad_agent *mad_agent;
168 struct ib_mad_send_buf *m;
169 struct ib_ah *ah;
170
171 mad_agent = cm_id_priv->av.port->mad_agent;
172 ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
173 if (IS_ERR(ah))
174 return PTR_ERR(ah);
175
176 m = ib_create_send_mad(mad_agent, 1, cm_id_priv->av.pkey_index,
177 ah, 0, sizeof(struct ib_mad_hdr),
178 sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
179 GFP_ATOMIC);
180 if (IS_ERR(m)) {
181 ib_destroy_ah(ah);
182 return PTR_ERR(m);
183 }
184
185 /* Timeout set by caller if response is expected. */
186 m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries;
187
188 atomic_inc(&cm_id_priv->refcount);
189 m->context[0] = cm_id_priv;
190 *msg = m;
191 return 0;
192}
193
194static int cm_alloc_response_msg(struct cm_port *port,
195 struct ib_mad_recv_wc *mad_recv_wc,
196 struct ib_mad_send_buf **msg)
197{
198 struct ib_mad_send_buf *m;
199 struct ib_ah *ah;
200
201 ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
202 mad_recv_wc->recv_buf.grh, port->port_num);
203 if (IS_ERR(ah))
204 return PTR_ERR(ah);
205
206 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
207 ah, 0, sizeof(struct ib_mad_hdr),
208 sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
209 GFP_ATOMIC);
210 if (IS_ERR(m)) {
211 ib_destroy_ah(ah);
212 return PTR_ERR(m);
213 }
214 *msg = m;
215 return 0;
216}
217
218static void cm_free_msg(struct ib_mad_send_buf *msg)
219{
220 ib_destroy_ah(msg->send_wr.wr.ud.ah);
221 if (msg->context[0])
222 cm_deref_id(msg->context[0]);
223 ib_free_send_mad(msg);
224}
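
/*
 * Editor's note: cm_alloc_msg() takes a reference on the cm_id and
 * stashes it in context[0]; cm_free_msg() drops that reference again.
 * Every buffer from cm_alloc_msg() or cm_alloc_response_msg() must
 * therefore end up in cm_free_msg(), either on an error path or once
 * the send has completed.
 */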
225
226static void * cm_copy_private_data(const void *private_data,
227 u8 private_data_len)
228{
229 void *data;
230
231 if (!private_data || !private_data_len)
232 return NULL;
233
234 data = kmalloc(private_data_len, GFP_KERNEL);
235 if (!data)
236 return ERR_PTR(-ENOMEM);
237
238 memcpy(data, private_data, private_data_len);
239 return data;
240}
241
242static void cm_set_private_data(struct cm_id_private *cm_id_priv,
243 void *private_data, u8 private_data_len)
244{
245 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
246 kfree(cm_id_priv->private_data);
247
248 cm_id_priv->private_data = private_data;
249 cm_id_priv->private_data_len = private_data_len;
250}
251
252static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
253 u16 dlid, u8 sl, u16 src_path_bits)
254{
255 memset(ah_attr, 0, sizeof *ah_attr);
256 ah_attr->dlid = be16_to_cpu(dlid);
257 ah_attr->sl = sl;
258 ah_attr->src_path_bits = src_path_bits;
259 ah_attr->port_num = port_num;
260}
261
262static void cm_init_av_for_response(struct cm_port *port,
263 struct ib_wc *wc, struct cm_av *av)
264{
265 av->port = port;
266 av->pkey_index = wc->pkey_index;
267 cm_set_ah_attr(&av->ah_attr, port->port_num, cpu_to_be16(wc->slid),
268 wc->sl, wc->dlid_path_bits);
269}
270
271static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
272{
273 struct cm_device *cm_dev;
274 struct cm_port *port = NULL;
275 unsigned long flags;
276 int ret;
277 u8 p;
278
279 read_lock_irqsave(&cm.device_lock, flags);
280 list_for_each_entry(cm_dev, &cm.device_list, list) {
281 if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
282 &p, NULL)) {
283 port = &cm_dev->port[p-1];
284 break;
285 }
286 }
287 read_unlock_irqrestore(&cm.device_lock, flags);
288
289 if (!port)
290 return -EINVAL;
291
292 ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
293 be16_to_cpu(path->pkey), &av->pkey_index);
294 if (ret)
295 return ret;
296
297 av->port = port;
298 cm_set_ah_attr(&av->ah_attr, av->port->port_num, path->dlid,
299 path->sl, path->slid & 0x7F);
300 av->packet_life_time = path->packet_life_time;
301 return 0;
302}
303
304static int cm_alloc_id(struct cm_id_private *cm_id_priv)
305{
306 unsigned long flags;
307 int ret;
308
309 do {
310 spin_lock_irqsave(&cm.lock, flags);
311 ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
312 (int *) &cm_id_priv->id.local_id);
313 spin_unlock_irqrestore(&cm.lock, flags);
314 } while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));
315 return ret;
316}
317
318static void cm_free_id(u32 local_id)
319{
320 unsigned long flags;
321
322 spin_lock_irqsave(&cm.lock, flags);
323 idr_remove(&cm.local_id_table, (int) local_id);
324 spin_unlock_irqrestore(&cm.lock, flags);
325}
326
327static struct cm_id_private * cm_get_id(u32 local_id, u32 remote_id)
328{
329 struct cm_id_private *cm_id_priv;
330
331 cm_id_priv = idr_find(&cm.local_id_table, (int) local_id);
332 if (cm_id_priv) {
333 if (cm_id_priv->id.remote_id == remote_id)
334 atomic_inc(&cm_id_priv->refcount);
335 else
336 cm_id_priv = NULL;
337 }
338
339 return cm_id_priv;
340}
341
342static struct cm_id_private * cm_acquire_id(u32 local_id, u32 remote_id)
343{
344 struct cm_id_private *cm_id_priv;
345 unsigned long flags;
346
347 spin_lock_irqsave(&cm.lock, flags);
348 cm_id_priv = cm_get_id(local_id, remote_id);
349 spin_unlock_irqrestore(&cm.lock, flags);
350
351 return cm_id_priv;
352}
353
354static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
355{
356 struct rb_node **link = &cm.listen_service_table.rb_node;
357 struct rb_node *parent = NULL;
358 struct cm_id_private *cur_cm_id_priv;
359 u64 service_id = cm_id_priv->id.service_id;
360 u64 service_mask = cm_id_priv->id.service_mask;
361
362 while (*link) {
363 parent = *link;
364 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
365 service_node);
366 if ((cur_cm_id_priv->id.service_mask & service_id) ==
367 (service_mask & cur_cm_id_priv->id.service_id))
368 return cm_id_priv;
369 if (service_id < cur_cm_id_priv->id.service_id)
370 link = &(*link)->rb_left;
371 else
372 link = &(*link)->rb_right;
373 }
374 rb_link_node(&cm_id_priv->service_node, parent, link);
375 rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
376 return NULL;
377}
378
379static struct cm_id_private * cm_find_listen(u64 service_id)
380{
381 struct rb_node *node = cm.listen_service_table.rb_node;
382 struct cm_id_private *cm_id_priv;
383
384 while (node) {
385 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
386 if ((cm_id_priv->id.service_mask & service_id) ==
387 (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
388 return cm_id_priv;
389 if (service_id < cm_id_priv->id.service_id)
390 node = node->rb_left;
391 else
392 node = node->rb_right;
393 }
394 return NULL;
395}
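
/*
 * Editor's note: listen lookup is mask-based.  A hypothetical example:
 * a listener registered with service_id 0x1000 and service_mask ~0ULL
 * matches only 0x1000, while one registered with mask
 * 0xFFFFFFFFFFFFFF00ULL matches the whole range 0x1000-0x10FF, since
 * only (mask & incoming_id) == (mask & listener_id) is compared.
 */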
396
397static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
398 *timewait_info)
399{
400 struct rb_node **link = &cm.remote_id_table.rb_node;
401 struct rb_node *parent = NULL;
402 struct cm_timewait_info *cur_timewait_info;
403 u64 remote_ca_guid = timewait_info->remote_ca_guid;
404 u32 remote_id = timewait_info->work.remote_id;
405
406 while (*link) {
407 parent = *link;
408 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
409 remote_id_node);
410 if (remote_id < cur_timewait_info->work.remote_id)
411 link = &(*link)->rb_left;
412 else if (remote_id > cur_timewait_info->work.remote_id)
413 link = &(*link)->rb_right;
414 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
415 link = &(*link)->rb_left;
416 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
417 link = &(*link)->rb_right;
418 else
419 return cur_timewait_info;
420 }
421 timewait_info->inserted_remote_id = 1;
422 rb_link_node(&timewait_info->remote_id_node, parent, link);
423 rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
424 return NULL;
425}
426
427static struct cm_timewait_info * cm_find_remote_id(u64 remote_ca_guid,
428 u32 remote_id)
429{
430 struct rb_node *node = cm.remote_id_table.rb_node;
431 struct cm_timewait_info *timewait_info;
432
433 while (node) {
434 timewait_info = rb_entry(node, struct cm_timewait_info,
435 remote_id_node);
436 if (remote_id < timewait_info->work.remote_id)
437 node = node->rb_left;
438 else if (remote_id > timewait_info->work.remote_id)
439 node = node->rb_right;
440 else if (remote_ca_guid < timewait_info->remote_ca_guid)
441 node = node->rb_left;
442 else if (remote_ca_guid > timewait_info->remote_ca_guid)
443 node = node->rb_right;
444 else
445 return timewait_info;
446 }
447 return NULL;
448}
449
450static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
451 *timewait_info)
452{
453 struct rb_node **link = &cm.remote_qp_table.rb_node;
454 struct rb_node *parent = NULL;
455 struct cm_timewait_info *cur_timewait_info;
456 u64 remote_ca_guid = timewait_info->remote_ca_guid;
457 u32 remote_qpn = timewait_info->remote_qpn;
458
459 while (*link) {
460 parent = *link;
461 cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
462 remote_qp_node);
463 if (remote_qpn < cur_timewait_info->remote_qpn)
464 link = &(*link)->rb_left;
465 else if (remote_qpn > cur_timewait_info->remote_qpn)
466 link = &(*link)->rb_right;
467 else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
468 link = &(*link)->rb_left;
469 else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
470 link = &(*link)->rb_right;
471 else
472 return cur_timewait_info;
473 }
474 timewait_info->inserted_remote_qp = 1;
475 rb_link_node(&timewait_info->remote_qp_node, parent, link);
476 rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
477 return NULL;
478}
479
480static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
481 *cm_id_priv)
482{
483 struct rb_node **link = &cm.remote_sidr_table.rb_node;
484 struct rb_node *parent = NULL;
485 struct cm_id_private *cur_cm_id_priv;
486 union ib_gid *port_gid = &cm_id_priv->av.dgid;
487 u32 remote_id = cm_id_priv->id.remote_id;
488
489 while (*link) {
490 parent = *link;
491 cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
492 sidr_id_node);
493 if (remote_id < cur_cm_id_priv->id.remote_id)
494 link = &(*link)->rb_left;
495 else if (remote_id > cur_cm_id_priv->id.remote_id)
496 link = &(*link)->rb_right;
497 else {
498 int cmp;
499 cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
500 sizeof *port_gid);
501 if (cmp < 0)
502 link = &(*link)->rb_left;
503 else if (cmp > 0)
504 link = &(*link)->rb_right;
505 else
506 return cur_cm_id_priv;
507 }
508 }
509 rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
510 rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
511 return NULL;
512}
513
514static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
515 enum ib_cm_sidr_status status)
516{
517 struct ib_cm_sidr_rep_param param;
518
519 memset(&param, 0, sizeof param);
520 param.status = status;
521 ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
522}
523
524struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
525 void *context)
526{
527 struct cm_id_private *cm_id_priv;
528 int ret;
529
530 cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
531 if (!cm_id_priv)
532 return ERR_PTR(-ENOMEM);
533
534 memset(cm_id_priv, 0, sizeof *cm_id_priv);
535 cm_id_priv->id.state = IB_CM_IDLE;
536 cm_id_priv->id.cm_handler = cm_handler;
537 cm_id_priv->id.context = context;
538 ret = cm_alloc_id(cm_id_priv);
539 if (ret)
540 goto error;
541
542 spin_lock_init(&cm_id_priv->lock);
543 init_waitqueue_head(&cm_id_priv->wait);
544 INIT_LIST_HEAD(&cm_id_priv->work_list);
545 atomic_set(&cm_id_priv->work_count, -1);
546 atomic_set(&cm_id_priv->refcount, 1);
547 return &cm_id_priv->id;
548
549error:
550 kfree(cm_id_priv);
551 return ERR_PTR(ret);
552}
553EXPORT_SYMBOL(ib_create_cm_id);
554
555static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
556{
557 struct cm_work *work;
558
559 if (list_empty(&cm_id_priv->work_list))
560 return NULL;
561
562 work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
563 list_del(&work->list);
564 return work;
565}
566
567static void cm_free_work(struct cm_work *work)
568{
569 if (work->mad_recv_wc)
570 ib_free_recv_mad(work->mad_recv_wc);
571 kfree(work);
572}
573
574static inline int cm_convert_to_ms(int iba_time)
575{
576 /* approximate conversion to ms from 4.096us x 2^iba_time */
577 return 1 << max(iba_time - 8, 0);
578}
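
/*
 * Editor's note, a worked example of the conversion above: IBA timeouts
 * encode 4.096us * 2^t, and 4.096us * 2^t ~= 2^(t-8) ms.  For t = 16
 * the exact value is ~268ms and the function returns 1 << 8 = 256ms;
 * any t <= 8 is clamped up to 1ms.
 */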
579
580static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
581{
582 unsigned long flags;
583
584 if (!timewait_info->inserted_remote_id &&
585 !timewait_info->inserted_remote_qp)
586 return;
587
588 spin_lock_irqsave(&cm.lock, flags);
589 if (timewait_info->inserted_remote_id) {
590 rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
591 timewait_info->inserted_remote_id = 0;
592 }
593
594 if (timewait_info->inserted_remote_qp) {
595 rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
596 timewait_info->inserted_remote_qp = 0;
597 }
598 spin_unlock_irqrestore(&cm.lock, flags);
599}
600
601static struct cm_timewait_info * cm_create_timewait_info(u32 local_id)
602{
603 struct cm_timewait_info *timewait_info;
604
605 timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
606 if (!timewait_info)
607 return ERR_PTR(-ENOMEM);
608 memset(timewait_info, 0, sizeof *timewait_info);
609
610 timewait_info->work.local_id = local_id;
611 INIT_WORK(&timewait_info->work.work, cm_work_handler,
612 &timewait_info->work);
613 timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
614 return timewait_info;
615}
616
617static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
618{
619 int wait_time;
620
621 /*
622 * The cm_id could be destroyed by the user before we exit timewait.
623 * To protect against this, we search for the cm_id after exiting
624 * timewait before notifying the user that we've exited timewait.
625 */
626 cm_id_priv->id.state = IB_CM_TIMEWAIT;
627 wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
628 queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
629 msecs_to_jiffies(wait_time));
630 cm_id_priv->timewait_info = NULL;
631}
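
/*
 * Editor's note: ownership of timewait_info passes to the workqueue
 * here.  The pointer is cleared so that destroying the cm_id does not
 * free memory the delayed work still uses; when the work item runs it
 * delivers the IB_CM_TIMEWAIT_EXIT event set up in
 * cm_create_timewait_info().
 */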
632
633static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
634{
635 cm_id_priv->id.state = IB_CM_IDLE;
636 if (cm_id_priv->timewait_info) {
637 cm_cleanup_timewait(cm_id_priv->timewait_info);
638 kfree(cm_id_priv->timewait_info);
639 cm_id_priv->timewait_info = NULL;
640 }
641}
642
643void ib_destroy_cm_id(struct ib_cm_id *cm_id)
644{
645 struct cm_id_private *cm_id_priv;
646 struct cm_work *work;
647 unsigned long flags;
648
649 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
650retest:
651 spin_lock_irqsave(&cm_id_priv->lock, flags);
652 switch (cm_id->state) {
653 case IB_CM_LISTEN:
654 cm_id->state = IB_CM_IDLE;
655 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
656 spin_lock_irqsave(&cm.lock, flags);
657 rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
658 spin_unlock_irqrestore(&cm.lock, flags);
659 break;
660 case IB_CM_SIDR_REQ_SENT:
661 cm_id->state = IB_CM_IDLE;
662 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
663 (unsigned long) cm_id_priv->msg);
664 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
665 break;
666 case IB_CM_SIDR_REQ_RCVD:
667 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
668 cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
669 break;
670 case IB_CM_REQ_SENT:
671 case IB_CM_MRA_REQ_RCVD:
672 case IB_CM_REP_SENT:
673 case IB_CM_MRA_REP_RCVD:
674 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
675 (unsigned long) cm_id_priv->msg);
676 /* Fall through */
677 case IB_CM_REQ_RCVD:
678 case IB_CM_MRA_REQ_SENT:
679 case IB_CM_REP_RCVD:
680 case IB_CM_MRA_REP_SENT:
681 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
682 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
683 &cm_id_priv->av.port->cm_dev->ca_guid,
684 sizeof cm_id_priv->av.port->cm_dev->ca_guid,
685 NULL, 0);
686 break;
687 case IB_CM_ESTABLISHED:
688 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
689 ib_send_cm_dreq(cm_id, NULL, 0);
690 goto retest;
691 case IB_CM_DREQ_SENT:
692 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
693 (unsigned long) cm_id_priv->msg);
694 cm_enter_timewait(cm_id_priv);
695 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
696 break;
697 case IB_CM_DREQ_RCVD:
698 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
699 ib_send_cm_drep(cm_id, NULL, 0);
700 break;
701 default:
702 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
703 break;
704 }
705
706 cm_free_id(cm_id->local_id);
707 atomic_dec(&cm_id_priv->refcount);
708 wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
709 while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
710 cm_free_work(work);
711 if (cm_id_priv->private_data && cm_id_priv->private_data_len)
712 kfree(cm_id_priv->private_data);
713 kfree(cm_id_priv);
714}
715EXPORT_SYMBOL(ib_destroy_cm_id);
716
717int ib_cm_listen(struct ib_cm_id *cm_id,
718 u64 service_id,
719 u64 service_mask)
720{
721 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
722 unsigned long flags;
723 int ret = 0;
724
725 service_mask = service_mask ? service_mask : ~0ULL;
726 service_id &= service_mask;
727 if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
728 (service_id != IB_CM_ASSIGN_SERVICE_ID))
729 return -EINVAL;
730
731 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
732 BUG_ON(cm_id->state != IB_CM_IDLE);
733
734 cm_id->state = IB_CM_LISTEN;
735
736 spin_lock_irqsave(&cm.lock, flags);
737 if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
738 cm_id->service_id = __cpu_to_be64(cm.listen_service_id++);
739 cm_id->service_mask = ~0ULL;
740 } else {
741 cm_id->service_id = service_id;
742 cm_id->service_mask = service_mask;
743 }
744 cur_cm_id_priv = cm_insert_listen(cm_id_priv);
745 spin_unlock_irqrestore(&cm.lock, flags);
746
747 if (cur_cm_id_priv) {
748 cm_id->state = IB_CM_IDLE;
749 ret = -EBUSY;
750 }
751 return ret;
752}
753EXPORT_SYMBOL(ib_cm_listen);
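
/*
 * Editor's note: a minimal, hypothetical usage sketch of
 * ib_create_cm_id() and ib_cm_listen() above.  The handler name and
 * service ID are invented for illustration.
 */
#if 0	/* example only, not part of this patch */
static int example_cm_handler(struct ib_cm_id *cm_id,
			      struct ib_cm_event *event)
{
	/* A nonzero return makes cm_process_work() destroy the cm_id. */
	return 0;
}

static struct ib_cm_id *example_listen(void)
{
	struct ib_cm_id *cm_id;
	int ret;

	cm_id = ib_create_cm_id(example_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return cm_id;
	/* A zero mask is treated as ~0ULL, i.e. an exact-match listen. */
	ret = ib_cm_listen(cm_id, __cpu_to_be64(0x1000ULL), 0);
	if (ret) {
		ib_destroy_cm_id(cm_id);
		return ERR_PTR(ret);
	}
	return cm_id;
}
#endif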
754
755static u64 cm_form_tid(struct cm_id_private *cm_id_priv,
756 enum cm_msg_sequence msg_seq)
757{
758 u64 hi_tid, low_tid;
759
760 hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
761 low_tid = (u64) (cm_id_priv->id.local_id | (msg_seq << 30));
762 return cpu_to_be64(hi_tid | low_tid);
763}
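
/*
 * Editor's note: the 64-bit TID built above is laid out as
 *
 *	[63:32]	MAD agent hi_tid
 *	[31:30]	message sequence (REQ/DREQ/...)
 *	[29:0]	local communication ID
 *
 * so the response-handling paths can recover the owning connection from
 * a message's TID alone.
 */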
764
765static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
766 enum cm_msg_attr_id attr_id, u64 tid)
767{
768 hdr->base_version = IB_MGMT_BASE_VERSION;
769 hdr->mgmt_class = IB_MGMT_CLASS_CM;
770 hdr->class_version = IB_CM_CLASS_VERSION;
771 hdr->method = IB_MGMT_METHOD_SEND;
772 hdr->attr_id = attr_id;
773 hdr->tid = tid;
774}
775
776static void cm_format_req(struct cm_req_msg *req_msg,
777 struct cm_id_private *cm_id_priv,
778 struct ib_cm_req_param *param)
779{
780 cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
781 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
782
783 req_msg->local_comm_id = cm_id_priv->id.local_id;
784 req_msg->service_id = param->service_id;
785 req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
786 cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
787 cm_req_set_resp_res(req_msg, param->responder_resources);
788 cm_req_set_init_depth(req_msg, param->initiator_depth);
789 cm_req_set_remote_resp_timeout(req_msg,
790 param->remote_cm_response_timeout);
791 cm_req_set_qp_type(req_msg, param->qp_type);
792 cm_req_set_flow_ctrl(req_msg, param->flow_control);
793 cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
794 cm_req_set_local_resp_timeout(req_msg,
795 param->local_cm_response_timeout);
796 cm_req_set_retry_count(req_msg, param->retry_count);
797 req_msg->pkey = param->primary_path->pkey;
798 cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
799 cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
800 cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
801 cm_req_set_srq(req_msg, param->srq);
802
803 req_msg->primary_local_lid = param->primary_path->slid;
804 req_msg->primary_remote_lid = param->primary_path->dlid;
805 req_msg->primary_local_gid = param->primary_path->sgid;
806 req_msg->primary_remote_gid = param->primary_path->dgid;
807 cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
808 cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
809 req_msg->primary_traffic_class = param->primary_path->traffic_class;
810 req_msg->primary_hop_limit = param->primary_path->hop_limit;
811 cm_req_set_primary_sl(req_msg, param->primary_path->sl);
812 cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
813 cm_req_set_primary_local_ack_timeout(req_msg,
814 min(31, param->primary_path->packet_life_time + 1));
815
816 if (param->alternate_path) {
817 req_msg->alt_local_lid = param->alternate_path->slid;
818 req_msg->alt_remote_lid = param->alternate_path->dlid;
819 req_msg->alt_local_gid = param->alternate_path->sgid;
820 req_msg->alt_remote_gid = param->alternate_path->dgid;
821 cm_req_set_alt_flow_label(req_msg,
822 param->alternate_path->flow_label);
823 cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
824 req_msg->alt_traffic_class = param->alternate_path->traffic_class;
825 req_msg->alt_hop_limit = param->alternate_path->hop_limit;
826 cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
827 cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
828 cm_req_set_alt_local_ack_timeout(req_msg,
829 min(31, param->alternate_path->packet_life_time + 1));
830 }
831
832 if (param->private_data && param->private_data_len)
833 memcpy(req_msg->private_data, param->private_data,
834 param->private_data_len);
835}
836
837static inline int cm_validate_req_param(struct ib_cm_req_param *param)
838{
839 /* peer-to-peer not supported */
840 if (param->peer_to_peer)
841 return -EINVAL;
842
843 if (!param->primary_path)
844 return -EINVAL;
845
846 if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
847 return -EINVAL;
848
849 if (param->private_data &&
850 param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
851 return -EINVAL;
852
853 if (param->alternate_path &&
854 (param->alternate_path->pkey != param->primary_path->pkey ||
855 param->alternate_path->mtu != param->primary_path->mtu))
856 return -EINVAL;
857
858 return 0;
859}
860
861int ib_send_cm_req(struct ib_cm_id *cm_id,
862 struct ib_cm_req_param *param)
863{
864 struct cm_id_private *cm_id_priv;
865 struct ib_send_wr *bad_send_wr;
866 struct cm_req_msg *req_msg;
867 unsigned long flags;
868 int ret;
869
870 ret = cm_validate_req_param(param);
871 if (ret)
872 return ret;
873
874 /* Verify that we're not in timewait. */
875 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
876 spin_lock_irqsave(&cm_id_priv->lock, flags);
877 if (cm_id->state != IB_CM_IDLE) {
878 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
879 ret = -EINVAL;
880 goto out;
881 }
882 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
883
884 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
885 id.local_id);
886 if (IS_ERR(cm_id_priv->timewait_info)) {
	ret = PTR_ERR(cm_id_priv->timewait_info);
887 	goto out;
	}
888
889 ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
890 if (ret)
891 goto error1;
892 if (param->alternate_path) {
893 ret = cm_init_av_by_path(param->alternate_path,
894 &cm_id_priv->alt_av);
895 if (ret)
896 goto error1;
897 }
898 cm_id->service_id = param->service_id;
899 cm_id->service_mask = ~0ULL;
900 cm_id_priv->timeout_ms = cm_convert_to_ms(
901 param->primary_path->packet_life_time) * 2 +
902 cm_convert_to_ms(
903 param->remote_cm_response_timeout);
904 cm_id_priv->max_cm_retries = param->max_cm_retries;
905 cm_id_priv->initiator_depth = param->initiator_depth;
906 cm_id_priv->responder_resources = param->responder_resources;
907 cm_id_priv->retry_count = param->retry_count;
908 cm_id_priv->path_mtu = param->primary_path->mtu;
909
910 ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
911 if (ret)
912 goto error1;
913
914 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
915 cm_format_req(req_msg, cm_id_priv, param);
916 cm_id_priv->tid = req_msg->hdr.tid;
917 cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
918 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
919
920 cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
921 cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
922 cm_id_priv->local_ack_timeout =
923 cm_req_get_primary_local_ack_timeout(req_msg);
924
925 spin_lock_irqsave(&cm_id_priv->lock, flags);
926 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
927 &cm_id_priv->msg->send_wr, &bad_send_wr);
928 if (ret) {
929 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
930 goto error2;
931 }
932 BUG_ON(cm_id->state != IB_CM_IDLE);
933 cm_id->state = IB_CM_REQ_SENT;
934 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
935 return 0;
936
937error2: cm_free_msg(cm_id_priv->msg);
938error1: kfree(cm_id_priv->timewait_info);
939out: return ret;
940}
941EXPORT_SYMBOL(ib_send_cm_req);
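
/*
 * Editor's note: a hypothetical sketch of filling ib_cm_req_param for
 * an RC connection.  The path record is assumed to come from an earlier
 * SA path query; every numeric value below is an example only.
 */
#if 0	/* example only, not part of this patch */
static int example_send_req(struct ib_cm_id *cm_id,
			    struct ib_sa_path_rec *path, u32 qpn)
{
	struct ib_cm_req_param req;

	memset(&req, 0, sizeof req);
	req.primary_path	       = path;		/* required */
	req.service_id		       = __cpu_to_be64(0x1000ULL);
	req.qp_num		       = qpn;
	req.qp_type		       = IB_QPT_RC;	/* RC or UC only */
	req.starting_psn	       = qpn;
	req.responder_resources        = 4;
	req.initiator_depth	       = 4;
	req.remote_cm_response_timeout = 20;	/* 4.096us * 2^20 */
	req.local_cm_response_timeout  = 20;
	req.retry_count		       = 7;
	req.rnr_retry_count	       = 7;
	req.max_cm_retries	       = 15;
	return ib_send_cm_req(cm_id, &req);
}
#endif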
942
943static int cm_issue_rej(struct cm_port *port,
944 struct ib_mad_recv_wc *mad_recv_wc,
945 enum ib_cm_rej_reason reason,
946 enum cm_msg_response msg_rejected,
947 void *ari, u8 ari_length)
948{
949 struct ib_mad_send_buf *msg = NULL;
950 struct ib_send_wr *bad_send_wr;
951 struct cm_rej_msg *rej_msg, *rcv_msg;
952 int ret;
953
954 ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
955 if (ret)
956 return ret;
957
958 /* We just need common CM header information. Cast to any message. */
959 rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
960 rej_msg = (struct cm_rej_msg *) msg->mad;
961
962 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
963 rej_msg->remote_comm_id = rcv_msg->local_comm_id;
964 rej_msg->local_comm_id = rcv_msg->remote_comm_id;
965 cm_rej_set_msg_rejected(rej_msg, msg_rejected);
966 rej_msg->reason = reason;
967
968 if (ari && ari_length) {
969 cm_rej_set_reject_info_len(rej_msg, ari_length);
970 memcpy(rej_msg->ari, ari, ari_length);
971 }
972
973 ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr);
974 if (ret)
975 cm_free_msg(msg);
976
977 return ret;
978}
979
980static inline int cm_is_active_peer(u64 local_ca_guid, u64 remote_ca_guid,
981 u32 local_qpn, u32 remote_qpn)
982{
983 return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
984 ((local_ca_guid == remote_ca_guid) &&
985 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
986}
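
/*
 * Editor's note: for peer-to-peer connections the side with the larger
 * CA GUID (or, on a GUID tie, the larger QPN) would act as the active
 * peer.  Peer-to-peer is not wired up yet; see the check in
 * cm_validate_req_param().
 */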
987
988static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
989 struct ib_sa_path_rec *primary_path,
990 struct ib_sa_path_rec *alt_path)
991{
992 memset(primary_path, 0, sizeof *primary_path);
993 primary_path->dgid = req_msg->primary_local_gid;
994 primary_path->sgid = req_msg->primary_remote_gid;
995 primary_path->dlid = req_msg->primary_local_lid;
996 primary_path->slid = req_msg->primary_remote_lid;
997 primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
998 primary_path->hop_limit = req_msg->primary_hop_limit;
999 primary_path->traffic_class = req_msg->primary_traffic_class;
1000 primary_path->reversible = 1;
1001 primary_path->pkey = req_msg->pkey;
1002 primary_path->sl = cm_req_get_primary_sl(req_msg);
1003 primary_path->mtu_selector = IB_SA_EQ;
1004 primary_path->mtu = cm_req_get_path_mtu(req_msg);
1005 primary_path->rate_selector = IB_SA_EQ;
1006 primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1007 primary_path->packet_life_time_selector = IB_SA_EQ;
1008 primary_path->packet_life_time =
1009 cm_req_get_primary_local_ack_timeout(req_msg);
1010 primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1011
1012 if (req_msg->alt_local_lid) {
1013 memset(alt_path, 0, sizeof *alt_path);
1014 alt_path->dgid = req_msg->alt_local_gid;
1015 alt_path->sgid = req_msg->alt_remote_gid;
1016 alt_path->dlid = req_msg->alt_local_lid;
1017 alt_path->slid = req_msg->alt_remote_lid;
1018 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1019 alt_path->hop_limit = req_msg->alt_hop_limit;
1020 alt_path->traffic_class = req_msg->alt_traffic_class;
1021 alt_path->reversible = 1;
1022 alt_path->pkey = req_msg->pkey;
1023 alt_path->sl = cm_req_get_alt_sl(req_msg);
1024 alt_path->mtu_selector = IB_SA_EQ;
1025 alt_path->mtu = cm_req_get_path_mtu(req_msg);
1026 alt_path->rate_selector = IB_SA_EQ;
1027 alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1028 alt_path->packet_life_time_selector = IB_SA_EQ;
1029 alt_path->packet_life_time =
1030 cm_req_get_alt_local_ack_timeout(req_msg);
1031 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1032 }
1033}
1034
1035static void cm_format_req_event(struct cm_work *work,
1036 struct cm_id_private *cm_id_priv,
1037 struct ib_cm_id *listen_id)
1038{
1039 struct cm_req_msg *req_msg;
1040 struct ib_cm_req_event_param *param;
1041
1042 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1043 param = &work->cm_event.param.req_rcvd;
1044 param->listen_id = listen_id;
1045 param->device = cm_id_priv->av.port->mad_agent->device;
1046 param->port = cm_id_priv->av.port->port_num;
1047 param->primary_path = &work->path[0];
1048 if (req_msg->alt_local_lid)
1049 param->alternate_path = &work->path[1];
1050 else
1051 param->alternate_path = NULL;
1052 param->remote_ca_guid = req_msg->local_ca_guid;
1053 param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1054 param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1055 param->qp_type = cm_req_get_qp_type(req_msg);
1056 param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1057 param->responder_resources = cm_req_get_init_depth(req_msg);
1058 param->initiator_depth = cm_req_get_resp_res(req_msg);
1059 param->local_cm_response_timeout =
1060 cm_req_get_remote_resp_timeout(req_msg);
1061 param->flow_control = cm_req_get_flow_ctrl(req_msg);
1062 param->remote_cm_response_timeout =
1063 cm_req_get_local_resp_timeout(req_msg);
1064 param->retry_count = cm_req_get_retry_count(req_msg);
1065 param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1066 param->srq = cm_req_get_srq(req_msg);
1067 work->cm_event.private_data = &req_msg->private_data;
1068}
1069
1070static void cm_process_work(struct cm_id_private *cm_id_priv,
1071 struct cm_work *work)
1072{
1073 unsigned long flags;
1074 int ret;
1075
1076 /* We will typically only have the current event to report. */
1077 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1078 cm_free_work(work);
1079
1080 while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1081 spin_lock_irqsave(&cm_id_priv->lock, flags);
1082 work = cm_dequeue_work(cm_id_priv);
1083 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1084 BUG_ON(!work);
1085 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1086 &work->cm_event);
1087 cm_free_work(work);
1088 }
1089 cm_deref_id(cm_id_priv);
1090 if (ret)
1091 ib_destroy_cm_id(&cm_id_priv->id);
1092}
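
/*
 * Editor's note: work_count starts at -1 (see ib_create_cm_id()).  An
 * event handler that calls atomic_inc_and_test() and sees the count hit
 * zero wins the right to deliver events and calls cm_process_work();
 * handlers that lose the race queue their work on work_list instead,
 * and the loop above drains that backlog via atomic_add_negative().
 */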
1093
1094static void cm_format_mra(struct cm_mra_msg *mra_msg,
1095 struct cm_id_private *cm_id_priv,
1096 enum cm_msg_response msg_mraed, u8 service_timeout,
1097 const void *private_data, u8 private_data_len)
1098{
1099 cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1100 cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1101 mra_msg->local_comm_id = cm_id_priv->id.local_id;
1102 mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1103 cm_mra_set_service_timeout(mra_msg, service_timeout);
1104
1105 if (private_data && private_data_len)
1106 memcpy(mra_msg->private_data, private_data, private_data_len);
1107}
1108
1109static void cm_format_rej(struct cm_rej_msg *rej_msg,
1110 struct cm_id_private *cm_id_priv,
1111 enum ib_cm_rej_reason reason,
1112 void *ari,
1113 u8 ari_length,
1114 const void *private_data,
1115 u8 private_data_len)
1116{
1117 cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1118 rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1119
1120 switch (cm_id_priv->id.state) {
1121 case IB_CM_REQ_RCVD:
1122 rej_msg->local_comm_id = 0;
1123 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1124 break;
1125 case IB_CM_MRA_REQ_SENT:
1126 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1127 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1128 break;
1129 case IB_CM_REP_RCVD:
1130 case IB_CM_MRA_REP_SENT:
1131 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1132 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1133 break;
1134 default:
1135 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1136 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1137 break;
1138 }
1139
1140 rej_msg->reason = reason;
1141 if (ari && ari_length) {
1142 cm_rej_set_reject_info_len(rej_msg, ari_length);
1143 memcpy(rej_msg->ari, ari, ari_length);
1144 }
1145
1146 if (private_data && private_data_len)
1147 memcpy(rej_msg->private_data, private_data, private_data_len);
1148}
1149
1150static void cm_dup_req_handler(struct cm_work *work,
1151 struct cm_id_private *cm_id_priv)
1152{
1153 struct ib_mad_send_buf *msg = NULL;
1154 struct ib_send_wr *bad_send_wr;
1155 unsigned long flags;
1156 int ret;
1157
1158 /* Quick state check to discard duplicate REQs. */
1159 if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1160 return;
1161
1162 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1163 if (ret)
1164 return;
1165
1166 spin_lock_irqsave(&cm_id_priv->lock, flags);
1167 switch (cm_id_priv->id.state) {
1168 case IB_CM_MRA_REQ_SENT:
1169 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1170 CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1171 cm_id_priv->private_data,
1172 cm_id_priv->private_data_len);
1173 break;
1174 case IB_CM_TIMEWAIT:
1175 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1176 IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1177 break;
1178 default:
1179 goto unlock;
1180 }
1181 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1182
1183 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1184 &bad_send_wr);
1185 if (ret)
1186 goto free;
1187 return;
1188
1189unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1190free: cm_free_msg(msg);
1191}
1192
1193static struct cm_id_private * cm_match_req(struct cm_work *work,
1194 struct cm_id_private *cm_id_priv)
1195{
1196 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1197 struct cm_timewait_info *timewait_info;
1198 struct cm_req_msg *req_msg;
1199 unsigned long flags;
1200
1201 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1202
1203 /* Check for duplicate REQ and stale connections. */
1204 spin_lock_irqsave(&cm.lock, flags);
1205 timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1206 if (!timewait_info)
1207 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1208
1209 if (timewait_info) {
1210 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1211 timewait_info->work.remote_id);
1212 spin_unlock_irqrestore(&cm.lock, flags);
1213 if (cur_cm_id_priv) {
1214 cm_dup_req_handler(work, cur_cm_id_priv);
1215 cm_deref_id(cur_cm_id_priv);
1216 } else
1217 cm_issue_rej(work->port, work->mad_recv_wc,
1218 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1219 NULL, 0);
1220 goto error;
1221 }
1222
1223 /* Find matching listen request. */
1224 listen_cm_id_priv = cm_find_listen(req_msg->service_id);
1225 if (!listen_cm_id_priv) {
1226 spin_unlock_irqrestore(&cm.lock, flags);
1227 cm_issue_rej(work->port, work->mad_recv_wc,
1228 IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1229 NULL, 0);
1230 goto error;
1231 }
1232 atomic_inc(&listen_cm_id_priv->refcount);
1233 atomic_inc(&cm_id_priv->refcount);
1234 cm_id_priv->id.state = IB_CM_REQ_RCVD;
1235 atomic_inc(&cm_id_priv->work_count);
1236 spin_unlock_irqrestore(&cm.lock, flags);
1237 return listen_cm_id_priv;
1238
1239error: cm_cleanup_timewait(cm_id_priv->timewait_info);
1240 return NULL;
1241}
1242
1243static int cm_req_handler(struct cm_work *work)
1244{
1245 struct ib_cm_id *cm_id;
1246 struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1247 struct cm_req_msg *req_msg;
1248 int ret;
1249
1250 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1251
1252 cm_id = ib_create_cm_id(NULL, NULL);
1253 if (IS_ERR(cm_id))
1254 return PTR_ERR(cm_id);
1255
1256 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1257 cm_id_priv->id.remote_id = req_msg->local_comm_id;
1258 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1259 &cm_id_priv->av);
1260 cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1261 id.local_id);
1262 if (IS_ERR(cm_id_priv->timewait_info)) {
1263 ret = PTR_ERR(cm_id_priv->timewait_info);
1264 goto error1;
1265 }
1266 cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1267 cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1268 cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1269
1270 listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1271 if (!listen_cm_id_priv) {
1272 ret = -EINVAL;
1273 goto error2;
1274 }
1275
1276 cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1277 cm_id_priv->id.context = listen_cm_id_priv->id.context;
1278 cm_id_priv->id.service_id = req_msg->service_id;
1279 cm_id_priv->id.service_mask = ~0ULL;
1280
1281 cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1282 ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1283 if (ret)
1284 goto error3;
1285 if (req_msg->alt_local_lid) {
1286 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1287 if (ret)
1288 goto error3;
1289 }
1290 cm_id_priv->tid = req_msg->hdr.tid;
1291 cm_id_priv->timeout_ms = cm_convert_to_ms(
1292 cm_req_get_local_resp_timeout(req_msg));
1293 cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1294 cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1295 cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1296 cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1297 cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1298 cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1299 cm_id_priv->local_ack_timeout =
1300 cm_req_get_primary_local_ack_timeout(req_msg);
1301 cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1302 cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1303
1304 cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1305 cm_process_work(cm_id_priv, work);
1306 cm_deref_id(listen_cm_id_priv);
1307 return 0;
1308
1309error3: atomic_dec(&cm_id_priv->refcount);
1310 cm_deref_id(listen_cm_id_priv);
1311 cm_cleanup_timewait(cm_id_priv->timewait_info);
1312error2: kfree(cm_id_priv->timewait_info);
1313error1: ib_destroy_cm_id(&cm_id_priv->id);
1314 return ret;
1315}
1316
1317static void cm_format_rep(struct cm_rep_msg *rep_msg,
1318 struct cm_id_private *cm_id_priv,
1319 struct ib_cm_rep_param *param)
1320{
1321 cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1322 rep_msg->local_comm_id = cm_id_priv->id.local_id;
1323 rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1324 cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1325 cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1326 rep_msg->resp_resources = param->responder_resources;
1327 rep_msg->initiator_depth = param->initiator_depth;
1328 cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1329 cm_rep_set_failover(rep_msg, param->failover_accepted);
1330 cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1331 cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1332 cm_rep_set_srq(rep_msg, param->srq);
1333 rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1334
1335 if (param->private_data && param->private_data_len)
1336 memcpy(rep_msg->private_data, param->private_data,
1337 param->private_data_len);
1338}
1339
1340int ib_send_cm_rep(struct ib_cm_id *cm_id,
1341 struct ib_cm_rep_param *param)
1342{
1343 struct cm_id_private *cm_id_priv;
1344 struct ib_mad_send_buf *msg;
1345 struct cm_rep_msg *rep_msg;
1346 struct ib_send_wr *bad_send_wr;
1347 unsigned long flags;
1348 int ret;
1349
1350 if (param->private_data &&
1351 param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1352 return -EINVAL;
1353
1354 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1355 spin_lock_irqsave(&cm_id_priv->lock, flags);
1356 if (cm_id->state != IB_CM_REQ_RCVD &&
1357 cm_id->state != IB_CM_MRA_REQ_SENT) {
1358 ret = -EINVAL;
1359 goto out;
1360 }
1361
1362 ret = cm_alloc_msg(cm_id_priv, &msg);
1363 if (ret)
1364 goto out;
1365
1366 rep_msg = (struct cm_rep_msg *) msg->mad;
1367 cm_format_rep(rep_msg, cm_id_priv, param);
1368 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
1369 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1370
1371 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1372 &msg->send_wr, &bad_send_wr);
1373 if (ret) {
1374 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1375 cm_free_msg(msg);
1376 return ret;
1377 }
1378
1379 cm_id->state = IB_CM_REP_SENT;
1380 cm_id_priv->msg = msg;
1381 cm_id_priv->initiator_depth = param->initiator_depth;
1382 cm_id_priv->responder_resources = param->responder_resources;
1383 cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1384 cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1385
1386out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1387 return ret;
1388}
1389EXPORT_SYMBOL(ib_send_cm_rep);
1390
1391static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1392 struct cm_id_private *cm_id_priv,
1393 const void *private_data,
1394 u8 private_data_len)
1395{
1396 cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1397 rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1398 rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1399
1400 if (private_data && private_data_len)
1401 memcpy(rtu_msg->private_data, private_data, private_data_len);
1402}
1403
1404int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1405 const void *private_data,
1406 u8 private_data_len)
1407{
1408 struct cm_id_private *cm_id_priv;
1409 struct ib_mad_send_buf *msg;
1410 struct ib_send_wr *bad_send_wr;
1411 unsigned long flags;
1412 void *data;
1413 int ret;
1414
1415 if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1416 return -EINVAL;
1417
1418 data = cm_copy_private_data(private_data, private_data_len);
1419 if (IS_ERR(data))
1420 return PTR_ERR(data);
1421
1422 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1423 spin_lock_irqsave(&cm_id_priv->lock, flags);
1424 if (cm_id->state != IB_CM_REP_RCVD &&
1425 cm_id->state != IB_CM_MRA_REP_SENT) {
1426 ret = -EINVAL;
1427 goto error;
1428 }
1429
1430 ret = cm_alloc_msg(cm_id_priv, &msg);
1431 if (ret)
1432 goto error;
1433
1434 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1435 private_data, private_data_len);
1436
1437 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1438 &msg->send_wr, &bad_send_wr);
1439 if (ret) {
1440 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1441 cm_free_msg(msg);
1442 kfree(data);
1443 return ret;
1444 }
1445
1446 cm_id->state = IB_CM_ESTABLISHED;
1447 cm_set_private_data(cm_id_priv, data, private_data_len);
1448 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1449 return 0;
1450
1451error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1452 kfree(data);
1453 return ret;
1454}
1455EXPORT_SYMBOL(ib_send_cm_rtu);
1456
1457static void cm_format_rep_event(struct cm_work *work)
1458{
1459 struct cm_rep_msg *rep_msg;
1460 struct ib_cm_rep_event_param *param;
1461
1462 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1463 param = &work->cm_event.param.rep_rcvd;
1464 param->remote_ca_guid = rep_msg->local_ca_guid;
1465 param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1466 param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1467 param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1468 param->responder_resources = rep_msg->initiator_depth;
1469 param->initiator_depth = rep_msg->resp_resources;
1470 param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1471 param->failover_accepted = cm_rep_get_failover(rep_msg);
1472 param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1473 param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1474 param->srq = cm_rep_get_srq(rep_msg);
1475 work->cm_event.private_data = &rep_msg->private_data;
1476}
1477
1478static void cm_dup_rep_handler(struct cm_work *work)
1479{
1480 struct cm_id_private *cm_id_priv;
1481 struct cm_rep_msg *rep_msg;
1482 struct ib_mad_send_buf *msg = NULL;
1483 struct ib_send_wr *bad_send_wr;
1484 unsigned long flags;
1485 int ret;
1486
1487 rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1488 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1489 rep_msg->local_comm_id);
1490 if (!cm_id_priv)
1491 return;
1492
1493 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1494 if (ret)
1495 goto deref;
1496
1497 spin_lock_irqsave(&cm_id_priv->lock, flags);
1498 if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1499 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1500 cm_id_priv->private_data,
1501 cm_id_priv->private_data_len);
1502 else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1503 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1504 CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1505 cm_id_priv->private_data,
1506 cm_id_priv->private_data_len);
1507 else
1508 goto unlock;
1509 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1510
1511 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1512 &bad_send_wr);
1513 if (ret)
1514 goto free;
1515 goto deref;
1516
1517unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1518free: cm_free_msg(msg);
1519deref: cm_deref_id(cm_id_priv);
1520}
1521
1522static int cm_rep_handler(struct cm_work *work)
1523{
1524 struct cm_id_private *cm_id_priv;
1525 struct cm_rep_msg *rep_msg;
1526 unsigned long flags;
1527 int ret;
1528
1529 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1530 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1531 if (!cm_id_priv) {
1532 cm_dup_rep_handler(work);
1533 return -EINVAL;
1534 }
1535
1536 cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1537 cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1538 cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1539
1540 spin_lock_irqsave(&cm.lock, flags);
1541 /* Check for duplicate REP. */
1542 if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1543 spin_unlock_irqrestore(&cm.lock, flags);
1544 ret = -EINVAL;
1545 goto error;
1546 }
1547 /* Check for a stale connection. */
1548 if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1549 spin_unlock_irqrestore(&cm.lock, flags);
1550 cm_issue_rej(work->port, work->mad_recv_wc,
1551 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1552 NULL, 0);
1553 ret = -EINVAL;
1554 goto error;
1555 }
1556 spin_unlock_irqrestore(&cm.lock, flags);
1557
1558 cm_format_rep_event(work);
1559
1560 spin_lock_irqsave(&cm_id_priv->lock, flags);
1561 switch (cm_id_priv->id.state) {
1562 case IB_CM_REQ_SENT:
1563 case IB_CM_MRA_REQ_RCVD:
1564 break;
1565 default:
1566 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1567 ret = -EINVAL;
1568 goto error;
1569 }
1570 cm_id_priv->id.state = IB_CM_REP_RCVD;
1571 cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1572 cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1573 cm_id_priv->initiator_depth = rep_msg->resp_resources;
1574 cm_id_priv->responder_resources = rep_msg->initiator_depth;
1575 cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1576 cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1577
1578 /* todo: handle peer_to_peer */
1579
1580 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1581 (unsigned long) cm_id_priv->msg);
1582 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1583 if (!ret)
1584 list_add_tail(&work->list, &cm_id_priv->work_list);
1585 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1586
1587 if (ret)
1588 cm_process_work(cm_id_priv, work);
1589 else
1590 cm_deref_id(cm_id_priv);
1591 return 0;
1592
1593error: cm_cleanup_timewait(cm_id_priv->timewait_info);
1594 cm_deref_id(cm_id_priv);
1595 return ret;
1596}
1597
1598static int cm_establish_handler(struct cm_work *work)
1599{
1600 struct cm_id_private *cm_id_priv;
1601 unsigned long flags;
1602 int ret;
1603
1604 /* See comment in ib_cm_establish about lookup. */
1605 cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1606 if (!cm_id_priv)
1607 return -EINVAL;
1608
1609 spin_lock_irqsave(&cm_id_priv->lock, flags);
1610 if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1611 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1612 goto out;
1613 }
1614
1615 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1616 (unsigned long) cm_id_priv->msg);
1617 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1618 if (!ret)
1619 list_add_tail(&work->list, &cm_id_priv->work_list);
1620 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1621
1622 if (ret)
1623 cm_process_work(cm_id_priv, work);
1624 else
1625 cm_deref_id(cm_id_priv);
1626 return 0;
1627out:
1628 cm_deref_id(cm_id_priv);
1629 return -EINVAL;
1630}
1631
1632static int cm_rtu_handler(struct cm_work *work)
1633{
1634 struct cm_id_private *cm_id_priv;
1635 struct cm_rtu_msg *rtu_msg;
1636 unsigned long flags;
1637 int ret;
1638
1639 rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1640 cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1641 rtu_msg->local_comm_id);
1642 if (!cm_id_priv)
1643 return -EINVAL;
1644
1645 work->cm_event.private_data = &rtu_msg->private_data;
1646
1647 spin_lock_irqsave(&cm_id_priv->lock, flags);
1648 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1649 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1650 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1651 goto out;
1652 }
1653 cm_id_priv->id.state = IB_CM_ESTABLISHED;
1654
1655 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1656 (unsigned long) cm_id_priv->msg);
1657 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1658 if (!ret)
1659 list_add_tail(&work->list, &cm_id_priv->work_list);
1660 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1661
1662 if (ret)
1663 cm_process_work(cm_id_priv, work);
1664 else
1665 cm_deref_id(cm_id_priv);
1666 return 0;
1667out:
1668 cm_deref_id(cm_id_priv);
1669 return -EINVAL;
1670}
1671
1672static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1673 struct cm_id_private *cm_id_priv,
1674 const void *private_data,
1675 u8 private_data_len)
1676{
1677 cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1678 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1679 dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1680 dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1681 cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1682
1683 if (private_data && private_data_len)
1684 memcpy(dreq_msg->private_data, private_data, private_data_len);
1685}
1686
1687int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1688 const void *private_data,
1689 u8 private_data_len)
1690{
1691 struct cm_id_private *cm_id_priv;
1692 struct ib_mad_send_buf *msg;
1693 struct ib_send_wr *bad_send_wr;
1694 unsigned long flags;
1695 int ret;
1696
1697 if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1698 return -EINVAL;
1699
1700 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1701 spin_lock_irqsave(&cm_id_priv->lock, flags);
1702 if (cm_id->state != IB_CM_ESTABLISHED) {
1703 ret = -EINVAL;
1704 goto out;
1705 }
1706
1707 ret = cm_alloc_msg(cm_id_priv, &msg);
1708 if (ret) {
1709 cm_enter_timewait(cm_id_priv);
1710 goto out;
1711 }
1712
1713 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
1714 private_data, private_data_len);
1715 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
1716 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
1717
1718 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1719 &msg->send_wr, &bad_send_wr);
1720 if (ret) {
1721 cm_enter_timewait(cm_id_priv);
1722 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1723 cm_free_msg(msg);
1724 return ret;
1725 }
1726
1727 cm_id->state = IB_CM_DREQ_SENT;
1728 cm_id_priv->msg = msg;
1729out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1730 return ret;
1731}
1732EXPORT_SYMBOL(ib_send_cm_dreq);
1733
1734static void cm_format_drep(struct cm_drep_msg *drep_msg,
1735 struct cm_id_private *cm_id_priv,
1736 const void *private_data,
1737 u8 private_data_len)
1738{
1739 cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1740 drep_msg->local_comm_id = cm_id_priv->id.local_id;
1741 drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1742
1743 if (private_data && private_data_len)
1744 memcpy(drep_msg->private_data, private_data, private_data_len);
1745}
1746
1747int ib_send_cm_drep(struct ib_cm_id *cm_id,
1748 const void *private_data,
1749 u8 private_data_len)
1750{
1751 struct cm_id_private *cm_id_priv;
1752 struct ib_mad_send_buf *msg;
1753 struct ib_send_wr *bad_send_wr;
1754 unsigned long flags;
1755 void *data;
1756 int ret;
1757
1758 if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
1759 return -EINVAL;
1760
1761 data = cm_copy_private_data(private_data, private_data_len);
1762 if (IS_ERR(data))
1763 return PTR_ERR(data);
1764
1765 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1766 spin_lock_irqsave(&cm_id_priv->lock, flags);
1767 if (cm_id->state != IB_CM_DREQ_RCVD) {
1768 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1769 kfree(data);
1770 return -EINVAL;
1771 }
1772
1773 cm_set_private_data(cm_id_priv, data, private_data_len);
1774 cm_enter_timewait(cm_id_priv);
1775
1776 ret = cm_alloc_msg(cm_id_priv, &msg);
1777 if (ret)
1778 goto out;
1779
1780 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1781 private_data, private_data_len);
1782
1783 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
1784 &bad_send_wr);
1785 if (ret) {
1786 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1787 cm_free_msg(msg);
1788 return ret;
1789 }
1790
1791out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1792 return ret;
1793}
1794EXPORT_SYMBOL(ib_send_cm_drep);
1795
1796static int cm_dreq_handler(struct cm_work *work)
1797{
1798 struct cm_id_private *cm_id_priv;
1799 struct cm_dreq_msg *dreq_msg;
1800 struct ib_mad_send_buf *msg = NULL;
1801 struct ib_send_wr *bad_send_wr;
1802 unsigned long flags;
1803 int ret;
1804
1805 dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
1806 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
1807 dreq_msg->local_comm_id);
1808 if (!cm_id_priv)
1809 return -EINVAL;
1810
1811 work->cm_event.private_data = &dreq_msg->private_data;
1812
1813 spin_lock_irqsave(&cm_id_priv->lock, flags);
1814 if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
1815 goto unlock;
1816
1817 switch (cm_id_priv->id.state) {
1818 case IB_CM_REP_SENT:
1819 case IB_CM_DREQ_SENT:
1820 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1821 (unsigned long) cm_id_priv->msg);
1822 break;
1823 case IB_CM_ESTABLISHED:
1824 case IB_CM_MRA_REP_RCVD:
1825 break;
1826 case IB_CM_TIMEWAIT:
1827 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
1828 goto unlock;
1829
1830 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
1831 cm_id_priv->private_data,
1832 cm_id_priv->private_data_len);
1833 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1834
1835 if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1836 &msg->send_wr, &bad_send_wr))
1837 cm_free_msg(msg);
1838 goto deref;
1839 default:
1840 goto unlock;
1841 }
1842 cm_id_priv->id.state = IB_CM_DREQ_RCVD;
1843 cm_id_priv->tid = dreq_msg->hdr.tid;
1844 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1845 if (!ret)
1846 list_add_tail(&work->list, &cm_id_priv->work_list);
1847 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1848
1849 if (ret)
1850 cm_process_work(cm_id_priv, work);
1851 else
1852 cm_deref_id(cm_id_priv);
1853 return 0;
1854
1855unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1856deref: cm_deref_id(cm_id_priv);
1857 return -EINVAL;
1858}
1859
1860static int cm_drep_handler(struct cm_work *work)
1861{
1862 struct cm_id_private *cm_id_priv;
1863 struct cm_drep_msg *drep_msg;
1864 unsigned long flags;
1865 int ret;
1866
1867 drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
1868 cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
1869 drep_msg->local_comm_id);
1870 if (!cm_id_priv)
1871 return -EINVAL;
1872
1873 work->cm_event.private_data = &drep_msg->private_data;
1874
1875 spin_lock_irqsave(&cm_id_priv->lock, flags);
1876 if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
1877 cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
1878 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1879 goto out;
1880 }
1881 cm_enter_timewait(cm_id_priv);
1882
1883 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1884 (unsigned long) cm_id_priv->msg);
1885 ret = atomic_inc_and_test(&cm_id_priv->work_count);
1886 if (!ret)
1887 list_add_tail(&work->list, &cm_id_priv->work_list);
1888 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1889
1890 if (ret)
1891 cm_process_work(cm_id_priv, work);
1892 else
1893 cm_deref_id(cm_id_priv);
1894 return 0;
1895out:
1896 cm_deref_id(cm_id_priv);
1897 return -EINVAL;
1898}
1899
1900int ib_send_cm_rej(struct ib_cm_id *cm_id,
1901 enum ib_cm_rej_reason reason,
1902 void *ari,
1903 u8 ari_length,
1904 const void *private_data,
1905 u8 private_data_len)
1906{
1907 struct cm_id_private *cm_id_priv;
1908 struct ib_mad_send_buf *msg;
1909 struct ib_send_wr *bad_send_wr;
1910 unsigned long flags;
1911 int ret;
1912
1913 if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
1914 (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
1915 return -EINVAL;
1916
1917 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1918
1919 spin_lock_irqsave(&cm_id_priv->lock, flags);
1920 switch (cm_id->state) {
1921 case IB_CM_REQ_SENT:
1922 case IB_CM_MRA_REQ_RCVD:
1923 case IB_CM_REQ_RCVD:
1924 case IB_CM_MRA_REQ_SENT:
1925 case IB_CM_REP_RCVD:
1926 case IB_CM_MRA_REP_SENT:
1927 ret = cm_alloc_msg(cm_id_priv, &msg);
1928 if (!ret)
1929 cm_format_rej((struct cm_rej_msg *) msg->mad,
1930 cm_id_priv, reason, ari, ari_length,
1931 private_data, private_data_len);
1932
1933 cm_reset_to_idle(cm_id_priv);
1934 break;
1935 case IB_CM_REP_SENT:
1936 case IB_CM_MRA_REP_RCVD:
1937 ret = cm_alloc_msg(cm_id_priv, &msg);
1938 if (!ret)
1939 cm_format_rej((struct cm_rej_msg *) msg->mad,
1940 cm_id_priv, reason, ari, ari_length,
1941 private_data, private_data_len);
1942
1943 cm_enter_timewait(cm_id_priv);
1944 break;
1945 default:
1946 ret = -EINVAL;
1947 goto out;
1948 }
1949
1950 if (ret)
1951 goto out;
1952
1953 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
1954 &msg->send_wr, &bad_send_wr);
1955 if (ret)
1956 cm_free_msg(msg);
1957
1958out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1959 return ret;
1960}
1961EXPORT_SYMBOL(ib_send_cm_rej);
1962
1963static void cm_format_rej_event(struct cm_work *work)
1964{
1965 struct cm_rej_msg *rej_msg;
1966 struct ib_cm_rej_event_param *param;
1967
1968 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
1969 param = &work->cm_event.param.rej_rcvd;
1970 param->ari = rej_msg->ari;
1971 param->ari_length = cm_rej_get_reject_info_len(rej_msg);
1972 param->reason = rej_msg->reason;
1973 work->cm_event.private_data = &rej_msg->private_data;
1974}
1975
1976static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
1977{
1978 struct cm_timewait_info *timewait_info;
1979 struct cm_id_private *cm_id_priv;
1980 unsigned long flags;
1981 u32 remote_id;
1982
1983 remote_id = rej_msg->local_comm_id;
1984
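	/*
	 * A timeout REJ does not reference the comm id we assigned, so
	 * the connection is found through the sender's CA GUID, carried
	 * in the ARI, together with the sender's comm id.
	 */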
1985 if (rej_msg->reason == IB_CM_REJ_TIMEOUT) {
1986 spin_lock_irqsave(&cm.lock, flags);
1987 timewait_info = cm_find_remote_id( *((u64 *) rej_msg->ari),
1988 remote_id);
1989 if (!timewait_info) {
1990 spin_unlock_irqrestore(&cm.lock, flags);
1991 return NULL;
1992 }
1993 cm_id_priv = idr_find(&cm.local_id_table,
1994 (int) timewait_info->work.local_id);
1995 if (cm_id_priv) {
1996 if (cm_id_priv->id.remote_id == remote_id)
1997 atomic_inc(&cm_id_priv->refcount);
1998 else
1999 cm_id_priv = NULL;
2000 }
2001 spin_unlock_irqrestore(&cm.lock, flags);
2002 } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2003 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2004 else
2005 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2006
2007 return cm_id_priv;
2008}
2009
2010static int cm_rej_handler(struct cm_work *work)
2011{
2012 struct cm_id_private *cm_id_priv;
2013 struct cm_rej_msg *rej_msg;
2014 unsigned long flags;
2015 int ret;
2016
2017 rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2018 cm_id_priv = cm_acquire_rejected_id(rej_msg);
2019 if (!cm_id_priv)
2020 return -EINVAL;
2021
2022 cm_format_rej_event(work);
2023
2024 spin_lock_irqsave(&cm_id_priv->lock, flags);
2025 switch (cm_id_priv->id.state) {
2026 case IB_CM_REQ_SENT:
2027 case IB_CM_MRA_REQ_RCVD:
2028 case IB_CM_REP_SENT:
2029 case IB_CM_MRA_REP_RCVD:
2030 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2031 (unsigned long) cm_id_priv->msg);
2032 /* fall through */
2033 case IB_CM_REQ_RCVD:
2034 case IB_CM_MRA_REQ_SENT:
2035 if (rej_msg->reason == IB_CM_REJ_STALE_CONN)
2036 cm_enter_timewait(cm_id_priv);
2037 else
2038 cm_reset_to_idle(cm_id_priv);
2039 break;
2040 case IB_CM_DREQ_SENT:
2041 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2042 (unsigned long) cm_id_priv->msg);
2043 /* fall through */
2044 case IB_CM_REP_RCVD:
2045 case IB_CM_MRA_REP_SENT:
2046 case IB_CM_ESTABLISHED:
2047 cm_enter_timewait(cm_id_priv);
2048 break;
2049 default:
2050 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2051 ret = -EINVAL;
2052 goto out;
2053 }
2054
2055 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2056 if (!ret)
2057 list_add_tail(&work->list, &cm_id_priv->work_list);
2058 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2059
2060 if (ret)
2061 cm_process_work(cm_id_priv, work);
2062 else
2063 cm_deref_id(cm_id_priv);
2064 return 0;
2065out:
2066 cm_deref_id(cm_id_priv);
2067 return -EINVAL;
2068}
2069
2070int ib_send_cm_mra(struct ib_cm_id *cm_id,
2071 u8 service_timeout,
2072 const void *private_data,
2073 u8 private_data_len)
2074{
2075 struct cm_id_private *cm_id_priv;
2076 struct ib_mad_send_buf *msg;
2077 struct ib_send_wr *bad_send_wr;
2078 void *data;
2079 unsigned long flags;
2080 int ret;
2081
2082 if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2083 return -EINVAL;
2084
2085 data = cm_copy_private_data(private_data, private_data_len);
2086 if (IS_ERR(data))
2087 return PTR_ERR(data);
2088
2089 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2090
2091 spin_lock_irqsave(&cm_id_priv->lock, flags);
2092 switch(cm_id_priv->id.state) {
2093 case IB_CM_REQ_RCVD:
2094 ret = cm_alloc_msg(cm_id_priv, &msg);
2095 if (ret)
2096 goto error1;
2097
2098 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2099 CM_MSG_RESPONSE_REQ, service_timeout,
2100 private_data, private_data_len);
2101 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2102 &msg->send_wr, &bad_send_wr);
2103 if (ret)
2104 goto error2;
2105 cm_id->state = IB_CM_MRA_REQ_SENT;
2106 break;
2107 case IB_CM_REP_RCVD:
2108 ret = cm_alloc_msg(cm_id_priv, &msg);
2109 if (ret)
2110 goto error1;
2111
2112 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2113 CM_MSG_RESPONSE_REP, service_timeout,
2114 private_data, private_data_len);
2115 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2116 &msg->send_wr, &bad_send_wr);
2117 if (ret)
2118 goto error2;
2119 cm_id->state = IB_CM_MRA_REP_SENT;
2120 break;
2121 case IB_CM_ESTABLISHED:
2122 ret = cm_alloc_msg(cm_id_priv, &msg);
2123 if (ret)
2124 goto error1;
2125
2126 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2127 CM_MSG_RESPONSE_OTHER, service_timeout,
2128 private_data, private_data_len);
2129 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2130 &msg->send_wr, &bad_send_wr);
2131 if (ret)
2132 goto error2;
2133 cm_id->lap_state = IB_CM_MRA_LAP_SENT;
2134 break;
2135 default:
2136 ret = -EINVAL;
2137 goto error1;
2138 }
2139 cm_id_priv->service_timeout = service_timeout;
2140 cm_set_private_data(cm_id_priv, data, private_data_len);
2141 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2142 return 0;
2143
2144error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2145 kfree(data);
2146 return ret;
2147
2148error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2149 kfree(data);
2150 cm_free_msg(msg);
2151 return ret;
2152}
2153EXPORT_SYMBOL(ib_send_cm_mra);
2154
2155static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2156{
2157 switch (cm_mra_get_msg_mraed(mra_msg)) {
2158 case CM_MSG_RESPONSE_REQ:
2159 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2160 case CM_MSG_RESPONSE_REP:
2161 case CM_MSG_RESPONSE_OTHER:
2162 return cm_acquire_id(mra_msg->remote_comm_id,
2163 mra_msg->local_comm_id);
2164 default:
2165 return NULL;
2166 }
2167}
2168
2169static int cm_mra_handler(struct cm_work *work)
2170{
2171 struct cm_id_private *cm_id_priv;
2172 struct cm_mra_msg *mra_msg;
2173 unsigned long flags;
2174 int timeout, ret;
2175
2176 mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2177 cm_id_priv = cm_acquire_mraed_id(mra_msg);
2178 if (!cm_id_priv)
2179 return -EINVAL;
2180
2181 work->cm_event.private_data = &mra_msg->private_data;
2182 work->cm_event.param.mra_rcvd.service_timeout =
2183 cm_mra_get_service_timeout(mra_msg);
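	/* Give the peer its requested service timeout plus one packet
	 * lifetime on the path before retrying the outstanding MAD. */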
2184 timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2185 cm_convert_to_ms(cm_id_priv->av.packet_life_time);
2186
2187 spin_lock_irqsave(&cm_id_priv->lock, flags);
2188 switch (cm_id_priv->id.state) {
2189 case IB_CM_REQ_SENT:
2190 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2191 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2192 (unsigned long) cm_id_priv->msg, timeout))
2193 goto out;
2194 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2195 break;
2196 case IB_CM_REP_SENT:
2197 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2198 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2199 (unsigned long) cm_id_priv->msg, timeout))
2200 goto out;
2201 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2202 break;
2203 case IB_CM_ESTABLISHED:
2204 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2205 cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2206 ib_modify_mad(cm_id_priv->av.port->mad_agent,
2207 (unsigned long) cm_id_priv->msg, timeout))
2208 goto out;
2209 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2210 break;
2211 default:
2212 goto out;
2213 }
2214
2215 cm_id_priv->msg->context[1] = (void *) (unsigned long)
2216 cm_id_priv->id.state;
2217 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2218 if (!ret)
2219 list_add_tail(&work->list, &cm_id_priv->work_list);
2220 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2221
2222 if (ret)
2223 cm_process_work(cm_id_priv, work);
2224 else
2225 cm_deref_id(cm_id_priv);
2226 return 0;
2227out:
2228 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2229 cm_deref_id(cm_id_priv);
2230 return -EINVAL;
2231}
2232
2233static void cm_format_lap(struct cm_lap_msg *lap_msg,
2234 struct cm_id_private *cm_id_priv,
2235 struct ib_sa_path_rec *alternate_path,
2236 const void *private_data,
2237 u8 private_data_len)
2238{
2239 cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2240 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2241 lap_msg->local_comm_id = cm_id_priv->id.local_id;
2242 lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2243 cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2244 /* todo: need remote CM response timeout */
2245 cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2246 lap_msg->alt_local_lid = alternate_path->slid;
2247 lap_msg->alt_remote_lid = alternate_path->dlid;
2248 lap_msg->alt_local_gid = alternate_path->sgid;
2249 lap_msg->alt_remote_gid = alternate_path->dgid;
2250 cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2251 cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2252 lap_msg->alt_hop_limit = alternate_path->hop_limit;
2253 cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2254 cm_lap_set_sl(lap_msg, alternate_path->sl);
2255 cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2256 cm_lap_set_local_ack_timeout(lap_msg,
2257 min(31, alternate_path->packet_life_time + 1));
2258
2259 if (private_data && private_data_len)
2260 memcpy(lap_msg->private_data, private_data, private_data_len);
2261}
2262
2263int ib_send_cm_lap(struct ib_cm_id *cm_id,
2264 struct ib_sa_path_rec *alternate_path,
2265 const void *private_data,
2266 u8 private_data_len)
2267{
2268 struct cm_id_private *cm_id_priv;
2269 struct ib_mad_send_buf *msg;
2270 struct ib_send_wr *bad_send_wr;
2271 unsigned long flags;
2272 int ret;
2273
2274 if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2275 return -EINVAL;
2276
2277 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2278 spin_lock_irqsave(&cm_id_priv->lock, flags);
2279 if (cm_id->state != IB_CM_ESTABLISHED ||
2280 cm_id->lap_state != IB_CM_LAP_IDLE) {
2281 ret = -EINVAL;
2282 goto out;
2283 }
2284
2285 ret = cm_alloc_msg(cm_id_priv, &msg);
2286 if (ret)
2287 goto out;
2288
2289 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2290 alternate_path, private_data, private_data_len);
2291 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
2292 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2293
2294 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2295 &msg->send_wr, &bad_send_wr);
2296 if (ret) {
2297 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2298 cm_free_msg(msg);
2299 return ret;
2300 }
2301
2302 cm_id->lap_state = IB_CM_LAP_SENT;
2303 cm_id_priv->msg = msg;
2304
2305out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2306 return ret;
2307}
2308EXPORT_SYMBOL(ib_send_cm_lap);
2309
2310static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2311 struct cm_lap_msg *lap_msg)
2312{
2313 memset(path, 0, sizeof *path);
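	/* The LAP describes the alternate path from the sender's
	 * viewpoint, so local/remote fields are swapped here. */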
2314 path->dgid = lap_msg->alt_local_gid;
2315 path->sgid = lap_msg->alt_remote_gid;
2316 path->dlid = lap_msg->alt_local_lid;
2317 path->slid = lap_msg->alt_remote_lid;
2318 path->flow_label = cm_lap_get_flow_label(lap_msg);
2319 path->hop_limit = lap_msg->alt_hop_limit;
2320 path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2321 path->reversible = 1;
2322 /* pkey is same as in REQ */
2323 path->sl = cm_lap_get_sl(lap_msg);
2324 path->mtu_selector = IB_SA_EQ;
2325 /* mtu is same as in REQ */
2326 path->rate_selector = IB_SA_EQ;
2327 path->rate = cm_lap_get_packet_rate(lap_msg);
2328 path->packet_life_time_selector = IB_SA_EQ;
2329 path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2330 path->packet_life_time -= (path->packet_life_time > 0);
2331}
2332
2333static int cm_lap_handler(struct cm_work *work)
2334{
2335 struct cm_id_private *cm_id_priv;
2336 struct cm_lap_msg *lap_msg;
2337 struct ib_cm_lap_event_param *param;
2338 struct ib_mad_send_buf *msg = NULL;
2339 struct ib_send_wr *bad_send_wr;
2340 unsigned long flags;
2341 int ret;
2342
2343 /* todo: verify LAP request and send reject APR if invalid. */
2344 lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2345 cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2346 lap_msg->local_comm_id);
2347 if (!cm_id_priv)
2348 return -EINVAL;
2349
2350 param = &work->cm_event.param.lap_rcvd;
2351 param->alternate_path = &work->path[0];
2352 cm_format_path_from_lap(param->alternate_path, lap_msg);
2353 work->cm_event.private_data = &lap_msg->private_data;
2354
2355 spin_lock_irqsave(&cm_id_priv->lock, flags);
2356 if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2357 goto unlock;
2358
2359 switch (cm_id_priv->id.lap_state) {
2360 case IB_CM_LAP_IDLE:
2361 break;
2362 case IB_CM_MRA_LAP_SENT:
2363 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2364 goto unlock;
2365
2366 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2367 CM_MSG_RESPONSE_OTHER,
2368 cm_id_priv->service_timeout,
2369 cm_id_priv->private_data,
2370 cm_id_priv->private_data_len);
2371 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2372
2373 if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2374 &msg->send_wr, &bad_send_wr))
2375 cm_free_msg(msg);
2376 goto deref;
2377 default:
2378 goto unlock;
2379 }
2380
2381 cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2382 cm_id_priv->tid = lap_msg->hdr.tid;
2383 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2384 if (!ret)
2385 list_add_tail(&work->list, &cm_id_priv->work_list);
2386 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2387
2388 if (ret)
2389 cm_process_work(cm_id_priv, work);
2390 else
2391 cm_deref_id(cm_id_priv);
2392 return 0;
2393
2394unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2395deref: cm_deref_id(cm_id_priv);
2396 return -EINVAL;
2397}
2398
2399static void cm_format_apr(struct cm_apr_msg *apr_msg,
2400 struct cm_id_private *cm_id_priv,
2401 enum ib_cm_apr_status status,
2402 void *info,
2403 u8 info_length,
2404 const void *private_data,
2405 u8 private_data_len)
2406{
2407 cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2408 apr_msg->local_comm_id = cm_id_priv->id.local_id;
2409 apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2410 apr_msg->ap_status = (u8) status;
2411
2412 if (info && info_length) {
2413 apr_msg->info_length = info_length;
2414 memcpy(apr_msg->info, info, info_length);
2415 }
2416
2417 if (private_data && private_data_len)
2418 memcpy(apr_msg->private_data, private_data, private_data_len);
2419}
2420
2421int ib_send_cm_apr(struct ib_cm_id *cm_id,
2422 enum ib_cm_apr_status status,
2423 void *info,
2424 u8 info_length,
2425 const void *private_data,
2426 u8 private_data_len)
2427{
2428 struct cm_id_private *cm_id_priv;
2429 struct ib_mad_send_buf *msg;
2430 struct ib_send_wr *bad_send_wr;
2431 unsigned long flags;
2432 int ret;
2433
2434 if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2435 (info && info_length > IB_CM_APR_INFO_LENGTH))
2436 return -EINVAL;
2437
2438 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2439 spin_lock_irqsave(&cm_id_priv->lock, flags);
2440 if (cm_id->state != IB_CM_ESTABLISHED ||
2441 (cm_id->lap_state != IB_CM_LAP_RCVD &&
2442 cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2443 ret = -EINVAL;
2444 goto out;
2445 }
2446
2447 ret = cm_alloc_msg(cm_id_priv, &msg);
2448 if (ret)
2449 goto out;
2450
2451 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2452 info, info_length, private_data, private_data_len);
2453 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2454 &msg->send_wr, &bad_send_wr);
2455 if (ret) {
2456 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2457 cm_free_msg(msg);
2458 return ret;
2459 }
2460
2461 cm_id->lap_state = IB_CM_LAP_IDLE;
2462out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2463 return ret;
2464}
2465EXPORT_SYMBOL(ib_send_cm_apr);
2466
2467static int cm_apr_handler(struct cm_work *work)
2468{
2469 struct cm_id_private *cm_id_priv;
2470 struct cm_apr_msg *apr_msg;
2471 unsigned long flags;
2472 int ret;
2473
2474 apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2475 cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2476 apr_msg->local_comm_id);
2477 if (!cm_id_priv)
2478 return -EINVAL; /* Unmatched reply. */
2479
2480 work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2481 work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2482 work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2483 work->cm_event.private_data = &apr_msg->private_data;
2484
2485 spin_lock_irqsave(&cm_id_priv->lock, flags);
2486 if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2487 (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2488 cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2489 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2490 goto out;
2491 }
2492 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2493 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2494 (unsigned long) cm_id_priv->msg);
2495 cm_id_priv->msg = NULL;
2496
2497 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2498 if (!ret)
2499 list_add_tail(&work->list, &cm_id_priv->work_list);
2500 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2501
2502 if (ret)
2503 cm_process_work(cm_id_priv, work);
2504 else
2505 cm_deref_id(cm_id_priv);
2506 return 0;
2507out:
2508 cm_deref_id(cm_id_priv);
2509 return -EINVAL;
2510}
2511
2512static int cm_timewait_handler(struct cm_work *work)
2513{
2514 struct cm_timewait_info *timewait_info;
2515 struct cm_id_private *cm_id_priv;
2516 unsigned long flags;
2517 int ret;
2518
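	/* struct cm_work is the first member of cm_timewait_info,
	 * so the work pointer may be cast directly. */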
2519 timewait_info = (struct cm_timewait_info *)work;
2520 cm_cleanup_timewait(timewait_info);
2521
2522 cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2523 timewait_info->work.remote_id);
2524 if (!cm_id_priv)
2525 return -EINVAL;
2526
2527 spin_lock_irqsave(&cm_id_priv->lock, flags);
2528 if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2529 cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2530 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2531 goto out;
2532 }
2533 cm_id_priv->id.state = IB_CM_IDLE;
2534 ret = atomic_inc_and_test(&cm_id_priv->work_count);
2535 if (!ret)
2536 list_add_tail(&work->list, &cm_id_priv->work_list);
2537 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2538
2539 if (ret)
2540 cm_process_work(cm_id_priv, work);
2541 else
2542 cm_deref_id(cm_id_priv);
2543 return 0;
2544out:
2545 cm_deref_id(cm_id_priv);
2546 return -EINVAL;
2547}
2548
2549static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2550 struct cm_id_private *cm_id_priv,
2551 struct ib_cm_sidr_req_param *param)
2552{
2553 cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2554 cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2555 sidr_req_msg->request_id = cm_id_priv->id.local_id;
2556 sidr_req_msg->pkey = param->pkey;
2557 sidr_req_msg->service_id = param->service_id;
2558
2559 if (param->private_data && param->private_data_len)
2560 memcpy(sidr_req_msg->private_data, param->private_data,
2561 param->private_data_len);
2562}
2563
2564int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2565 struct ib_cm_sidr_req_param *param)
2566{
2567 struct cm_id_private *cm_id_priv;
2568 struct ib_mad_send_buf *msg;
2569 struct ib_send_wr *bad_send_wr;
2570 unsigned long flags;
2571 int ret;
2572
2573 if (!param->path || (param->private_data &&
2574 param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2575 return -EINVAL;
2576
2577 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2578 ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2579 if (ret)
2580 goto out;
2581
2582 cm_id->service_id = param->service_id;
2583 cm_id->service_mask = ~0ULL;
2584 cm_id_priv->timeout_ms = param->timeout_ms;
2585 cm_id_priv->max_cm_retries = param->max_cm_retries;
2586 ret = cm_alloc_msg(cm_id_priv, &msg);
2587 if (ret)
2588 goto out;
2589
2590 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2591 param);
2592 msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
2593 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2594
2595 spin_lock_irqsave(&cm_id_priv->lock, flags);
2596 if (cm_id->state == IB_CM_IDLE)
2597 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2598 &msg->send_wr, &bad_send_wr);
2599 else
2600 ret = -EINVAL;
2601
2602 if (ret) {
2603 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2604 cm_free_msg(msg);
2605 goto out;
2606 }
2607 cm_id->state = IB_CM_SIDR_REQ_SENT;
2608 cm_id_priv->msg = msg;
2609 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2610out:
2611 return ret;
2612}
2613EXPORT_SYMBOL(ib_send_cm_sidr_req);
2614
2615static void cm_format_sidr_req_event(struct cm_work *work,
2616 struct ib_cm_id *listen_id)
2617{
2618 struct cm_sidr_req_msg *sidr_req_msg;
2619 struct ib_cm_sidr_req_event_param *param;
2620
2621 sidr_req_msg = (struct cm_sidr_req_msg *)
2622 work->mad_recv_wc->recv_buf.mad;
2623 param = &work->cm_event.param.sidr_req_rcvd;
2624 param->pkey = sidr_req_msg->pkey;
2625 param->listen_id = listen_id;
2626 param->device = work->port->mad_agent->device;
2627 param->port = work->port->port_num;
2628 work->cm_event.private_data = &sidr_req_msg->private_data;
2629}
2630
2631static int cm_sidr_req_handler(struct cm_work *work)
2632{
2633 struct ib_cm_id *cm_id;
2634 struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2635 struct cm_sidr_req_msg *sidr_req_msg;
2636 struct ib_wc *wc;
2637 unsigned long flags;
2638
2639 cm_id = ib_create_cm_id(NULL, NULL);
2640 if (IS_ERR(cm_id))
2641 return PTR_ERR(cm_id);
2642 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2643
2644 /* Record SGID/SLID and request ID for lookup. */
2645 sidr_req_msg = (struct cm_sidr_req_msg *)
2646 work->mad_recv_wc->recv_buf.mad;
2647 wc = work->mad_recv_wc->wc;
2648 cm_id_priv->av.dgid.global.subnet_prefix = wc->slid;
2649 cm_id_priv->av.dgid.global.interface_id = 0;
2650 cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2651 &cm_id_priv->av);
2652 cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2653 cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
2654 cm_id_priv->tid = sidr_req_msg->hdr.tid;
2655 atomic_inc(&cm_id_priv->work_count);
2656
2657 spin_lock_irqsave(&cm.lock, flags);
2658 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2659 if (cur_cm_id_priv) {
2660 spin_unlock_irqrestore(&cm.lock, flags);
2661 goto out; /* Duplicate message. */
2662 }
2663 cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
2664 if (!cur_cm_id_priv) {
2665 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2666 spin_unlock_irqrestore(&cm.lock, flags);
2667 /* todo: reply with no match */
2668 goto out; /* No match. */
2669 }
2670 atomic_inc(&cur_cm_id_priv->refcount);
2671 spin_unlock_irqrestore(&cm.lock, flags);
2672
2673 cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
2674 cm_id_priv->id.context = cur_cm_id_priv->id.context;
2675 cm_id_priv->id.service_id = sidr_req_msg->service_id;
2676 cm_id_priv->id.service_mask = ~0ULL;
2677
2678 cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
2679 cm_process_work(cm_id_priv, work);
2680 cm_deref_id(cur_cm_id_priv);
2681 return 0;
2682out:
2683 ib_destroy_cm_id(&cm_id_priv->id);
2684 return -EINVAL;
2685}
2686
2687static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2688 struct cm_id_private *cm_id_priv,
2689 struct ib_cm_sidr_rep_param *param)
2690{
2691 cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2692 cm_id_priv->tid);
2693 sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2694 sidr_rep_msg->status = param->status;
2695 cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2696 sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2697 sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2698
2699 if (param->info && param->info_length)
2700 memcpy(sidr_rep_msg->info, param->info, param->info_length);
2701
2702 if (param->private_data && param->private_data_len)
2703 memcpy(sidr_rep_msg->private_data, param->private_data,
2704 param->private_data_len);
2705}
2706
2707int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
2708 struct ib_cm_sidr_rep_param *param)
2709{
2710 struct cm_id_private *cm_id_priv;
2711 struct ib_mad_send_buf *msg;
2712 struct ib_send_wr *bad_send_wr;
2713 unsigned long flags;
2714 int ret;
2715
2716 if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
2717 (param->private_data &&
2718 param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
2719 return -EINVAL;
2720
2721 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2722 spin_lock_irqsave(&cm_id_priv->lock, flags);
2723 if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
2724 ret = -EINVAL;
2725 goto error;
2726 }
2727
2728 ret = cm_alloc_msg(cm_id_priv, &msg);
2729 if (ret)
2730 goto error;
2731
2732 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
2733 param);
2734 ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
2735 &msg->send_wr, &bad_send_wr);
2736 if (ret) {
2737 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2738 cm_free_msg(msg);
2739 return ret;
2740 }
2741 cm_id->state = IB_CM_IDLE;
2742 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2743
2744 spin_lock_irqsave(&cm.lock, flags);
2745 rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
2746 spin_unlock_irqrestore(&cm.lock, flags);
2747 return 0;
2748
2749error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2750 return ret;
2751}
2752EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2753
2754static void cm_format_sidr_rep_event(struct cm_work *work)
2755{
2756 struct cm_sidr_rep_msg *sidr_rep_msg;
2757 struct ib_cm_sidr_rep_event_param *param;
2758
2759 sidr_rep_msg = (struct cm_sidr_rep_msg *)
2760 work->mad_recv_wc->recv_buf.mad;
2761 param = &work->cm_event.param.sidr_rep_rcvd;
2762 param->status = sidr_rep_msg->status;
2763 param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2764 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2765 param->info = &sidr_rep_msg->info;
2766 param->info_len = sidr_rep_msg->info_length;
2767 work->cm_event.private_data = &sidr_rep_msg->private_data;
2768}
2769
2770static int cm_sidr_rep_handler(struct cm_work *work)
2771{
2772 struct cm_sidr_rep_msg *sidr_rep_msg;
2773 struct cm_id_private *cm_id_priv;
2774 unsigned long flags;
2775
2776 sidr_rep_msg = (struct cm_sidr_rep_msg *)
2777 work->mad_recv_wc->recv_buf.mad;
2778 cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
2779 if (!cm_id_priv)
2780 return -EINVAL; /* Unmatched reply. */
2781
2782 spin_lock_irqsave(&cm_id_priv->lock, flags);
2783 if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
2784 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2785 goto out;
2786 }
2787 cm_id_priv->id.state = IB_CM_IDLE;
2788 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2789 (unsigned long) cm_id_priv->msg);
2790 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2791
2792 cm_format_sidr_rep_event(work);
2793 cm_process_work(cm_id_priv, work);
2794 return 0;
2795out:
2796 cm_deref_id(cm_id_priv);
2797 return -EINVAL;
2798}
2799
2800static void cm_process_send_error(struct ib_mad_send_buf *msg,
2801 enum ib_wc_status wc_status)
2802{
2803 struct cm_id_private *cm_id_priv;
2804 struct ib_cm_event cm_event;
2805 enum ib_cm_state state;
2806 unsigned long flags;
2807 int ret;
2808
2809 memset(&cm_event, 0, sizeof cm_event);
2810 cm_id_priv = msg->context[0];
2811
2812 /* Discard old sends or ones without a response. */
2813 spin_lock_irqsave(&cm_id_priv->lock, flags);
2814 state = (enum ib_cm_state) (unsigned long) msg->context[1];
2815 if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
2816 goto discard;
2817
2818 switch (state) {
2819 case IB_CM_REQ_SENT:
2820 case IB_CM_MRA_REQ_RCVD:
2821 cm_reset_to_idle(cm_id_priv);
2822 cm_event.event = IB_CM_REQ_ERROR;
2823 break;
2824 case IB_CM_REP_SENT:
2825 case IB_CM_MRA_REP_RCVD:
2826 cm_reset_to_idle(cm_id_priv);
2827 cm_event.event = IB_CM_REP_ERROR;
2828 break;
2829 case IB_CM_DREQ_SENT:
2830 cm_enter_timewait(cm_id_priv);
2831 cm_event.event = IB_CM_DREQ_ERROR;
2832 break;
2833 case IB_CM_SIDR_REQ_SENT:
2834 cm_id_priv->id.state = IB_CM_IDLE;
2835 cm_event.event = IB_CM_SIDR_REQ_ERROR;
2836 break;
2837 default:
2838 goto discard;
2839 }
2840 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2841 cm_event.param.send_status = wc_status;
2842
2843 /* No other events can occur on the cm_id at this point. */
2844 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
2845 cm_free_msg(msg);
2846 if (ret)
2847 ib_destroy_cm_id(&cm_id_priv->id);
2848 return;
2849discard:
2850 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2851 cm_free_msg(msg);
2852}
2853
2854static void cm_send_handler(struct ib_mad_agent *mad_agent,
2855 struct ib_mad_send_wc *mad_send_wc)
2856{
2857 struct ib_mad_send_buf *msg;
2858
2859 msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;
2860
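	/* Flushed sends occur during normal teardown and need no error
	 * handling; just release the send buffer. */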
2861 switch (mad_send_wc->status) {
2862 case IB_WC_SUCCESS:
2863 case IB_WC_WR_FLUSH_ERR:
2864 cm_free_msg(msg);
2865 break;
2866 default:
2867 if (msg->context[0] && msg->context[1])
2868 cm_process_send_error(msg, mad_send_wc->status);
2869 else
2870 cm_free_msg(msg);
2871 break;
2872 }
2873}
2874
2875static void cm_work_handler(void *data)
2876{
2877 struct cm_work *work = data;
2878 int ret;
2879
2880 switch (work->cm_event.event) {
2881 case IB_CM_REQ_RECEIVED:
2882 ret = cm_req_handler(work);
2883 break;
2884 case IB_CM_MRA_RECEIVED:
2885 ret = cm_mra_handler(work);
2886 break;
2887 case IB_CM_REJ_RECEIVED:
2888 ret = cm_rej_handler(work);
2889 break;
2890 case IB_CM_REP_RECEIVED:
2891 ret = cm_rep_handler(work);
2892 break;
2893 case IB_CM_RTU_RECEIVED:
2894 ret = cm_rtu_handler(work);
2895 break;
2896 case IB_CM_USER_ESTABLISHED:
2897 ret = cm_establish_handler(work);
2898 break;
2899 case IB_CM_DREQ_RECEIVED:
2900 ret = cm_dreq_handler(work);
2901 break;
2902 case IB_CM_DREP_RECEIVED:
2903 ret = cm_drep_handler(work);
2904 break;
2905 case IB_CM_SIDR_REQ_RECEIVED:
2906 ret = cm_sidr_req_handler(work);
2907 break;
2908 case IB_CM_SIDR_REP_RECEIVED:
2909 ret = cm_sidr_rep_handler(work);
2910 break;
2911 case IB_CM_LAP_RECEIVED:
2912 ret = cm_lap_handler(work);
2913 break;
2914 case IB_CM_APR_RECEIVED:
2915 ret = cm_apr_handler(work);
2916 break;
2917 case IB_CM_TIMEWAIT_EXIT:
2918 ret = cm_timewait_handler(work);
2919 break;
2920 default:
2921 ret = -EINVAL;
2922 break;
2923 }
2924 if (ret)
2925 cm_free_work(work);
2926}
2927
2928int ib_cm_establish(struct ib_cm_id *cm_id)
2929{
2930 struct cm_id_private *cm_id_priv;
2931 struct cm_work *work;
2932 unsigned long flags;
2933 int ret = 0;
2934
2935 work = kmalloc(sizeof *work, GFP_ATOMIC);
2936 if (!work)
2937 return -ENOMEM;
2938
2939 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2940 spin_lock_irqsave(&cm_id_priv->lock, flags);
2941 switch (cm_id->state)
2942 {
2943 case IB_CM_REP_SENT:
2944 case IB_CM_MRA_REP_RCVD:
2945 cm_id->state = IB_CM_ESTABLISHED;
2946 break;
2947 case IB_CM_ESTABLISHED:
2948 ret = -EISCONN;
2949 break;
2950 default:
2951 ret = -EINVAL;
2952 break;
2953 }
2954 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2955
2956 if (ret) {
2957 kfree(work);
2958 goto out;
2959 }
2960
2961 /*
2962 * The CM worker thread may try to destroy the cm_id before it
2963 * can execute this work item. To prevent potential deadlock,
2964 * we need to find the cm_id once we're in the context of the
2965 * worker thread, rather than holding a reference on it.
2966 */
2967 INIT_WORK(&work->work, cm_work_handler, work);
2968 work->local_id = cm_id->local_id;
2969 work->remote_id = cm_id->remote_id;
2970 work->mad_recv_wc = NULL;
2971 work->cm_event.event = IB_CM_USER_ESTABLISHED;
2972 queue_work(cm.wq, &work->work);
2973out:
2974 return ret;
2975}
2976EXPORT_SYMBOL(ib_cm_establish);
2977
2978static void cm_recv_handler(struct ib_mad_agent *mad_agent,
2979 struct ib_mad_recv_wc *mad_recv_wc)
2980{
2981 struct cm_work *work;
2982 enum ib_cm_event_type event;
2983 int paths = 0;
2984
2985 switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
2986 case CM_REQ_ATTR_ID:
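		/* A REQ carries a primary path and, when alt_local_lid is
		 * non-zero, an alternate path record as well. */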
2987 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
2988 alt_local_lid != 0);
2989 event = IB_CM_REQ_RECEIVED;
2990 break;
2991 case CM_MRA_ATTR_ID:
2992 event = IB_CM_MRA_RECEIVED;
2993 break;
2994 case CM_REJ_ATTR_ID:
2995 event = IB_CM_REJ_RECEIVED;
2996 break;
2997 case CM_REP_ATTR_ID:
2998 event = IB_CM_REP_RECEIVED;
2999 break;
3000 case CM_RTU_ATTR_ID:
3001 event = IB_CM_RTU_RECEIVED;
3002 break;
3003 case CM_DREQ_ATTR_ID:
3004 event = IB_CM_DREQ_RECEIVED;
3005 break;
3006 case CM_DREP_ATTR_ID:
3007 event = IB_CM_DREP_RECEIVED;
3008 break;
3009 case CM_SIDR_REQ_ATTR_ID:
3010 event = IB_CM_SIDR_REQ_RECEIVED;
3011 break;
3012 case CM_SIDR_REP_ATTR_ID:
3013 event = IB_CM_SIDR_REP_RECEIVED;
3014 break;
3015 case CM_LAP_ATTR_ID:
3016 paths = 1;
3017 event = IB_CM_LAP_RECEIVED;
3018 break;
3019 case CM_APR_ATTR_ID:
3020 event = IB_CM_APR_RECEIVED;
3021 break;
3022 default:
3023 ib_free_recv_mad(mad_recv_wc);
3024 return;
3025 }
3026
3027 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3028 GFP_KERNEL);
3029 if (!work) {
3030 ib_free_recv_mad(mad_recv_wc);
3031 return;
3032 }
3033
3034 INIT_WORK(&work->work, cm_work_handler, work);
3035 work->cm_event.event = event;
3036 work->mad_recv_wc = mad_recv_wc;
3037 work->port = (struct cm_port *)mad_agent->context;
3038 queue_work(cm.wq, &work->work);
3039}
3040
3041static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3042 struct ib_qp_attr *qp_attr,
3043 int *qp_attr_mask)
3044{
3045 unsigned long flags;
3046 int ret;
3047
3048 spin_lock_irqsave(&cm_id_priv->lock, flags);
3049 switch (cm_id_priv->id.state) {
3050 case IB_CM_REQ_SENT:
3051 case IB_CM_MRA_REQ_RCVD:
3052 case IB_CM_REQ_RCVD:
3053 case IB_CM_MRA_REQ_SENT:
3054 case IB_CM_REP_RCVD:
3055 case IB_CM_MRA_REP_SENT:
3056 case IB_CM_REP_SENT:
3057 case IB_CM_MRA_REP_RCVD:
3058 case IB_CM_ESTABLISHED:
3059 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3060 IB_QP_PKEY_INDEX | IB_QP_PORT;
3061 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
3062 if (cm_id_priv->responder_resources)
3063 qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE |
3064 IB_ACCESS_REMOTE_READ;
3065 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3066 qp_attr->port_num = cm_id_priv->av.port->port_num;
3067 ret = 0;
3068 break;
3069 default:
3070 ret = -EINVAL;
3071 break;
3072 }
3073 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3074 return ret;
3075}
3076
3077static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3078 struct ib_qp_attr *qp_attr,
3079 int *qp_attr_mask)
3080{
3081 unsigned long flags;
3082 int ret;
3083
3084 spin_lock_irqsave(&cm_id_priv->lock, flags);
3085 switch (cm_id_priv->id.state) {
3086 case IB_CM_REQ_RCVD:
3087 case IB_CM_MRA_REQ_SENT:
3088 case IB_CM_REP_RCVD:
3089 case IB_CM_MRA_REP_SENT:
3090 case IB_CM_REP_SENT:
3091 case IB_CM_MRA_REP_RCVD:
3092 case IB_CM_ESTABLISHED:
3093 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3094 IB_QP_DEST_QPN | IB_QP_RQ_PSN |
3095 IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
3096 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3097 qp_attr->path_mtu = cm_id_priv->path_mtu;
3098 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3099 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3100 qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources;
3101 qp_attr->min_rnr_timer = 0;
3102 if (cm_id_priv->alt_av.ah_attr.dlid) {
3103 *qp_attr_mask |= IB_QP_ALT_PATH;
3104 qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3105 }
3106 ret = 0;
3107 break;
3108 default:
3109 ret = -EINVAL;
3110 break;
3111 }
3112 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3113 return ret;
3114}
3115
3116static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3117 struct ib_qp_attr *qp_attr,
3118 int *qp_attr_mask)
3119{
3120 unsigned long flags;
3121 int ret;
3122
3123 spin_lock_irqsave(&cm_id_priv->lock, flags);
3124 switch (cm_id_priv->id.state) {
3125 case IB_CM_REP_RCVD:
3126 case IB_CM_MRA_REP_SENT:
3127 case IB_CM_REP_SENT:
3128 case IB_CM_MRA_REP_RCVD:
3129 case IB_CM_ESTABLISHED:
3130 *qp_attr_mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3131 IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
3132 IB_QP_MAX_QP_RD_ATOMIC;
3133 qp_attr->timeout = cm_id_priv->local_ack_timeout;
3134 qp_attr->retry_cnt = cm_id_priv->retry_count;
3135 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3136 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3137 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3138 if (cm_id_priv->alt_av.ah_attr.dlid) {
3139 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3140 qp_attr->path_mig_state = IB_MIG_REARM;
3141 }
3142 ret = 0;
3143 break;
3144 default:
3145 ret = -EINVAL;
3146 break;
3147 }
3148 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3149 return ret;
3150}
3151
3152int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3153 struct ib_qp_attr *qp_attr,
3154 int *qp_attr_mask)
3155{
3156 struct cm_id_private *cm_id_priv;
3157 int ret;
3158
3159 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
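	/* Consumers transition a connection's QP through INIT, RTR and
	 * RTS; fill in the attributes required for the requested state. */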
3160 switch (qp_attr->qp_state) {
3161 case IB_QPS_INIT:
3162 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3163 break;
3164 case IB_QPS_RTR:
3165 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3166 break;
3167 case IB_QPS_RTS:
3168 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3169 break;
3170 default:
3171 ret = -EINVAL;
3172 break;
3173 }
3174 return ret;
3175}
3176EXPORT_SYMBOL(ib_cm_init_qp_attr);
3177
3178static u64 cm_get_ca_guid(struct ib_device *device)
3179{
3180 struct ib_device_attr *device_attr;
3181 u64 guid;
3182 int ret;
3183
3184 device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
3185 if (!device_attr)
3186 return 0;
3187
3188 ret = ib_query_device(device, device_attr);
3189 guid = ret ? 0 : device_attr->node_guid;
3190 kfree(device_attr);
3191 return guid;
3192}
3193
3194static void cm_add_one(struct ib_device *device)
3195{
3196 struct cm_device *cm_dev;
3197 struct cm_port *port;
3198 struct ib_mad_reg_req reg_req = {
3199 .mgmt_class = IB_MGMT_CLASS_CM,
3200 .mgmt_class_version = IB_CM_CLASS_VERSION
3201 };
3202 struct ib_port_modify port_modify = {
3203 .set_port_cap_mask = IB_PORT_CM_SUP
3204 };
3205 unsigned long flags;
3206 int ret;
3207 u8 i;
3208
3209 cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
3210 device->phys_port_cnt, GFP_KERNEL);
3211 if (!cm_dev)
3212 return;
3213
3214 cm_dev->device = device;
3215 cm_dev->ca_guid = cm_get_ca_guid(device);
3216 if (!cm_dev->ca_guid)
3217 goto error1;
3218
3219 set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3220 for (i = 1; i <= device->phys_port_cnt; i++) {
3221 port = &cm_dev->port[i-1];
3222 port->cm_dev = cm_dev;
3223 port->port_num = i;
3224 port->mad_agent = ib_register_mad_agent(device, i,
3225 IB_QPT_GSI,
3226 &reg_req,
3227 0,
3228 cm_send_handler,
3229 cm_recv_handler,
3230 port);
3231 if (IS_ERR(port->mad_agent))
3232 goto error2;
3233
3234 ret = ib_modify_port(device, i, 0, &port_modify);
3235 if (ret)
3236 goto error3;
3237 }
3238 ib_set_client_data(device, &cm_client, cm_dev);
3239
3240 write_lock_irqsave(&cm.device_lock, flags);
3241 list_add_tail(&cm_dev->list, &cm.device_list);
3242 write_unlock_irqrestore(&cm.device_lock, flags);
3243 return;
3244
3245error3:
3246 ib_unregister_mad_agent(port->mad_agent);
3247error2:
3248 port_modify.set_port_cap_mask = 0;
3249 port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
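	/* i is the port whose setup failed; unwind ports 1..i-1. */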
3250 while (--i) {
3251 port = &cm_dev->port[i-1];
3252 ib_modify_port(device, port->port_num, 0, &port_modify);
3253 ib_unregister_mad_agent(port->mad_agent);
3254 }
3255error1:
3256 kfree(cm_dev);
3257}
3258
3259static void cm_remove_one(struct ib_device *device)
3260{
3261 struct cm_device *cm_dev;
3262 struct cm_port *port;
3263 struct ib_port_modify port_modify = {
3264 .clr_port_cap_mask = IB_PORT_CM_SUP
3265 };
3266 unsigned long flags;
3267 int i;
3268
3269 cm_dev = ib_get_client_data(device, &cm_client);
3270 if (!cm_dev)
3271 return;
3272
3273 write_lock_irqsave(&cm.device_lock, flags);
3274 list_del(&cm_dev->list);
3275 write_unlock_irqrestore(&cm.device_lock, flags);
3276
3277 for (i = 1; i <= device->phys_port_cnt; i++) {
3278 port = &cm_dev->port[i-1];
3279 ib_modify_port(device, port->port_num, 0, &port_modify);
3280 ib_unregister_mad_agent(port->mad_agent);
3281 }
3282 kfree(cm_dev);
3283}
3284
3285static int __init ib_cm_init(void)
3286{
3287 int ret;
3288
3289 memset(&cm, 0, sizeof cm);
3290 INIT_LIST_HEAD(&cm.device_list);
3291 rwlock_init(&cm.device_lock);
3292 spin_lock_init(&cm.lock);
3293 cm.listen_service_table = RB_ROOT;
3294 cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3295 cm.remote_id_table = RB_ROOT;
3296 cm.remote_qp_table = RB_ROOT;
3297 cm.remote_sidr_table = RB_ROOT;
3298 idr_init(&cm.local_id_table);
3299 idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3300
3301 cm.wq = create_workqueue("ib_cm");
3302 if (!cm.wq)
3303 return -ENOMEM;
3304
3305 ret = ib_register_client(&cm_client);
3306 if (ret)
3307 goto error;
3308
3309 return 0;
3310error:
3311 destroy_workqueue(cm.wq);
3312 return ret;
3313}
3314
3315static void __exit ib_cm_cleanup(void)
3316{
3317 flush_workqueue(cm.wq);
3318 destroy_workqueue(cm.wq);
3319 ib_unregister_client(&cm_client);
3320}
3321
3322module_init(ib_cm_init);
3323module_exit(ib_cm_cleanup);
3324
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
new file mode 100644
index 000000000000..15a309a77b2b
--- /dev/null
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -0,0 +1,819 @@
1/*
2 * Copyright (c) 2004 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34#if !defined(CM_MSGS_H)
35#define CM_MSGS_H
36
37#include <ib_mad.h>
38
39/*
40 * Parameters to routines below should be in network-byte order, and values
41 * are returned in network-byte order.
42 */
43
44#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
45
46enum cm_msg_attr_id {
47 CM_REQ_ATTR_ID = __constant_htons(0x0010),
48 CM_MRA_ATTR_ID = __constant_htons(0x0011),
49 CM_REJ_ATTR_ID = __constant_htons(0x0012),
50 CM_REP_ATTR_ID = __constant_htons(0x0013),
51 CM_RTU_ATTR_ID = __constant_htons(0x0014),
52 CM_DREQ_ATTR_ID = __constant_htons(0x0015),
53 CM_DREP_ATTR_ID = __constant_htons(0x0016),
54 CM_SIDR_REQ_ATTR_ID = __constant_htons(0x0017),
55 CM_SIDR_REP_ATTR_ID = __constant_htons(0x0018),
56 CM_LAP_ATTR_ID = __constant_htons(0x0019),
57 CM_APR_ATTR_ID = __constant_htons(0x001A)
58};
59
60enum cm_msg_sequence {
61 CM_MSG_SEQUENCE_REQ,
62 CM_MSG_SEQUENCE_LAP,
63 CM_MSG_SEQUENCE_DREQ,
64 CM_MSG_SEQUENCE_SIDR
65};
66
67struct cm_req_msg {
68 struct ib_mad_hdr hdr;
69
70 u32 local_comm_id;
71 u32 rsvd4;
72 u64 service_id;
73 u64 local_ca_guid;
74 u32 rsvd24;
75 u32 local_qkey;
76 /* local QPN:24, responder resources:8 */
77 u32 offset32;
78 /* local EECN:24, initiator depth:8 */
79 u32 offset36;
80 /*
81 * remote EECN:24, remote CM response timeout:5,
82 * transport service type:2, end-to-end flow control:1
83 */
84 u32 offset40;
85 /* starting PSN:24, local CM response timeout:5, retry count:3 */
86 u32 offset44;
87 u16 pkey;
88 /* path MTU:4, RDC exists:1, RNR retry count:3. */
89 u8 offset50;
90 /* max CM Retries:4, SRQ:1, rsvd:3 */
91 u8 offset51;
92
93 u16 primary_local_lid;
94 u16 primary_remote_lid;
95 union ib_gid primary_local_gid;
96 union ib_gid primary_remote_gid;
97 /* flow label:20, rsvd:6, packet rate:6 */
98 u32 primary_offset88;
99 u8 primary_traffic_class;
100 u8 primary_hop_limit;
101 /* SL:4, subnet local:1, rsvd:3 */
102 u8 primary_offset94;
103 /* local ACK timeout:5, rsvd:3 */
104 u8 primary_offset95;
105
106 u16 alt_local_lid;
107 u16 alt_remote_lid;
108 union ib_gid alt_local_gid;
109 union ib_gid alt_remote_gid;
110 /* flow label:20, rsvd:6, packet rate:6 */
111 u32 alt_offset132;
112 u8 alt_traffic_class;
113 u8 alt_hop_limit;
114 /* SL:4, subnet local:1, rsvd:3 */
115 u8 alt_offset138;
116 /* local ACK timeout:5, rsvd:3 */
117 u8 alt_offset139;
118
119 u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
120
121} __attribute__ ((packed));
122
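/* The accessors below extract bit-fields from the wire format; multi-byte
 * values such as the 24-bit QPN in offset32 are kept in network byte order. */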
123static inline u32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
124{
125 return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
126}
127
128static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, u32 qpn)
129{
130 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
131 (be32_to_cpu(req_msg->offset32) &
132 0x000000FF));
133}
134
135static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
136{
137 return (u8) be32_to_cpu(req_msg->offset32);
138}
139
140static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
141{
142 req_msg->offset32 = cpu_to_be32(resp_res |
143 (be32_to_cpu(req_msg->offset32) &
144 0xFFFFFF00));
145}
146
147static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
148{
149 return (u8) be32_to_cpu(req_msg->offset36);
150}
151
152static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
153 u8 init_depth)
154{
155 req_msg->offset36 = cpu_to_be32(init_depth |
156 (be32_to_cpu(req_msg->offset36) &
157 0xFFFFFF00));
158}
159
160static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
161{
162 return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
163}
164
165static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
166 u8 resp_timeout)
167{
168 req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
169 (be32_to_cpu(req_msg->offset40) &
170 0xFFFFFF07));
171}
172
173static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
174{
175 u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
176 switch(transport_type) {
177 case 0: return IB_QPT_RC;
178 case 1: return IB_QPT_UC;
179 default: return 0;
180 }
181}
182
183static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
184 enum ib_qp_type qp_type)
185{
186 switch(qp_type) {
187	case IB_QPT_UC:
188		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
189				req_msg->offset40) & 0xFFFFFFF9) | 0x2);
190		break;	/* missing break would fall through and clear the bits */
191	default:
192 req_msg->offset40 = cpu_to_be32(be32_to_cpu(
193 req_msg->offset40) &
194 0xFFFFFFF9);
195 }
196}
197
198static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
199{
200 return be32_to_cpu(req_msg->offset40) & 0x1;
201}
202
203static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
204 u8 flow_ctrl)
205{
206 req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
207 (be32_to_cpu(req_msg->offset40) &
208 0xFFFFFFFE));
209}
210
211static inline u32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
212{
213 return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
214}
215
216static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
217 u32 starting_psn)
218{
219 req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
220 (be32_to_cpu(req_msg->offset44) & 0x000000FF));
221}
222
223static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
224{
225 return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
226}
227
228static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
229 u8 resp_timeout)
230{
231 req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
232 (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
233}
234
235static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
236{
237 return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
238}
239
240static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
241 u8 retry_count)
242{
243 req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
244 (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
245}
246
247static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
248{
249 return req_msg->offset50 >> 4;
250}
251
252static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
253{
254 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
255}
256
257static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
258{
259 return req_msg->offset50 & 0x7;
260}
261
262static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
263 u8 rnr_retry_count)
264{
265 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
266 (rnr_retry_count & 0x7));
267}
268
269static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
270{
271 return req_msg->offset51 >> 4;
272}
273
274static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
275 u8 retries)
276{
277 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
278}
279
280static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
281{
282 return (req_msg->offset51 & 0x8) >> 3;
283}
284
285static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
286{
287 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
288 ((srq & 0x1) << 3));
289}
290
291static inline u32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
292{
293 return cpu_to_be32((be32_to_cpu(req_msg->primary_offset88) >> 12));
294}
295
296static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
297 u32 flow_label)
298{
299 req_msg->primary_offset88 = cpu_to_be32(
300 (be32_to_cpu(req_msg->primary_offset88) &
301 0x00000FFF) |
302 (be32_to_cpu(flow_label) << 12));
303}
304
305static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
306{
307 return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
308}
309
310static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
311 u8 rate)
312{
313 req_msg->primary_offset88 = cpu_to_be32(
314 (be32_to_cpu(req_msg->primary_offset88) &
315 0xFFFFFFC0) | (rate & 0x3F));
316}
317
318static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
319{
320 return (u8) (req_msg->primary_offset94 >> 4);
321}
322
323static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
324{
325 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
326 (sl << 4));
327}
328
329static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
330{
331 return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
332}
333
334static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
335 u8 subnet_local)
336{
337 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
338 ((subnet_local & 0x1) << 3));
339}
340
341static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
342{
343 return (u8) (req_msg->primary_offset95 >> 3);
344}
345
346static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
347 u8 local_ack_timeout)
348{
349 req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
350 (local_ack_timeout << 3));
351}
352
353static inline u32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
354{
355 return cpu_to_be32((be32_to_cpu(req_msg->alt_offset132) >> 12));
356}
357
358static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
359 u32 flow_label)
360{
361 req_msg->alt_offset132 = cpu_to_be32(
362 (be32_to_cpu(req_msg->alt_offset132) &
363 0x00000FFF) |
364 (be32_to_cpu(flow_label) << 12));
365}
366
367static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
368{
369 return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
370}
371
372static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
373 u8 rate)
374{
375 req_msg->alt_offset132 = cpu_to_be32(
376 (be32_to_cpu(req_msg->alt_offset132) &
377 0xFFFFFFC0) | (rate & 0x3F));
378}
379
380static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
381{
382 return (u8) (req_msg->alt_offset138 >> 4);
383}
384
385static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
386{
387 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
388 (sl << 4));
389}
390
391static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
392{
393 return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
394}
395
396static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
397 u8 subnet_local)
398{
399 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
400 ((subnet_local & 0x1) << 3));
401}
402
403static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
404{
405 return (u8) (req_msg->alt_offset139 >> 3);
406}
407
408static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
409 u8 local_ack_timeout)
410{
411 req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
412 (local_ack_timeout << 3));
413}
414
415/* Message REJected or MRAed */
416enum cm_msg_response {
417 CM_MSG_RESPONSE_REQ = 0x0,
418 CM_MSG_RESPONSE_REP = 0x1,
419 CM_MSG_RESPONSE_OTHER = 0x2
420};
421
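/*
 * MRA (Message Receipt Acknowledgment): acknowledges a REQ, REP, or LAP
 * so that the sender extends its timeout by the service timeout below.
 */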
struct cm_mra_msg {
423 struct ib_mad_hdr hdr;
424
425 u32 local_comm_id;
426 u32 remote_comm_id;
427 /* message MRAed:2, rsvd:6 */
428 u8 offset8;
429 /* service timeout:5, rsvd:3 */
430 u8 offset9;
431
432 u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
433
434} __attribute__ ((packed));
435
436static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
437{
438 return (u8) (mra_msg->offset8 >> 6);
439}
440
441static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
442{
443 mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
444}
445
446static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
447{
448 return (u8) (mra_msg->offset9 >> 3);
449}
450
451static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
452 u8 service_timeout)
453{
454 mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
455 (service_timeout << 3));
456}
457
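/*
 * REJ: rejects a REQ, REP, or other message.  The reason code may be
 * qualified by additional reject information (ARI).
 */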
458struct cm_rej_msg {
459 struct ib_mad_hdr hdr;
460
461 u32 local_comm_id;
462 u32 remote_comm_id;
463 /* message REJected:2, rsvd:6 */
464 u8 offset8;
	/* reject info length:7, rsvd:1 */
466 u8 offset9;
467 u16 reason;
468 u8 ari[IB_CM_REJ_ARI_LENGTH];
469
470 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
471
472} __attribute__ ((packed));
473
474static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
475{
476 return (u8) (rej_msg->offset8 >> 6);
477}
478
479static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
480{
481 rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
482}
483
484static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
485{
486 return (u8) (rej_msg->offset9 >> 1);
487}
488
489static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
490 u8 len)
491{
492 rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
493}
494
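/*
 * REP: accepts a REQ, carrying the passive side's QPN, starting PSN,
 * and RDMA resource limits for the new connection.
 */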
495struct cm_rep_msg {
496 struct ib_mad_hdr hdr;
497
498 u32 local_comm_id;
499 u32 remote_comm_id;
500 u32 local_qkey;
501 /* local QPN:24, rsvd:8 */
502 u32 offset12;
503 /* local EECN:24, rsvd:8 */
504 u32 offset16;
	/* starting PSN:24, rsvd:8 */
506 u32 offset20;
507 u8 resp_resources;
508 u8 initiator_depth;
509 /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
510 u8 offset26;
	/* RNR retry count:3, SRQ:1, rsvd:4 */
512 u8 offset27;
513 u64 local_ca_guid;
514
515 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
516
517} __attribute__ ((packed));
518
519static inline u32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
520{
521 return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
522}
523
524static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, u32 qpn)
525{
526 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
527 (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
528}
529
530static inline u32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
531{
532 return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
533}
534
535static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
536 u32 starting_psn)
537{
538 rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
539 (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
540}
541
542static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
543{
544 return (u8) (rep_msg->offset26 >> 3);
545}
546
547static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
548 u8 target_ack_delay)
549{
550 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
551 (target_ack_delay << 3));
552}
553
554static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
555{
556 return (u8) ((rep_msg->offset26 & 0x06) >> 1);
557}
558
559static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
560{
561 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
562 ((failover & 0x3) << 1));
563}
564
565static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
566{
567 return (u8) (rep_msg->offset26 & 0x01);
568}
569
570static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
571 u8 flow_ctrl)
572{
573 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
574 (flow_ctrl & 0x1));
575}
576
577static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
578{
579 return (u8) (rep_msg->offset27 >> 5);
580}
581
582static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
583 u8 rnr_retry_count)
584{
585 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
586 (rnr_retry_count << 5));
587}
588
589static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
590{
591 return (u8) ((rep_msg->offset27 >> 4) & 0x1);
592}
593
594static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
595{
596 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
597 ((srq & 0x1) << 4));
598}
599
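/*
 * RTU (Ready To Use): sent by the active side to confirm that the
 * connection is established.
 */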
600struct cm_rtu_msg {
601 struct ib_mad_hdr hdr;
602
603 u32 local_comm_id;
604 u32 remote_comm_id;
605
606 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
607
608} __attribute__ ((packed));
609
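/*
 * DREQ/DREP: disconnect request and reply, used to tear down an
 * established connection.
 */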
610struct cm_dreq_msg {
611 struct ib_mad_hdr hdr;
612
613 u32 local_comm_id;
614 u32 remote_comm_id;
615 /* remote QPN/EECN:24, rsvd:8 */
616 u32 offset8;
617
618 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
619
620} __attribute__ ((packed));
621
622static inline u32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
623{
624 return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
625}
626
627static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, u32 qpn)
628{
629 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
630 (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
631}
632
633struct cm_drep_msg {
634 struct ib_mad_hdr hdr;
635
636 u32 local_comm_id;
637 u32 remote_comm_id;
638
639 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
640
641} __attribute__ ((packed));
642
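/*
 * LAP (Load Alternate Path): proposes new alternate path parameters for
 * an existing connection; APR (Alternate Path Response) reports via
 * ap_status whether the path was loaded.
 */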
643struct cm_lap_msg {
644 struct ib_mad_hdr hdr;
645
646 u32 local_comm_id;
647 u32 remote_comm_id;
648
649 u32 rsvd8;
650 /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
651 u32 offset12;
652 u32 rsvd16;
653
654 u16 alt_local_lid;
655 u16 alt_remote_lid;
656 union ib_gid alt_local_gid;
657 union ib_gid alt_remote_gid;
658 /* flow label:20, rsvd:4, traffic class:8 */
659 u32 offset56;
660 u8 alt_hop_limit;
661 /* rsvd:2, packet rate:6 */
	u8 offset61;
	/* SL:4, subnet local:1, rsvd:3 */
	u8 offset62;
	/* local ACK timeout:5, rsvd:3 */
	u8 offset63;
667
668 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
669} __attribute__ ((packed));
670
671static inline u32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
672{
673 return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
674}
675
676static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, u32 qpn)
677{
678 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
679 (be32_to_cpu(lap_msg->offset12) &
680 0x000000FF));
681}
682
683static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
684{
685 return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
686}
687
688static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
689 u8 resp_timeout)
690{
691 lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
692 (be32_to_cpu(lap_msg->offset12) &
693 0xFFFFFF07));
694}
695
696static inline u32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
697{
698 return be32_to_cpu(lap_msg->offset56) >> 12;
699}
700
701static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
702 u32 flow_label)
703{
704 lap_msg->offset56 = cpu_to_be32((flow_label << 12) |
705 (be32_to_cpu(lap_msg->offset56) &
706 0x00000FFF));
707}
708
709static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
710{
711 return (u8) be32_to_cpu(lap_msg->offset56);
712}
713
714static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
715 u8 traffic_class)
716{
717 lap_msg->offset56 = cpu_to_be32(traffic_class |
718 (be32_to_cpu(lap_msg->offset56) &
719 0xFFFFFF00));
720}
721
722static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
723{
724 return lap_msg->offset61 & 0x3F;
725}
726
727static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
728 u8 packet_rate)
729{
730 lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
731}
732
733static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
734{
735 return lap_msg->offset62 >> 4;
736}
737
738static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
739{
740 lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
741}
742
743static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
744{
745 return (lap_msg->offset62 >> 3) & 0x1;
746}
747
static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
					   u8 subnet_local)
{
	lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
			    (lap_msg->offset62 & 0xF7);
}

static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
755{
756 return lap_msg->offset63 >> 3;
757}
758
759static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
760 u8 local_ack_timeout)
761{
762 lap_msg->offset63 = (local_ack_timeout << 3) |
763 (lap_msg->offset63 & 0x07);
764}
765
766struct cm_apr_msg {
767 struct ib_mad_hdr hdr;
768
769 u32 local_comm_id;
770 u32 remote_comm_id;
771
772 u8 info_length;
773 u8 ap_status;
774 u8 info[IB_CM_APR_INFO_LENGTH];
775
776 u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
777} __attribute__ ((packed));
778
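/*
 * SIDR REQ/REP (Service ID Resolution): resolve a service ID to the QPN
 * and Q_Key of the queue pair that services it, without establishing a
 * connection.
 */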
779struct cm_sidr_req_msg {
780 struct ib_mad_hdr hdr;
781
782 u32 request_id;
783 u16 pkey;
784 u16 rsvd;
785 u64 service_id;
786
787 u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
788} __attribute__ ((packed));
789
790struct cm_sidr_rep_msg {
791 struct ib_mad_hdr hdr;
792
793 u32 request_id;
794 u8 status;
795 u8 info_length;
796 u16 rsvd;
797 /* QPN:24, rsvd:8 */
798 u32 offset8;
799 u64 service_id;
800 u32 qkey;
801 u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
802
803 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
804} __attribute__ ((packed));
805
806static inline u32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
807{
808 return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
809}
810
811static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
812 u32 qpn)
813{
814 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
815 (be32_to_cpu(sidr_rep_msg->offset8) &
816 0x000000FF));
817}
818
819#endif /* CM_MSGS_H */