-rw-r--r--	drivers/infiniband/core/cm.c       | 12
-rw-r--r--	drivers/infiniband/core/mad.c      | 47
-rw-r--r--	drivers/infiniband/core/mad_priv.h |  5
-rw-r--r--	drivers/infiniband/core/mad_rmpp.c | 20
-rw-r--r--	drivers/infiniband/core/ucm.c      | 12
5 files changed, 52 insertions(+), 44 deletions(-)
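Every hunk in this patch applies the same conversion: an open-coded teardown wait built from an atomic_t refcount polled through a wait_queue_head_t is replaced by a struct completion. The old idiom is racy on destroy: wait_event(obj->wait, !atomic_read(&obj->refcount)) may observe the count reaching zero and let the destroyer free the object while the thread that dropped the last reference is still inside wake_up(&obj->wait), touching freed memory. complete() and wait_for_completion() do the signal and the final wait under the completion's own wait-queue lock, so the waiter cannot return (and free the object) before the signaler has finished with it. Below is a minimal sketch of the resulting pattern; struct foo and its helpers are illustrative stand-ins, not code from this patch, and the header names are the modern ones:

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/slab.h>

/* Hypothetical refcounted object; "foo" is illustrative only. */
struct foo {
	atomic_t refcount;
	struct completion comp;
};

static void foo_init(struct foo *obj)
{
	atomic_set(&obj->refcount, 1);	/* creator holds the initial reference */
	init_completion(&obj->comp);
}

/* Drop one reference; the final put signals the sleeping destroyer. */
static void foo_put(struct foo *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		complete(&obj->comp);
}

/*
 * Destroy: drop the initial reference, then block until every
 * outstanding reference has been put before freeing the memory.
 */
static void foo_destroy(struct foo *obj)
{
	foo_put(obj);
	wait_for_completion(&obj->comp);
	kfree(obj);
}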
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 7cfedb8d9bcd..86fee43502cd 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,8 @@
  *
  * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
  */
+
+#include <linux/completion.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/idr.h>
@@ -122,7 +124,7 @@ struct cm_id_private {
 	struct rb_node service_node;
 	struct rb_node sidr_id_node;
 	spinlock_t lock;	/* Do not acquire inside cm.lock */
-	wait_queue_head_t wait;
+	struct completion comp;
 	atomic_t refcount;

 	struct ib_mad_send_buf *msg;
@@ -159,7 +161,7 @@ static void cm_work_handler(void *data);
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
 	if (atomic_dec_and_test(&cm_id_priv->refcount))
-		wake_up(&cm_id_priv->wait);
+		complete(&cm_id_priv->comp);
 }

 static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
@@ -559,7 +561,7 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 		goto error;

 	spin_lock_init(&cm_id_priv->lock);
-	init_waitqueue_head(&cm_id_priv->wait);
+	init_completion(&cm_id_priv->comp);
 	INIT_LIST_HEAD(&cm_id_priv->work_list);
 	atomic_set(&cm_id_priv->work_count, -1);
 	atomic_set(&cm_id_priv->refcount, 1);
@@ -724,8 +726,8 @@ retest:
 	}

 	cm_free_id(cm_id->local_id);
-	atomic_dec(&cm_id_priv->refcount);
-	wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
+	cm_deref_id(cm_id_priv);
+	wait_for_completion(&cm_id_priv->comp);
 	while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
 		cm_free_work(work);
 	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 469b6923a2e2..5ad41a64314c 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -352,7 +352,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	INIT_WORK(&mad_agent_priv->local_work, local_completions,
 		   mad_agent_priv);
 	atomic_set(&mad_agent_priv->refcount, 1);
-	init_waitqueue_head(&mad_agent_priv->wait);
+	init_completion(&mad_agent_priv->comp);

 	return &mad_agent_priv->agent;

@@ -467,7 +467,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 	mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp;
 	mad_snoop_priv->agent.port_num = port_num;
 	mad_snoop_priv->mad_snoop_flags = mad_snoop_flags;
-	init_waitqueue_head(&mad_snoop_priv->wait);
+	init_completion(&mad_snoop_priv->comp);
 	mad_snoop_priv->snoop_index = register_snoop_agent(
 						&port_priv->qp_info[qpn],
 						mad_snoop_priv);
@@ -486,6 +486,18 @@ error1:
 }
 EXPORT_SYMBOL(ib_register_mad_snoop);

+static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
+{
+	if (atomic_dec_and_test(&mad_agent_priv->refcount))
+		complete(&mad_agent_priv->comp);
+}
+
+static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv)
+{
+	if (atomic_dec_and_test(&mad_snoop_priv->refcount))
+		complete(&mad_snoop_priv->comp);
+}
+
 static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 {
 	struct ib_mad_port_private *port_priv;
@@ -509,9 +521,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	flush_workqueue(port_priv->wq);
 	ib_cancel_rmpp_recvs(mad_agent_priv);

-	atomic_dec(&mad_agent_priv->refcount);
-	wait_event(mad_agent_priv->wait,
-		   !atomic_read(&mad_agent_priv->refcount));
+	deref_mad_agent(mad_agent_priv);
+	wait_for_completion(&mad_agent_priv->comp);

 	kfree(mad_agent_priv->reg_req);
 	ib_dereg_mr(mad_agent_priv->agent.mr);
@@ -529,9 +540,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv)
 	atomic_dec(&qp_info->snoop_count);
 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);

-	atomic_dec(&mad_snoop_priv->refcount);
-	wait_event(mad_snoop_priv->wait,
-		   !atomic_read(&mad_snoop_priv->refcount));
+	deref_snoop_agent(mad_snoop_priv);
+	wait_for_completion(&mad_snoop_priv->comp);

 	kfree(mad_snoop_priv);
 }
@@ -600,8 +610,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info,
 		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 		mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
 						    send_buf, mad_send_wc);
-		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
-			wake_up(&mad_snoop_priv->wait);
+		deref_snoop_agent(mad_snoop_priv);
 		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 	}
 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -626,8 +635,7 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
 		spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
 		mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent,
 						   mad_recv_wc);
-		if (atomic_dec_and_test(&mad_snoop_priv->refcount))
-			wake_up(&mad_snoop_priv->wait);
+		deref_snoop_agent(mad_snoop_priv);
 		spin_lock_irqsave(&qp_info->snoop_lock, flags);
 	}
 	spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
@@ -968,8 +976,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf)

 	free_send_rmpp_list(mad_send_wr);
 	kfree(send_buf->mad);
-	if (atomic_dec_and_test(&mad_agent_priv->refcount))
-		wake_up(&mad_agent_priv->wait);
+	deref_mad_agent(mad_agent_priv);
 }
 EXPORT_SYMBOL(ib_free_send_mad);

@@ -1757,8 +1764,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
 						      mad_recv_wc);
 		if (!mad_recv_wc) {
-			if (atomic_dec_and_test(&mad_agent_priv->refcount))
-				wake_up(&mad_agent_priv->wait);
+			deref_mad_agent(mad_agent_priv);
 			return;
 		}
 	}
@@ -1770,8 +1776,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	if (!mad_send_wr) {
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 		ib_free_recv_mad(mad_recv_wc);
-		if (atomic_dec_and_test(&mad_agent_priv->refcount))
-			wake_up(&mad_agent_priv->wait);
+		deref_mad_agent(mad_agent_priv);
 		return;
 	}
 	ib_mark_mad_done(mad_send_wr);
@@ -1790,8 +1795,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	} else {
 		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
 						   mad_recv_wc);
-		if (atomic_dec_and_test(&mad_agent_priv->refcount))
-			wake_up(&mad_agent_priv->wait);
+		deref_mad_agent(mad_agent_priv);
 	}
 }

@@ -2021,8 +2025,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 						   mad_send_wc);

 	/* Release reference on agent taken when sending */
-	if (atomic_dec_and_test(&mad_agent_priv->refcount))
-		wake_up(&mad_agent_priv->wait);
+	deref_mad_agent(mad_agent_priv);
 	return;
 done:
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 6c9c133d71ef..b4fa28d3160f 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -37,6 +37,7 @@
 #ifndef __IB_MAD_PRIV_H__
 #define __IB_MAD_PRIV_H__

+#include <linux/completion.h>
 #include <linux/pci.h>
 #include <linux/kthread.h>
 #include <linux/workqueue.h>
@@ -108,7 +109,7 @@ struct ib_mad_agent_private {
 	struct list_head rmpp_list;

 	atomic_t refcount;
-	wait_queue_head_t wait;
+	struct completion comp;
 };

 struct ib_mad_snoop_private {
@@ -117,7 +118,7 @@ struct ib_mad_snoop_private {
 	int snoop_index;
 	int mad_snoop_flags;
 	atomic_t refcount;
-	wait_queue_head_t wait;
+	struct completion comp;
 };

 struct ib_mad_send_wr_private {
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index dfd4e588ce03..d4704e054e30 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -49,7 +49,7 @@ struct mad_rmpp_recv {
 	struct list_head list;
 	struct work_struct timeout_work;
 	struct work_struct cleanup_work;
-	wait_queue_head_t wait;
+	struct completion comp;
 	enum rmpp_state state;
 	spinlock_t lock;
 	atomic_t refcount;
@@ -69,10 +69,16 @@ struct mad_rmpp_recv {
 	u8 method;
 };

+static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
+{
+	if (atomic_dec_and_test(&rmpp_recv->refcount))
+		complete(&rmpp_recv->comp);
+}
+
 static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
 {
-	atomic_dec(&rmpp_recv->refcount);
-	wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
+	deref_rmpp_recv(rmpp_recv);
+	wait_for_completion(&rmpp_recv->comp);
 	ib_destroy_ah(rmpp_recv->ah);
 	kfree(rmpp_recv);
 }
@@ -253,7 +259,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
 		goto error;

 	rmpp_recv->agent = agent;
-	init_waitqueue_head(&rmpp_recv->wait);
+	init_completion(&rmpp_recv->comp);
 	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
 	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
 	spin_lock_init(&rmpp_recv->lock);
@@ -279,12 +285,6 @@ error: kfree(rmpp_recv);
 	return NULL;
 }

-static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
-{
-	if (atomic_dec_and_test(&rmpp_recv->refcount))
-		wake_up(&rmpp_recv->wait);
-}
-
 static struct mad_rmpp_recv *
 find_rmpp_recv(struct ib_mad_agent_private *agent,
 	       struct ib_mad_recv_wc *mad_recv_wc)
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f6a05965a4e8..9164a09b6ccd 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -32,6 +32,8 @@
  *
  * $Id: ucm.c 2594 2005-06-13 19:46:02Z libor $
  */
+
+#include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/module.h>
@@ -72,7 +74,7 @@ struct ib_ucm_file {

 struct ib_ucm_context {
 	int id;
-	wait_queue_head_t wait;
+	struct completion comp;
 	atomic_t ref;
 	int events_reported;

@@ -138,7 +140,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx)
 {
 	if (atomic_dec_and_test(&ctx->ref))
-		wake_up(&ctx->wait);
+		complete(&ctx->comp);
 }

 static inline int ib_ucm_new_cm_id(int event)
@@ -178,7 +180,7 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 		return NULL;

 	atomic_set(&ctx->ref, 1);
-	init_waitqueue_head(&ctx->wait);
+	init_completion(&ctx->comp);
 	ctx->file = file;
 	INIT_LIST_HEAD(&ctx->events);

@@ -586,8 +588,8 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);

-	atomic_dec(&ctx->ref);
-	wait_event(ctx->wait, !atomic_read(&ctx->ref));
+	ib_ucm_ctx_put(ctx);
+	wait_for_completion(&ctx->comp);

 	/* No new events will be generated after destroying the cm_id. */
 	ib_destroy_cm_id(ctx->cm_id);
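A side effect of the conversion worth noting: the destroy paths now drop their reference through the same helper as every other caller (cm_deref_id() in cm.c, ib_ucm_ctx_put() in ucm.c, and the deref_mad_agent()/deref_snoop_agent()/deref_rmpp_recv() helpers in mad.c and mad_rmpp.c, the last of which also moves above its first use), so each object's refcount is only ever decremented in one place. The sketch following the diffstat shows the same pattern in isolation.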