diff options
author | Sean Hefty <mshefty@ichips.intel.com> | 2006-05-12 17:57:52 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2006-05-12 17:57:52 -0400 |
commit | 1b52fa98edd1c3e663ea4a06519e3d20976084a8 (patch) | |
tree | 178d5fd1fe2230b39f49cd36f481024e49878eb1 /drivers/infiniband/core/mad.c | |
parent | 6f4bb3d8205d943acafa2f536f37131777524b67 (diff) |
IB: refcount race fixes
Fix race condition during destruction calls to avoid possibility of
accessing object after it has been freed. Instead of waking up a wait
queue directly, which is susceptible to a race where the object is
freed between the reference count going to 0 and the wake_up(), use a
completion to wait in the function doing the freeing.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r-- | drivers/infiniband/core/mad.c | 47 |
1 file changed, 25 insertions, 22 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 469b6923a2e..5ad41a64314 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -352,7 +352,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
352 | INIT_WORK(&mad_agent_priv->local_work, local_completions, | 352 | INIT_WORK(&mad_agent_priv->local_work, local_completions, |
353 | mad_agent_priv); | 353 | mad_agent_priv); |
354 | atomic_set(&mad_agent_priv->refcount, 1); | 354 | atomic_set(&mad_agent_priv->refcount, 1); |
355 | init_waitqueue_head(&mad_agent_priv->wait); | 355 | init_completion(&mad_agent_priv->comp); |
356 | 356 | ||
357 | return &mad_agent_priv->agent; | 357 | return &mad_agent_priv->agent; |
358 | 358 | ||
@@ -467,7 +467,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, | |||
467 | mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; | 467 | mad_snoop_priv->agent.qp = port_priv->qp_info[qpn].qp; |
468 | mad_snoop_priv->agent.port_num = port_num; | 468 | mad_snoop_priv->agent.port_num = port_num; |
469 | mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; | 469 | mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; |
470 | init_waitqueue_head(&mad_snoop_priv->wait); | 470 | init_completion(&mad_snoop_priv->comp); |
471 | mad_snoop_priv->snoop_index = register_snoop_agent( | 471 | mad_snoop_priv->snoop_index = register_snoop_agent( |
472 | &port_priv->qp_info[qpn], | 472 | &port_priv->qp_info[qpn], |
473 | mad_snoop_priv); | 473 | mad_snoop_priv); |
@@ -486,6 +486,18 @@ error1: | |||
486 | } | 486 | } |
487 | EXPORT_SYMBOL(ib_register_mad_snoop); | 487 | EXPORT_SYMBOL(ib_register_mad_snoop); |
488 | 488 | ||
489 | static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | ||
490 | { | ||
491 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | ||
492 | complete(&mad_agent_priv->comp); | ||
493 | } | ||
494 | |||
495 | static inline void deref_snoop_agent(struct ib_mad_snoop_private *mad_snoop_priv) | ||
496 | { | ||
497 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) | ||
498 | complete(&mad_snoop_priv->comp); | ||
499 | } | ||
500 | |||
489 | static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | 501 | static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) |
490 | { | 502 | { |
491 | struct ib_mad_port_private *port_priv; | 503 | struct ib_mad_port_private *port_priv; |
@@ -509,9 +521,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | |||
509 | flush_workqueue(port_priv->wq); | 521 | flush_workqueue(port_priv->wq); |
510 | ib_cancel_rmpp_recvs(mad_agent_priv); | 522 | ib_cancel_rmpp_recvs(mad_agent_priv); |
511 | 523 | ||
512 | atomic_dec(&mad_agent_priv->refcount); | 524 | deref_mad_agent(mad_agent_priv); |
513 | wait_event(mad_agent_priv->wait, | 525 | wait_for_completion(&mad_agent_priv->comp); |
514 | !atomic_read(&mad_agent_priv->refcount)); | ||
515 | 526 | ||
516 | kfree(mad_agent_priv->reg_req); | 527 | kfree(mad_agent_priv->reg_req); |
517 | ib_dereg_mr(mad_agent_priv->agent.mr); | 528 | ib_dereg_mr(mad_agent_priv->agent.mr); |
@@ -529,9 +540,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) | |||
529 | atomic_dec(&qp_info->snoop_count); | 540 | atomic_dec(&qp_info->snoop_count); |
530 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 541 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
531 | 542 | ||
532 | atomic_dec(&mad_snoop_priv->refcount); | 543 | deref_snoop_agent(mad_snoop_priv); |
533 | wait_event(mad_snoop_priv->wait, | 544 | wait_for_completion(&mad_snoop_priv->comp); |
534 | !atomic_read(&mad_snoop_priv->refcount)); | ||
535 | 545 | ||
536 | kfree(mad_snoop_priv); | 546 | kfree(mad_snoop_priv); |
537 | } | 547 | } |
@@ -600,8 +610,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info, | |||
600 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 610 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
601 | mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, | 611 | mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, |
602 | send_buf, mad_send_wc); | 612 | send_buf, mad_send_wc); |
603 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) | 613 | deref_snoop_agent(mad_snoop_priv); |
604 | wake_up(&mad_snoop_priv->wait); | ||
605 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | 614 | spin_lock_irqsave(&qp_info->snoop_lock, flags); |
606 | } | 615 | } |
607 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 616 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
@@ -626,8 +635,7 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info, | |||
626 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 635 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
627 | mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, | 636 | mad_snoop_priv->agent.recv_handler(&mad_snoop_priv->agent, |
628 | mad_recv_wc); | 637 | mad_recv_wc); |
629 | if (atomic_dec_and_test(&mad_snoop_priv->refcount)) | 638 | deref_snoop_agent(mad_snoop_priv); |
630 | wake_up(&mad_snoop_priv->wait); | ||
631 | spin_lock_irqsave(&qp_info->snoop_lock, flags); | 639 | spin_lock_irqsave(&qp_info->snoop_lock, flags); |
632 | } | 640 | } |
633 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); | 641 | spin_unlock_irqrestore(&qp_info->snoop_lock, flags); |
@@ -968,8 +976,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf) | |||
968 | 976 | ||
969 | free_send_rmpp_list(mad_send_wr); | 977 | free_send_rmpp_list(mad_send_wr); |
970 | kfree(send_buf->mad); | 978 | kfree(send_buf->mad); |
971 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 979 | deref_mad_agent(mad_agent_priv); |
972 | wake_up(&mad_agent_priv->wait); | ||
973 | } | 980 | } |
974 | EXPORT_SYMBOL(ib_free_send_mad); | 981 | EXPORT_SYMBOL(ib_free_send_mad); |
975 | 982 | ||
@@ -1757,8 +1764,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1757 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, | 1764 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, |
1758 | mad_recv_wc); | 1765 | mad_recv_wc); |
1759 | if (!mad_recv_wc) { | 1766 | if (!mad_recv_wc) { |
1760 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1767 | deref_mad_agent(mad_agent_priv); |
1761 | wake_up(&mad_agent_priv->wait); | ||
1762 | return; | 1768 | return; |
1763 | } | 1769 | } |
1764 | } | 1770 | } |
@@ -1770,8 +1776,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1770 | if (!mad_send_wr) { | 1776 | if (!mad_send_wr) { |
1771 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1777 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1772 | ib_free_recv_mad(mad_recv_wc); | 1778 | ib_free_recv_mad(mad_recv_wc); |
1773 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1779 | deref_mad_agent(mad_agent_priv); |
1774 | wake_up(&mad_agent_priv->wait); | ||
1775 | return; | 1780 | return; |
1776 | } | 1781 | } |
1777 | ib_mark_mad_done(mad_send_wr); | 1782 | ib_mark_mad_done(mad_send_wr); |
@@ -1790,8 +1795,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1790 | } else { | 1795 | } else { |
1791 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, | 1796 | mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, |
1792 | mad_recv_wc); | 1797 | mad_recv_wc); |
1793 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1798 | deref_mad_agent(mad_agent_priv); |
1794 | wake_up(&mad_agent_priv->wait); | ||
1795 | } | 1799 | } |
1796 | } | 1800 | } |
1797 | 1801 | ||
@@ -2021,8 +2025,7 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | |||
2021 | mad_send_wc); | 2025 | mad_send_wc); |
2022 | 2026 | ||
2023 | /* Release reference on agent taken when sending */ | 2027 | /* Release reference on agent taken when sending */ |
2024 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 2028 | deref_mad_agent(mad_agent_priv); |
2025 | wake_up(&mad_agent_priv->wait); | ||
2026 | return; | 2029 | return; |
2027 | done: | 2030 | done: |
2028 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 2031 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |