author		Hal Rosenstock <halr@voltaire.com>		2005-07-27 14:45:33 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-27 19:26:12 -0400
commit		cabe3cbcbb3b09637b9e706c49eadb180fca057e (patch)
tree		37c9179b4f43d7a63e7d55ae6a77a9fb44537b0c /drivers
parent		29bb33dd87dbe8db07c2b19df3fb453d999c96de (diff)
[PATCH] IB: Fix a couple of MAD code paths
Fixed locking to handle errors when posting MAD send work requests. Fixed
the handling of canceling a MAD that has an active work request.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers')
 drivers/infiniband/core/mad.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 1d8f26f54ec9..8216af0ba783 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -841,6 +841,7 @@ static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_mad_qp_info *qp_info;
 	struct ib_send_wr *bad_send_wr;
+	struct list_head *list;
 	unsigned long flags;
 	int ret;
 
@@ -850,22 +851,20 @@ static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
-	if (qp_info->send_queue.count++ < qp_info->send_queue.max_active) {
-		list_add_tail(&mad_send_wr->mad_list.list,
-			      &qp_info->send_queue.list);
-		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
+	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
 		ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
 				   &mad_send_wr->send_wr, &bad_send_wr);
-		if (ret) {
-			printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
-			dequeue_mad(&mad_send_wr->mad_list);
-		}
+		list = &qp_info->send_queue.list;
 	} else {
-		list_add_tail(&mad_send_wr->mad_list.list,
-			      &qp_info->overflow_list);
-		spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
 		ret = 0;
+		list = &qp_info->overflow_list;
 	}
+
+	if (!ret) {
+		qp_info->send_queue.count++;
+		list_add_tail(&mad_send_wr->mad_list.list, list);
+	}
+	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
 	return ret;
 }
 
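The heart of the ib_send_mad() change above: the work request is now posted
while the send-queue lock is still held, and the entry is linked onto a list
(and the count bumped) only once the outcome is known, so a failed
ib_post_send() leaves no queued state behind to unwind. Below is a minimal
user-space sketch of the same pattern; the types, the send_entry() name, and
the post() callback standing in for ib_post_send() are all illustrative, not
the kernel's (the real code uses spin_lock_irqsave() and list_add_tail(); a
pthread mutex and head insertion keep the sketch short):

	#include <pthread.h>

	struct entry { struct entry *next; };

	/* Illustrative stand-in for the kernel's qp_info/send_queue. */
	struct send_queue {
		pthread_mutex_t lock;
		int count;		/* sends currently outstanding */
		int max_active;		/* hardware send queue depth */
		struct entry *active;	/* simplified singly-linked lists */
		struct entry *overflow;
	};

	/* Post under the lock; link and count only when nothing failed. */
	static int send_entry(struct send_queue *q, struct entry *e,
			      int (*post)(struct entry *))
	{
		struct entry **list;
		int ret;

		pthread_mutex_lock(&q->lock);
		if (q->count < q->max_active) {
			ret = post(e);		/* plays ib_post_send() */
			list = &q->active;
		} else {
			ret = 0;		/* queue full: buffer it */
			list = &q->overflow;
		}
		if (!ret) {			/* enqueue only on success */
			q->count++;
			e->next = *list;
			*list = e;
		}
		pthread_mutex_unlock(&q->lock);
		return ret;	/* a failed post leaves no state to undo */
	}
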
@@ -2023,8 +2022,7 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
 }
 
 static struct ib_mad_send_wr_private*
-find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
-		   u64 wr_id)
+find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
 
@@ -2047,6 +2045,7 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_wr_private *mad_send_wr;
 	unsigned long flags;
+	int active;
 
 	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
 				      agent);
@@ -2057,13 +2056,14 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
 		return -EINVAL;
 	}
 
+	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
 	if (!timeout_ms) {
 		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
 		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
 	}
 
 	mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
-	if (!mad_send_wr->timeout || mad_send_wr->refcount > 1)
+	if (active)
 		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
 	else
 		ib_reset_mad_timeout(mad_send_wr, timeout_ms);
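
The ib_modify_mad() change is an ordering fix: the "is a work request still
active?" test reads refcount, but the cancel branch (timeout_ms == 0)
decrements refcount first, so evaluating the test afterwards, as the old code
did, could misclassify the send. Capturing the predicate in 'active' before
the mutation preserves the answer. A condensed, self-contained sketch of that
ordering; the mad_state struct, modify_timeout(), and reset_timeout() are
hypothetical stand-ins for ib_mad_send_wr_private, ib_modify_mad(), and
ib_reset_mad_timeout(), with the status and jiffies details elided:

	struct mad_state {
		unsigned long timeout;	/* 0: still queued, not yet sent */
		int refcount;		/* > 1: a work request is active */
	};

	/* Stand-in for ib_reset_mad_timeout(): no WR is outstanding, so
	 * the timeout machinery must be rearmed directly. */
	static void reset_timeout(struct mad_state *m, unsigned int ms)
	{
		m->timeout = ms;	/* real code also requeues work */
	}

	static void modify_timeout(struct mad_state *m, unsigned int timeout_ms)
	{
		/* Snapshot the predicate BEFORE the cancel path mutates
		 * refcount; testing afterwards would see skewed state. */
		int active = (!m->timeout || m->refcount > 1);

		if (!timeout_ms)	/* cancel: drop the timeout ref */
			m->refcount -= (m->timeout > 0);

		if (active)
			m->timeout = timeout_ms; /* completion path acts */
		else
			reset_timeout(m, timeout_ms);
	}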