author     Hal Rosenstock <halr@voltaire.com>      2005-07-27 14:45:32 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-07-27 19:26:11 -0400
commit     03b61ad2f29295f019e095d0f490f30a4d678d3f
tree       499d78432420c3fc4339d784940348a9f06010e5  /drivers/infiniband
parent     2c153b934dca08d58e0aafde18a182e0891aa201
[PATCH] IB: Add ib_modify_mad API to MAD
Add a new MAD layer call, ib_modify_mad(), to modify the timeout of a sent MAD, and simplify the cancel code.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
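For illustration, a minimal consumer-side sketch of the new call (not part of this patch): "agent" and "wr_id" are assumed to come from an earlier ib_register_mad_agent()/ib_post_send_mad() exchange, and the 5000 ms value is arbitrary.

	/*
	 * Hypothetical caller (illustrative only): extend the response
	 * timeout of a MAD that is still outstanding, or cancel it.
	 */
	static void example_extend_or_cancel(struct ib_mad_agent *agent,
					     u64 wr_id, int keep_waiting)
	{
		if (keep_waiting) {
			/* ib_modify_mad() returns -EINVAL if the send
			 * has already completed or been canceled. */
			if (ib_modify_mad(agent, wr_id, 5000))
				return;
		} else {
			/* Cancel is now just a zero-timeout modify. */
			ib_cancel_mad(agent, wr_id);
		}
	}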
Diffstat (limited to 'drivers/infiniband')

 -rw-r--r--  drivers/infiniband/core/mad.c        83
 -rw-r--r--  drivers/infiniband/core/mad_priv.h    2
 -rw-r--r--  drivers/infiniband/include/ib_mad.h  14

 3 files changed, 40 insertions(+), 59 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8948f6f300a4..7af72d4ae6c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -65,7 +65,6 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
 static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 				    struct ib_mad_send_wc *mad_send_wc);
 static void timeout_sends(void *data);
-static void cancel_sends(void *data);
 static void local_completions(void *data);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 			      struct ib_mad_agent_private *agent_priv,
@@ -346,8 +345,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 	INIT_WORK(&mad_agent_priv->local_work, local_completions,
 		  mad_agent_priv);
-	INIT_LIST_HEAD(&mad_agent_priv->canceled_list);
-	INIT_WORK(&mad_agent_priv->canceled_work, cancel_sends, mad_agent_priv);
 	atomic_set(&mad_agent_priv->refcount, 1);
 	init_waitqueue_head(&mad_agent_priv->wait);
 
@@ -1775,6 +1772,13 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 	}
 }
 
+void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
+			  int timeout_ms)
+{
+	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
+	wait_for_response(mad_send_wr);
+}
+
 /*
  * Process a send work completion
  */
@@ -2034,41 +2038,7 @@ find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv,
 	return NULL;
 }
 
-void cancel_sends(void *data)
-{
-	struct ib_mad_agent_private *mad_agent_priv;
-	struct ib_mad_send_wr_private *mad_send_wr;
-	struct ib_mad_send_wc mad_send_wc;
-	unsigned long flags;
-
-	mad_agent_priv = data;
-
-	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
-	mad_send_wc.vendor_err = 0;
-
-	spin_lock_irqsave(&mad_agent_priv->lock, flags);
-	while (!list_empty(&mad_agent_priv->canceled_list)) {
-		mad_send_wr = list_entry(mad_agent_priv->canceled_list.next,
-					 struct ib_mad_send_wr_private,
-					 agent_list);
-
-		list_del(&mad_send_wr->agent_list);
-		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-
-		mad_send_wc.wr_id = mad_send_wr->wr_id;
-		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
-						   &mad_send_wc);
-
-		kfree(mad_send_wr);
-		if (atomic_dec_and_test(&mad_agent_priv->refcount))
-			wake_up(&mad_agent_priv->wait);
-		spin_lock_irqsave(&mad_agent_priv->lock, flags);
-	}
-	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-}
-
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
-		   u64 wr_id)
+int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_wr_private *mad_send_wr;
@@ -2078,29 +2048,30 @@ void ib_cancel_mad(struct ib_mad_agent *mad_agent,
 		      agent);
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
 	mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
-	if (!mad_send_wr) {
+	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-		goto out;
+		return -EINVAL;
 	}
 
-	if (mad_send_wr->status == IB_WC_SUCCESS)
-		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
-
-	if (mad_send_wr->refcount != 0) {
+	if (!timeout_ms) {
 		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
-		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-		goto out;
+		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
 	}
 
-	list_del(&mad_send_wr->agent_list);
-	list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->canceled_list);
-	adjust_timeout(mad_agent_priv);
+	mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
+	if (!mad_send_wr->timeout || mad_send_wr->refcount > 1)
+		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
+	else
+		ib_reset_mad_timeout(mad_send_wr, timeout_ms);
 
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(ib_modify_mad);
 
-	queue_work(mad_agent_priv->qp_info->port_priv->wq,
-		   &mad_agent_priv->canceled_work);
-out:
-	return;
+void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id)
+{
+	ib_modify_mad(mad_agent, wr_id, 0);
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
@@ -2207,8 +2178,6 @@ static void timeout_sends(void *data)
 	unsigned long flags, delay;
 
 	mad_agent_priv = (struct ib_mad_agent_private *)data;
-
-	mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
 	mad_send_wc.vendor_err = 0;
 
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
@@ -2233,6 +2202,10 @@ static void timeout_sends(void *data)
 
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
+		if (mad_send_wr->status == IB_WC_SUCCESS)
+			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
+		else
+			mad_send_wc.status = mad_send_wr->status;
 		mad_send_wc.wr_id = mad_send_wr->wr_id;
 		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
 						   &mad_send_wc);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 8a61dd921d29..e5e37b5be387 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -97,8 +97,6 @@ struct ib_mad_agent_private {
 	unsigned long timeout;
 	struct list_head local_list;
 	struct work_struct local_work;
-	struct list_head canceled_list;
-	struct work_struct canceled_work;
 
 	atomic_t refcount;
 	wait_queue_head_t wait;
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
index e8a122122cba..c5f3170c59ef 100644
--- a/drivers/infiniband/include/ib_mad.h
+++ b/drivers/infiniband/include/ib_mad.h
@@ -385,8 +385,18 @@ void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
  * MADs will be returned to the user through the corresponding
  * ib_mad_send_handler.
  */
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
-		   u64 wr_id);
+void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id);
+
+/**
+ * ib_modify_mad - Modifies an outstanding send MAD operation.
+ * @mad_agent: Specifies the registration associated with sent MAD.
+ * @wr_id: Indicates the work request identifier of the MAD to modify.
+ * @timeout_ms: New timeout value for sent MAD.
+ *
+ * This call will reset the timeout value for a sent MAD to the specified
+ * value.
+ */
+int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms);
 
 /**
  * ib_redirect_mad_qp - Registers a QP for MAD services.
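Usage note (hypothetical handler, not from this patch): with the change to timeout_sends(), a send canceled through ib_modify_mad()/ib_cancel_mad() now reaches the agent's send handler with the IB_WC_WR_FLUSH_ERR status set at cancel time, while a genuine expiry still reports IB_WC_RESP_TIMEOUT_ERR, so a client can tell the two apart:

	/* Hypothetical client send handler (illustrative only). */
	static void example_send_handler(struct ib_mad_agent *agent,
					 struct ib_mad_send_wc *wc)
	{
		switch (wc->status) {
		case IB_WC_SUCCESS:
			break;	/* send completed normally */
		case IB_WC_RESP_TIMEOUT_ERR:
			break;	/* the (possibly modified) timeout expired */
		case IB_WC_WR_FLUSH_ERR:
			break;	/* canceled via ib_cancel_mad()/ib_modify_mad(.., 0) */
		default:
			break;	/* transport-level error from the HCA */
		}
	}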