diff options
author | Hal Rosenstock <halr@voltaire.com> | 2005-07-27 14:45:37 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-07-27 19:26:13 -0400 |
commit | fa619a77046bef30478697aba0553991033afb8e (patch) | |
tree | 3783af8ac0c6804c9f437f6dfb08ecda8ce92fc3 /drivers/infiniband/core/mad.c | |
parent | d2082ee516200095956bd66279be4f62f4a5843d (diff) |
[PATCH] IB: Add RMPP implementation
Add RMPP implementation.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r-- | drivers/infiniband/core/mad.c | 163 |
1 file changed, 117 insertions, 46 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 26e2b59ce5a6..b97e210ce9c8 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -1,5 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. | 2 | * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. | ||
4 | * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. | ||
3 | * | 5 | * |
4 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
5 | * licenses. You may choose to be licensed under the terms of the GNU | 7 | * licenses. You may choose to be licensed under the terms of the GNU |
@@ -29,12 +31,12 @@ | |||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. | 32 | * SOFTWARE. |
31 | * | 33 | * |
32 | * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $ | 34 | * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $ |
33 | */ | 35 | */ |
34 | |||
35 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
36 | 37 | ||
37 | #include "mad_priv.h" | 38 | #include "mad_priv.h" |
39 | #include "mad_rmpp.h" | ||
38 | #include "smi.h" | 40 | #include "smi.h" |
39 | #include "agent.h" | 41 | #include "agent.h" |
40 | 42 | ||
@@ -45,6 +47,7 @@ MODULE_AUTHOR("Sean Hefty"); | |||
45 | 47 | ||
46 | 48 | ||
47 | kmem_cache_t *ib_mad_cache; | 49 | kmem_cache_t *ib_mad_cache; |
50 | |||
48 | static struct list_head ib_mad_port_list; | 51 | static struct list_head ib_mad_port_list; |
49 | static u32 ib_mad_client_id = 0; | 52 | static u32 ib_mad_client_id = 0; |
50 | 53 | ||
@@ -62,8 +65,6 @@ static struct ib_mad_agent_private *find_mad_agent( | |||
62 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, | 65 | static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, |
63 | struct ib_mad_private *mad); | 66 | struct ib_mad_private *mad); |
64 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); | 67 | static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv); |
65 | static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | ||
66 | struct ib_mad_send_wc *mad_send_wc); | ||
67 | static void timeout_sends(void *data); | 68 | static void timeout_sends(void *data); |
68 | static void local_completions(void *data); | 69 | static void local_completions(void *data); |
69 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, | 70 | static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req, |
@@ -195,8 +196,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
195 | if (qpn == -1) | 196 | if (qpn == -1) |
196 | goto error1; | 197 | goto error1; |
197 | 198 | ||
198 | if (rmpp_version) | 199 | if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) |
199 | goto error1; /* XXX: until RMPP implemented */ | 200 | goto error1; |
200 | 201 | ||
201 | /* Validate MAD registration request if supplied */ | 202 | /* Validate MAD registration request if supplied */ |
202 | if (mad_reg_req) { | 203 | if (mad_reg_req) { |
@@ -281,7 +282,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
281 | /* Now, fill in the various structures */ | 282 | /* Now, fill in the various structures */ |
282 | mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; | 283 | mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; |
283 | mad_agent_priv->reg_req = reg_req; | 284 | mad_agent_priv->reg_req = reg_req; |
284 | mad_agent_priv->rmpp_version = rmpp_version; | 285 | mad_agent_priv->agent.rmpp_version = rmpp_version; |
285 | mad_agent_priv->agent.device = device; | 286 | mad_agent_priv->agent.device = device; |
286 | mad_agent_priv->agent.recv_handler = recv_handler; | 287 | mad_agent_priv->agent.recv_handler = recv_handler; |
287 | mad_agent_priv->agent.send_handler = send_handler; | 288 | mad_agent_priv->agent.send_handler = send_handler; |
@@ -341,6 +342,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
341 | INIT_LIST_HEAD(&mad_agent_priv->send_list); | 342 | INIT_LIST_HEAD(&mad_agent_priv->send_list); |
342 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); | 343 | INIT_LIST_HEAD(&mad_agent_priv->wait_list); |
343 | INIT_LIST_HEAD(&mad_agent_priv->done_list); | 344 | INIT_LIST_HEAD(&mad_agent_priv->done_list); |
345 | INIT_LIST_HEAD(&mad_agent_priv->rmpp_list); | ||
344 | INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); | 346 | INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv); |
345 | INIT_LIST_HEAD(&mad_agent_priv->local_list); | 347 | INIT_LIST_HEAD(&mad_agent_priv->local_list); |
346 | INIT_WORK(&mad_agent_priv->local_work, local_completions, | 348 | INIT_WORK(&mad_agent_priv->local_work, local_completions, |
@@ -502,6 +504,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) | |||
502 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); | 504 | spin_unlock_irqrestore(&port_priv->reg_lock, flags); |
503 | 505 | ||
504 | flush_workqueue(port_priv->wq); | 506 | flush_workqueue(port_priv->wq); |
507 | ib_cancel_rmpp_recvs(mad_agent_priv); | ||
505 | 508 | ||
506 | atomic_dec(&mad_agent_priv->refcount); | 509 | atomic_dec(&mad_agent_priv->refcount); |
507 | wait_event(mad_agent_priv->wait, | 510 | wait_event(mad_agent_priv->wait, |
@@ -786,12 +789,15 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, | |||
786 | int buf_size; | 789 | int buf_size; |
787 | void *buf; | 790 | void *buf; |
788 | 791 | ||
789 | if (rmpp_active) | ||
790 | return ERR_PTR(-EINVAL); /* until RMPP implemented */ | ||
791 | mad_agent_priv = container_of(mad_agent, | 792 | mad_agent_priv = container_of(mad_agent, |
792 | struct ib_mad_agent_private, agent); | 793 | struct ib_mad_agent_private, agent); |
793 | buf_size = get_buf_length(hdr_len, data_len); | 794 | buf_size = get_buf_length(hdr_len, data_len); |
794 | 795 | ||
796 | if ((!mad_agent->rmpp_version && | ||
797 | (rmpp_active || buf_size > sizeof(struct ib_mad))) || | ||
798 | (!rmpp_active && buf_size > sizeof(struct ib_mad))) | ||
799 | return ERR_PTR(-EINVAL); | ||
800 | |||
795 | buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask); | 801 | buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask); |
796 | if (!buf) | 802 | if (!buf) |
797 | return ERR_PTR(-ENOMEM); | 803 | return ERR_PTR(-ENOMEM); |
@@ -816,6 +822,18 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, | |||
816 | send_buf->send_wr.wr.ud.remote_qpn = remote_qpn; | 822 | send_buf->send_wr.wr.ud.remote_qpn = remote_qpn; |
817 | send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; | 823 | send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; |
818 | send_buf->send_wr.wr.ud.pkey_index = pkey_index; | 824 | send_buf->send_wr.wr.ud.pkey_index = pkey_index; |
825 | |||
826 | if (rmpp_active) { | ||
827 | struct ib_rmpp_mad *rmpp_mad; | ||
828 | rmpp_mad = (struct ib_rmpp_mad *)send_buf->mad; | ||
829 | rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len - | ||
830 | offsetof(struct ib_rmpp_mad, data) + data_len); | ||
831 | rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version; | ||
832 | rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; | ||
833 | ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, | ||
834 | IB_MGMT_RMPP_FLAG_ACTIVE); | ||
835 | } | ||
836 | |||
819 | send_buf->mad_agent = mad_agent; | 837 | send_buf->mad_agent = mad_agent; |
820 | atomic_inc(&mad_agent_priv->refcount); | 838 | atomic_inc(&mad_agent_priv->refcount); |
821 | return send_buf; | 839 | return send_buf; |
@@ -839,7 +857,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf) | |||
839 | } | 857 | } |
840 | EXPORT_SYMBOL(ib_free_send_mad); | 858 | EXPORT_SYMBOL(ib_free_send_mad); |
841 | 859 | ||
842 | static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) | 860 | int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) |
843 | { | 861 | { |
844 | struct ib_mad_qp_info *qp_info; | 862 | struct ib_mad_qp_info *qp_info; |
845 | struct ib_send_wr *bad_send_wr; | 863 | struct ib_send_wr *bad_send_wr; |
@@ -940,13 +958,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent, | |||
940 | ret = -ENOMEM; | 958 | ret = -ENOMEM; |
941 | goto error2; | 959 | goto error2; |
942 | } | 960 | } |
961 | memset(mad_send_wr, 0, sizeof *mad_send_wr); | ||
943 | 962 | ||
944 | mad_send_wr->send_wr = *send_wr; | 963 | mad_send_wr->send_wr = *send_wr; |
945 | mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; | 964 | mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; |
946 | memcpy(mad_send_wr->sg_list, send_wr->sg_list, | 965 | memcpy(mad_send_wr->sg_list, send_wr->sg_list, |
947 | sizeof *send_wr->sg_list * send_wr->num_sge); | 966 | sizeof *send_wr->sg_list * send_wr->num_sge); |
948 | mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id; | 967 | mad_send_wr->wr_id = send_wr->wr_id; |
949 | mad_send_wr->send_wr.next = NULL; | ||
950 | mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid; | 968 | mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid; |
951 | mad_send_wr->mad_agent_priv = mad_agent_priv; | 969 | mad_send_wr->mad_agent_priv = mad_agent_priv; |
952 | /* Timeout will be updated after send completes */ | 970 | /* Timeout will be updated after send completes */ |
@@ -964,8 +982,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent, | |||
964 | &mad_agent_priv->send_list); | 982 | &mad_agent_priv->send_list); |
965 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 983 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
966 | 984 | ||
967 | ret = ib_send_mad(mad_send_wr); | 985 | if (mad_agent_priv->agent.rmpp_version) { |
968 | if (ret) { | 986 | ret = ib_send_rmpp_mad(mad_send_wr); |
987 | if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED) | ||
988 | ret = ib_send_mad(mad_send_wr); | ||
989 | } else | ||
990 | ret = ib_send_mad(mad_send_wr); | ||
991 | if (ret < 0) { | ||
969 | /* Fail send request */ | 992 | /* Fail send request */ |
970 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 993 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
971 | list_del(&mad_send_wr->agent_list); | 994 | list_del(&mad_send_wr->agent_list); |
@@ -991,31 +1014,25 @@ EXPORT_SYMBOL(ib_post_send_mad); | |||
991 | */ | 1014 | */ |
992 | void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) | 1015 | void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc) |
993 | { | 1016 | { |
994 | struct ib_mad_recv_buf *entry; | 1017 | struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf; |
995 | struct ib_mad_private_header *mad_priv_hdr; | 1018 | struct ib_mad_private_header *mad_priv_hdr; |
996 | struct ib_mad_private *priv; | 1019 | struct ib_mad_private *priv; |
1020 | struct list_head free_list; | ||
997 | 1021 | ||
998 | mad_priv_hdr = container_of(mad_recv_wc, | 1022 | INIT_LIST_HEAD(&free_list); |
999 | struct ib_mad_private_header, | 1023 | list_splice_init(&mad_recv_wc->rmpp_list, &free_list); |
1000 | recv_wc); | ||
1001 | priv = container_of(mad_priv_hdr, struct ib_mad_private, header); | ||
1002 | 1024 | ||
1003 | /* | 1025 | list_for_each_entry_safe(mad_recv_buf, temp_recv_buf, |
1004 | * Walk receive buffer list associated with this WC | 1026 | &free_list, list) { |
1005 | * No need to remove them from list of receive buffers | 1027 | mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc, |
1006 | */ | 1028 | recv_buf); |
1007 | list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) { | ||
1008 | /* Free previous receive buffer */ | ||
1009 | kmem_cache_free(ib_mad_cache, priv); | ||
1010 | mad_priv_hdr = container_of(mad_recv_wc, | 1029 | mad_priv_hdr = container_of(mad_recv_wc, |
1011 | struct ib_mad_private_header, | 1030 | struct ib_mad_private_header, |
1012 | recv_wc); | 1031 | recv_wc); |
1013 | priv = container_of(mad_priv_hdr, struct ib_mad_private, | 1032 | priv = container_of(mad_priv_hdr, struct ib_mad_private, |
1014 | header); | 1033 | header); |
1034 | kmem_cache_free(ib_mad_cache, priv); | ||
1015 | } | 1035 | } |
1016 | |||
1017 | /* Free last buffer */ | ||
1018 | kmem_cache_free(ib_mad_cache, priv); | ||
1019 | } | 1036 | } |
1020 | EXPORT_SYMBOL(ib_free_recv_mad); | 1037 | EXPORT_SYMBOL(ib_free_recv_mad); |
1021 | 1038 | ||
@@ -1524,9 +1541,20 @@ out: | |||
1524 | return valid; | 1541 | return valid; |
1525 | } | 1542 | } |
1526 | 1543 | ||
1527 | static struct ib_mad_send_wr_private* | 1544 | static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv, |
1528 | find_send_req(struct ib_mad_agent_private *mad_agent_priv, | 1545 | struct ib_mad_hdr *mad_hdr) |
1529 | u64 tid) | 1546 | { |
1547 | struct ib_rmpp_mad *rmpp_mad; | ||
1548 | |||
1549 | rmpp_mad = (struct ib_rmpp_mad *)mad_hdr; | ||
1550 | return !mad_agent_priv->agent.rmpp_version || | ||
1551 | !(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & | ||
1552 | IB_MGMT_RMPP_FLAG_ACTIVE) || | ||
1553 | (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); | ||
1554 | } | ||
1555 | |||
1556 | struct ib_mad_send_wr_private* | ||
1557 | ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid) | ||
1530 | { | 1558 | { |
1531 | struct ib_mad_send_wr_private *mad_send_wr; | 1559 | struct ib_mad_send_wr_private *mad_send_wr; |
1532 | 1560 | ||
@@ -1542,7 +1570,9 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv, | |||
1542 | */ | 1570 | */ |
1543 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | 1571 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, |
1544 | agent_list) { | 1572 | agent_list) { |
1545 | if (mad_send_wr->tid == tid && mad_send_wr->timeout) { | 1573 | if (is_data_mad(mad_agent_priv, |
1574 | mad_send_wr->send_wr.wr.ud.mad_hdr) && | ||
1575 | mad_send_wr->tid == tid && mad_send_wr->timeout) { | ||
1546 | /* Verify request has not been canceled */ | 1576 | /* Verify request has not been canceled */ |
1547 | return (mad_send_wr->status == IB_WC_SUCCESS) ? | 1577 | return (mad_send_wr->status == IB_WC_SUCCESS) ? |
1548 | mad_send_wr : NULL; | 1578 | mad_send_wr : NULL; |
@@ -1551,7 +1581,7 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv, | |||
1551 | return NULL; | 1581 | return NULL; |
1552 | } | 1582 | } |
1553 | 1583 | ||
1554 | static void ib_mark_req_done(struct ib_mad_send_wr_private *mad_send_wr) | 1584 | void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr) |
1555 | { | 1585 | { |
1556 | mad_send_wr->timeout = 0; | 1586 | mad_send_wr->timeout = 0; |
1557 | if (mad_send_wr->refcount == 1) { | 1587 | if (mad_send_wr->refcount == 1) { |
@@ -1569,12 +1599,23 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1569 | unsigned long flags; | 1599 | unsigned long flags; |
1570 | u64 tid; | 1600 | u64 tid; |
1571 | 1601 | ||
1572 | INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list); | 1602 | INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); |
1603 | list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); | ||
1604 | if (mad_agent_priv->agent.rmpp_version) { | ||
1605 | mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv, | ||
1606 | mad_recv_wc); | ||
1607 | if (!mad_recv_wc) { | ||
1608 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | ||
1609 | wake_up(&mad_agent_priv->wait); | ||
1610 | return; | ||
1611 | } | ||
1612 | } | ||
1613 | |||
1573 | /* Complete corresponding request */ | 1614 | /* Complete corresponding request */ |
1574 | if (response_mad(mad_recv_wc->recv_buf.mad)) { | 1615 | if (response_mad(mad_recv_wc->recv_buf.mad)) { |
1575 | tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid; | 1616 | tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid; |
1576 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 1617 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
1577 | mad_send_wr = find_send_req(mad_agent_priv, tid); | 1618 | mad_send_wr = ib_find_send_mad(mad_agent_priv, tid); |
1578 | if (!mad_send_wr) { | 1619 | if (!mad_send_wr) { |
1579 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1620 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1580 | ib_free_recv_mad(mad_recv_wc); | 1621 | ib_free_recv_mad(mad_recv_wc); |
@@ -1582,7 +1623,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1582 | wake_up(&mad_agent_priv->wait); | 1623 | wake_up(&mad_agent_priv->wait); |
1583 | return; | 1624 | return; |
1584 | } | 1625 | } |
1585 | ib_mark_req_done(mad_send_wr); | 1626 | ib_mark_mad_done(mad_send_wr); |
1586 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1627 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); |
1587 | 1628 | ||
1588 | /* Defined behavior is to complete response before request */ | 1629 | /* Defined behavior is to complete response before request */ |
@@ -1787,14 +1828,22 @@ void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, | |||
1787 | /* | 1828 | /* |
1788 | * Process a send work completion | 1829 | * Process a send work completion |
1789 | */ | 1830 | */ |
1790 | static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | 1831 | void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, |
1791 | struct ib_mad_send_wc *mad_send_wc) | 1832 | struct ib_mad_send_wc *mad_send_wc) |
1792 | { | 1833 | { |
1793 | struct ib_mad_agent_private *mad_agent_priv; | 1834 | struct ib_mad_agent_private *mad_agent_priv; |
1794 | unsigned long flags; | 1835 | unsigned long flags; |
1836 | int ret; | ||
1795 | 1837 | ||
1796 | mad_agent_priv = mad_send_wr->mad_agent_priv; | 1838 | mad_agent_priv = mad_send_wr->mad_agent_priv; |
1797 | spin_lock_irqsave(&mad_agent_priv->lock, flags); | 1839 | spin_lock_irqsave(&mad_agent_priv->lock, flags); |
1840 | if (mad_agent_priv->agent.rmpp_version) { | ||
1841 | ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc); | ||
1842 | if (ret == IB_RMPP_RESULT_CONSUMED) | ||
1843 | goto done; | ||
1844 | } else | ||
1845 | ret = IB_RMPP_RESULT_UNHANDLED; | ||
1846 | |||
1798 | if (mad_send_wc->status != IB_WC_SUCCESS && | 1847 | if (mad_send_wc->status != IB_WC_SUCCESS && |
1799 | mad_send_wr->status == IB_WC_SUCCESS) { | 1848 | mad_send_wr->status == IB_WC_SUCCESS) { |
1800 | mad_send_wr->status = mad_send_wc->status; | 1849 | mad_send_wr->status = mad_send_wc->status; |
@@ -1806,8 +1855,7 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | |||
1806 | mad_send_wr->status == IB_WC_SUCCESS) { | 1855 | mad_send_wr->status == IB_WC_SUCCESS) { |
1807 | wait_for_response(mad_send_wr); | 1856 | wait_for_response(mad_send_wr); |
1808 | } | 1857 | } |
1809 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | 1858 | goto done; |
1810 | return; | ||
1811 | } | 1859 | } |
1812 | 1860 | ||
1813 | /* Remove send from MAD agent and notify client of completion */ | 1861 | /* Remove send from MAD agent and notify client of completion */ |
@@ -1817,14 +1865,18 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, | |||
1817 | 1865 | ||
1818 | if (mad_send_wr->status != IB_WC_SUCCESS ) | 1866 | if (mad_send_wr->status != IB_WC_SUCCESS ) |
1819 | mad_send_wc->status = mad_send_wr->status; | 1867 | mad_send_wc->status = mad_send_wr->status; |
1820 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, | 1868 | if (ret != IB_RMPP_RESULT_INTERNAL) |
1821 | mad_send_wc); | 1869 | mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, |
1870 | mad_send_wc); | ||
1822 | 1871 | ||
1823 | /* Release reference on agent taken when sending */ | 1872 | /* Release reference on agent taken when sending */ |
1824 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) | 1873 | if (atomic_dec_and_test(&mad_agent_priv->refcount)) |
1825 | wake_up(&mad_agent_priv->wait); | 1874 | wake_up(&mad_agent_priv->wait); |
1826 | 1875 | ||
1827 | kfree(mad_send_wr); | 1876 | kfree(mad_send_wr); |
1877 | return; | ||
1878 | done: | ||
1879 | spin_unlock_irqrestore(&mad_agent_priv->lock, flags); | ||
1828 | } | 1880 | } |
1829 | 1881 | ||
1830 | static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, | 1882 | static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv, |
@@ -2036,7 +2088,9 @@ find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id) | |||
2036 | 2088 | ||
2037 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, | 2089 | list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, |
2038 | agent_list) { | 2090 | agent_list) { |
2039 | if (mad_send_wr->wr_id == wr_id) | 2091 | if (is_data_mad(mad_agent_priv, |
2092 | mad_send_wr->send_wr.wr.ud.mad_hdr) && | ||
2093 | mad_send_wr->wr_id == wr_id) | ||
2040 | return mad_send_wr; | 2094 | return mad_send_wr; |
2041 | } | 2095 | } |
2042 | return NULL; | 2096 | return NULL; |
@@ -2118,7 +2172,9 @@ static void local_completions(void *data) | |||
2118 | local->mad_priv->header.recv_wc.wc = &wc; | 2172 | local->mad_priv->header.recv_wc.wc = &wc; |
2119 | local->mad_priv->header.recv_wc.mad_len = | 2173 | local->mad_priv->header.recv_wc.mad_len = |
2120 | sizeof(struct ib_mad); | 2174 | sizeof(struct ib_mad); |
2121 | INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list); | 2175 | INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list); |
2176 | list_add(&local->mad_priv->header.recv_wc.recv_buf.list, | ||
2177 | &local->mad_priv->header.recv_wc.rmpp_list); | ||
2122 | local->mad_priv->header.recv_wc.recv_buf.grh = NULL; | 2178 | local->mad_priv->header.recv_wc.recv_buf.grh = NULL; |
2123 | local->mad_priv->header.recv_wc.recv_buf.mad = | 2179 | local->mad_priv->header.recv_wc.recv_buf.mad = |
2124 | &local->mad_priv->mad.mad; | 2180 | &local->mad_priv->mad.mad; |
@@ -2166,7 +2222,21 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr) | |||
2166 | mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr. | 2222 | mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr. |
2167 | wr.ud.timeout_ms); | 2223 | wr.ud.timeout_ms); |
2168 | 2224 | ||
2169 | ret = ib_send_mad(mad_send_wr); | 2225 | if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { |
2226 | ret = ib_retry_rmpp(mad_send_wr); | ||
2227 | switch (ret) { | ||
2228 | case IB_RMPP_RESULT_UNHANDLED: | ||
2229 | ret = ib_send_mad(mad_send_wr); | ||
2230 | break; | ||
2231 | case IB_RMPP_RESULT_CONSUMED: | ||
2232 | ret = 0; | ||
2233 | break; | ||
2234 | default: | ||
2235 | ret = -ECOMM; | ||
2236 | break; | ||
2237 | } | ||
2238 | } else | ||
2239 | ret = ib_send_mad(mad_send_wr); | ||
2170 | 2240 | ||
2171 | if (!ret) { | 2241 | if (!ret) { |
2172 | mad_send_wr->refcount++; | 2242 | mad_send_wr->refcount++; |
@@ -2724,3 +2794,4 @@ static void __exit ib_mad_cleanup_module(void) | |||
2724 | 2794 | ||
2725 | module_init(ib_mad_init_module); | 2795 | module_init(ib_mad_init_module); |
2726 | module_exit(ib_mad_cleanup_module); | 2796 | module_exit(ib_mad_cleanup_module); |
2797 | |||