author		Hal Rosenstock <halr@voltaire.com>	2005-07-27 14:45:37 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-27 19:26:13 -0400
commit		fa619a77046bef30478697aba0553991033afb8e (patch)
tree		3783af8ac0c6804c9f437f6dfb08ecda8ce92fc3 /drivers
parent		d2082ee516200095956bd66279be4f62f4a5843d (diff)
[PATCH] IB: Add RMPP implementation
Add RMPP implementation.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
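As a rough usage sketch (not part of this patch; device, port_num, reg_req,
ah, and the two handlers are placeholders), a client of the new interface
would now pass a non-zero rmpp_version when registering and set rmpp_active
when allocating a send buffer:

	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *msg;

	/* rmpp_version must be 0 or IB_MGMT_RMPP_VERSION after this patch */
	agent = ib_register_mad_agent(device, port_num, IB_QPT_GSI,
				      &reg_req, IB_MGMT_RMPP_VERSION,
				      my_send_handler, my_recv_handler,
				      NULL);

	/* with rmpp_active = 1, ib_create_send_mad() initializes the RMPP
	 * header (version, type DATA, ACTIVE flag, payload length) instead
	 * of failing with -EINVAL as it did before RMPP was implemented */
	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah, 1,
				 hdr_len, data_len, GFP_KERNEL);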
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/infiniband/core/Makefile	|   2
-rw-r--r--	drivers/infiniband/core/mad.c		| 163
-rw-r--r--	drivers/infiniband/core/mad_priv.h	|  28
-rw-r--r--	drivers/infiniband/core/mad_rmpp.c	| 765
-rw-r--r--	drivers/infiniband/core/mad_rmpp.h	|  58
5 files changed, 966 insertions(+), 50 deletions(-)
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index e1a7cf3e8636..96b8eba95849 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o
 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
 	device.o fmr_pool.o cache.o
 
-ib_mad-y := mad.o smi.o agent.o
+ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
 
 ib_sa-y := sa_query.o
 
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 26e2b59ce5a6..b97e210ce9c8 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,12 +31,12 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: mad.c 1389 2004-12-27 22:56:47Z roland $
+ * $Id: mad.c 2817 2005-07-07 11:29:26Z halr $
  */
-
 #include <linux/dma-mapping.h>
 
 #include "mad_priv.h"
+#include "mad_rmpp.h"
 #include "smi.h"
 #include "agent.h"
 
@@ -45,6 +47,7 @@ MODULE_AUTHOR("Sean Hefty");
 
 
 kmem_cache_t *ib_mad_cache;
+
 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
 
@@ -62,8 +65,6 @@ static struct ib_mad_agent_private *find_mad_agent(
 static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 				    struct ib_mad_private *mad);
 static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
-static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
-				    struct ib_mad_send_wc *mad_send_wc);
 static void timeout_sends(void *data);
 static void local_completions(void *data);
 static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
@@ -195,8 +196,8 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	if (qpn == -1)
 		goto error1;
 
-	if (rmpp_version)
-		goto error1;	/* XXX: until RMPP implemented */
+	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION)
+		goto error1;
 
 	/* Validate MAD registration request if supplied */
 	if (mad_reg_req) {
@@ -281,7 +282,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	/* Now, fill in the various structures */
 	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
 	mad_agent_priv->reg_req = reg_req;
-	mad_agent_priv->rmpp_version = rmpp_version;
+	mad_agent_priv->agent.rmpp_version = rmpp_version;
 	mad_agent_priv->agent.device = device;
 	mad_agent_priv->agent.recv_handler = recv_handler;
 	mad_agent_priv->agent.send_handler = send_handler;
@@ -341,6 +342,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	INIT_LIST_HEAD(&mad_agent_priv->send_list);
 	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
 	INIT_LIST_HEAD(&mad_agent_priv->done_list);
+	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
 	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 	INIT_WORK(&mad_agent_priv->local_work, local_completions,
@@ -502,6 +504,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 
 	flush_workqueue(port_priv->wq);
+	ib_cancel_rmpp_recvs(mad_agent_priv);
 
 	atomic_dec(&mad_agent_priv->refcount);
 	wait_event(mad_agent_priv->wait,
@@ -786,12 +789,15 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 	int buf_size;
 	void *buf;
 
-	if (rmpp_active)
-		return ERR_PTR(-EINVAL);	/* until RMPP implemented */
 	mad_agent_priv = container_of(mad_agent,
 				      struct ib_mad_agent_private, agent);
 	buf_size = get_buf_length(hdr_len, data_len);
 
+	if ((!mad_agent->rmpp_version &&
+	     (rmpp_active || buf_size > sizeof(struct ib_mad))) ||
+	    (!rmpp_active && buf_size > sizeof(struct ib_mad)))
+		return ERR_PTR(-EINVAL);
+
 	buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
@@ -816,6 +822,18 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 	send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
 	send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
 	send_buf->send_wr.wr.ud.pkey_index = pkey_index;
+
+	if (rmpp_active) {
+		struct ib_rmpp_mad *rmpp_mad;
+		rmpp_mad = (struct ib_rmpp_mad *)send_buf->mad;
+		rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
+			offsetof(struct ib_rmpp_mad, data) + data_len);
+		rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
+		rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
+		ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
+				  IB_MGMT_RMPP_FLAG_ACTIVE);
+	}
+
 	send_buf->mad_agent = mad_agent;
 	atomic_inc(&mad_agent_priv->refcount);
 	return send_buf;
@@ -839,7 +857,7 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
 }
 EXPORT_SYMBOL(ib_free_send_mad);
 
-static int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
+int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_mad_qp_info *qp_info;
 	struct ib_send_wr *bad_send_wr;
@@ -940,13 +958,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent,
 		ret = -ENOMEM;
 		goto error2;
 	}
+	memset(mad_send_wr, 0, sizeof *mad_send_wr);
 
 	mad_send_wr->send_wr = *send_wr;
 	mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
 	memcpy(mad_send_wr->sg_list, send_wr->sg_list,
 	       sizeof *send_wr->sg_list * send_wr->num_sge);
-	mad_send_wr->wr_id = mad_send_wr->send_wr.wr_id;
-	mad_send_wr->send_wr.next = NULL;
+	mad_send_wr->wr_id = send_wr->wr_id;
 	mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
 	mad_send_wr->mad_agent_priv = mad_agent_priv;
 	/* Timeout will be updated after send completes */
@@ -964,8 +982,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent,
 			      &mad_agent_priv->send_list);
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
-	ret = ib_send_mad(mad_send_wr);
-	if (ret) {
+	if (mad_agent_priv->agent.rmpp_version) {
+		ret = ib_send_rmpp_mad(mad_send_wr);
+		if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
+			ret = ib_send_mad(mad_send_wr);
+	} else
+		ret = ib_send_mad(mad_send_wr);
+	if (ret < 0) {
 		/* Fail send request */
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
 		list_del(&mad_send_wr->agent_list);
@@ -991,31 +1014,25 @@ EXPORT_SYMBOL(ib_post_send_mad);
  */
 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
 {
-	struct ib_mad_recv_buf *entry;
+	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
 	struct ib_mad_private_header *mad_priv_hdr;
 	struct ib_mad_private *priv;
+	struct list_head free_list;
 
-	mad_priv_hdr = container_of(mad_recv_wc,
-				    struct ib_mad_private_header,
-				    recv_wc);
-	priv = container_of(mad_priv_hdr, struct ib_mad_private, header);
+	INIT_LIST_HEAD(&free_list);
+	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);
 
-	/*
-	 * Walk receive buffer list associated with this WC
-	 * No need to remove them from list of receive buffers
-	 */
-	list_for_each_entry(entry, &mad_recv_wc->recv_buf.list, list) {
-		/* Free previous receive buffer */
-		kmem_cache_free(ib_mad_cache, priv);
+	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
+				 &free_list, list) {
+		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
+					   recv_buf);
 		mad_priv_hdr = container_of(mad_recv_wc,
 					    struct ib_mad_private_header,
 					    recv_wc);
 		priv = container_of(mad_priv_hdr, struct ib_mad_private,
 				    header);
+		kmem_cache_free(ib_mad_cache, priv);
 	}
-
-	/* Free last buffer */
-	kmem_cache_free(ib_mad_cache, priv);
 }
 EXPORT_SYMBOL(ib_free_recv_mad);
 
@@ -1524,9 +1541,20 @@ out:
 	return valid;
 }
 
-static struct ib_mad_send_wr_private*
-find_send_req(struct ib_mad_agent_private *mad_agent_priv,
-	      u64 tid)
+static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
+		       struct ib_mad_hdr *mad_hdr)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
+	return !mad_agent_priv->agent.rmpp_version ||
+		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+		  IB_MGMT_RMPP_FLAG_ACTIVE) ||
+		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
+}
+
+struct ib_mad_send_wr_private*
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
 
@@ -1542,7 +1570,9 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv,
 	 */
 	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
 			    agent_list) {
-		if (mad_send_wr->tid == tid && mad_send_wr->timeout) {
+		if (is_data_mad(mad_agent_priv,
+				mad_send_wr->send_wr.wr.ud.mad_hdr) &&
+		    mad_send_wr->tid == tid && mad_send_wr->timeout) {
 			/* Verify request has not been canceled */
 			return (mad_send_wr->status == IB_WC_SUCCESS) ?
 				mad_send_wr : NULL;
@@ -1551,7 +1581,7 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv,
 	return NULL;
 }
 
-static void ib_mark_req_done(struct ib_mad_send_wr_private *mad_send_wr)
+void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	mad_send_wr->timeout = 0;
 	if (mad_send_wr->refcount == 1) {
@@ -1569,12 +1599,23 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	unsigned long flags;
 	u64 tid;
 
-	INIT_LIST_HEAD(&mad_recv_wc->recv_buf.list);
+	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
+	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
+	if (mad_agent_priv->agent.rmpp_version) {
+		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
+						      mad_recv_wc);
+		if (!mad_recv_wc) {
+			if (atomic_dec_and_test(&mad_agent_priv->refcount))
+				wake_up(&mad_agent_priv->wait);
+			return;
+		}
+	}
+
 	/* Complete corresponding request */
 	if (response_mad(mad_recv_wc->recv_buf.mad)) {
 		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
-		mad_send_wr = find_send_req(mad_agent_priv, tid);
+		mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
 		if (!mad_send_wr) {
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 			ib_free_recv_mad(mad_recv_wc);
@@ -1582,7 +1623,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 			wake_up(&mad_agent_priv->wait);
 			return;
 		}
-		ib_mark_req_done(mad_send_wr);
+		ib_mark_mad_done(mad_send_wr);
 		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
 		/* Defined behavior is to complete response before request */
@@ -1787,14 +1828,22 @@ void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
 /*
  * Process a send work completion
  */
-static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
-				    struct ib_mad_send_wc *mad_send_wc)
+void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
+			     struct ib_mad_send_wc *mad_send_wc)
 {
 	struct ib_mad_agent_private *mad_agent_priv;
 	unsigned long flags;
+	int ret;
 
 	mad_agent_priv = mad_send_wr->mad_agent_priv;
 	spin_lock_irqsave(&mad_agent_priv->lock, flags);
+	if (mad_agent_priv->agent.rmpp_version) {
+		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
+		if (ret == IB_RMPP_RESULT_CONSUMED)
+			goto done;
+	} else
+		ret = IB_RMPP_RESULT_UNHANDLED;
+
 	if (mad_send_wc->status != IB_WC_SUCCESS &&
 	    mad_send_wr->status == IB_WC_SUCCESS) {
 		mad_send_wr->status = mad_send_wc->status;
@@ -1806,8 +1855,7 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 		    mad_send_wr->status == IB_WC_SUCCESS) {
 			wait_for_response(mad_send_wr);
 		}
-		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
-		return;
+		goto done;
 	}
 
 	/* Remove send from MAD agent and notify client of completion */
@@ -1817,14 +1865,18 @@ static void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 
 	if (mad_send_wr->status != IB_WC_SUCCESS )
 		mad_send_wc->status = mad_send_wr->status;
-	mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
-					   mad_send_wc);
+	if (ret != IB_RMPP_RESULT_INTERNAL)
+		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
+						   mad_send_wc);
 
 	/* Release reference on agent taken when sending */
 	if (atomic_dec_and_test(&mad_agent_priv->refcount))
 		wake_up(&mad_agent_priv->wait);
 
 	kfree(mad_send_wr);
+	return;
+done:
+	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 }
 
 static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
@@ -2036,7 +2088,9 @@ find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
 
 	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
 			    agent_list) {
-		if (mad_send_wr->wr_id == wr_id)
+		if (is_data_mad(mad_agent_priv,
+				mad_send_wr->send_wr.wr.ud.mad_hdr) &&
+		    mad_send_wr->wr_id == wr_id)
 			return mad_send_wr;
 	}
 	return NULL;
@@ -2118,7 +2172,9 @@ static void local_completions(void *data)
 			local->mad_priv->header.recv_wc.wc = &wc;
 			local->mad_priv->header.recv_wc.mad_len =
 						sizeof(struct ib_mad);
-			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.recv_buf.list);
+			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
+			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
+				 &local->mad_priv->header.recv_wc.rmpp_list);
 			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
 			local->mad_priv->header.recv_wc.recv_buf.mad =
 						&local->mad_priv->mad.mad;
@@ -2166,7 +2222,21 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
 	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr.
 						wr.ud.timeout_ms);
 
-	ret = ib_send_mad(mad_send_wr);
+	if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
+		ret = ib_retry_rmpp(mad_send_wr);
+		switch (ret) {
+		case IB_RMPP_RESULT_UNHANDLED:
+			ret = ib_send_mad(mad_send_wr);
+			break;
+		case IB_RMPP_RESULT_CONSUMED:
+			ret = 0;
+			break;
+		default:
+			ret = -ECOMM;
+			break;
+		}
+	} else
+		ret = ib_send_mad(mad_send_wr);
 
 	if (!ret) {
 		mad_send_wr->refcount++;
@@ -2724,3 +2794,4 @@ static void __exit ib_mad_cleanup_module(void)
 
 module_init(ib_mad_init_module);
 module_exit(ib_mad_cleanup_module);
+
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index e5e37b5be387..568da10b05ab 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +31,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: mad_priv.h 1980 2005-03-11 22:33:53Z sean.hefty $
+ * $Id: mad_priv.h 2730 2005-06-28 16:43:03Z sean.hefty $
  */
 
 #ifndef __IB_MAD_PRIV_H__
@@ -97,10 +99,10 @@ struct ib_mad_agent_private {
 	unsigned long timeout;
 	struct list_head local_list;
 	struct work_struct local_work;
+	struct list_head rmpp_list;
 
 	atomic_t refcount;
 	wait_queue_head_t wait;
-	u8 rmpp_version;
 };
 
 struct ib_mad_snoop_private {
@@ -125,6 +127,14 @@ struct ib_mad_send_wr_private {
 	int retry;
 	int refcount;
 	enum ib_wc_status status;
+
+	/* RMPP control */
+	int last_ack;
+	int seg_num;
+	int newwin;
+	int total_seg;
+	int data_offset;
+	int pad;
 };
 
 struct ib_mad_local_private {
@@ -135,7 +145,6 @@ struct ib_mad_local_private {
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	u64 wr_id;			/* client WR ID */
 	u64 tid;
-	int retries;
 };
 
 struct ib_mad_mgmt_method_table {
@@ -198,4 +207,17 @@ struct ib_mad_port_private {
 
 extern kmem_cache_t *ib_mad_cache;
 
+int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
+
+struct ib_mad_send_wr_private *
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, u64 tid);
+
+void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
+			     struct ib_mad_send_wc *mad_send_wc);
+
+void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
+
+void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
+			  int timeout_ms);
+
 #endif	/* __IB_MAD_PRIV_H__ */
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
new file mode 100644
index 000000000000..8f1eb80e421f
--- /dev/null
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -0,0 +1,765 @@
+/*
+ * Copyright (c) 2005 Intel Inc. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
+ */
+
+#include <linux/dma-mapping.h>
+
+#include "mad_priv.h"
+#include "mad_rmpp.h"
+
+enum rmpp_state {
+	RMPP_STATE_ACTIVE,
+	RMPP_STATE_TIMEOUT,
+	RMPP_STATE_COMPLETE
+};
+
+struct mad_rmpp_recv {
+	struct ib_mad_agent_private *agent;
+	struct list_head list;
+	struct work_struct timeout_work;
+	struct work_struct cleanup_work;
+	wait_queue_head_t wait;
+	enum rmpp_state state;
+	spinlock_t lock;
+	atomic_t refcount;
+
+	struct ib_ah *ah;
+	struct ib_mad_recv_wc *rmpp_wc;
+	struct ib_mad_recv_buf *cur_seg_buf;
+	int last_ack;
+	int seg_num;
+	int newwin;
+
+	u64 tid;
+	u32 src_qp;
+	u16 slid;
+	u8 mgmt_class;
+	u8 class_version;
+	u8 method;
+};
+
+static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
+{
+	atomic_dec(&rmpp_recv->refcount);
+	wait_event(rmpp_recv->wait, !atomic_read(&rmpp_recv->refcount));
+	ib_destroy_ah(rmpp_recv->ah);
+	kfree(rmpp_recv);
+}
+
+void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
+{
+	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&agent->lock, flags);
+	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
+		cancel_delayed_work(&rmpp_recv->timeout_work);
+		cancel_delayed_work(&rmpp_recv->cleanup_work);
+	}
+	spin_unlock_irqrestore(&agent->lock, flags);
+
+	flush_workqueue(agent->qp_info->port_priv->wq);
+
+	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
+				 &agent->rmpp_list, list) {
+		list_del(&rmpp_recv->list);
+		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
+			ib_free_recv_mad(rmpp_recv->rmpp_wc);
+		destroy_rmpp_recv(rmpp_recv);
+	}
+}
+
+static void recv_timeout_handler(void *data)
+{
+	struct mad_rmpp_recv *rmpp_recv = data;
+	struct ib_mad_recv_wc *rmpp_wc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
+	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
+		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
+		return;
+	}
+	rmpp_recv->state = RMPP_STATE_TIMEOUT;
+	list_del(&rmpp_recv->list);
+	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
+
+	/* TODO: send abort. */
+	rmpp_wc = rmpp_recv->rmpp_wc;
+	destroy_rmpp_recv(rmpp_recv);
+	ib_free_recv_mad(rmpp_wc);
+}
+
+static void recv_cleanup_handler(void *data)
+{
+	struct mad_rmpp_recv *rmpp_recv = data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
+	list_del(&rmpp_recv->list);
+	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
+	destroy_rmpp_recv(rmpp_recv);
+}
+
+static struct mad_rmpp_recv *
+create_rmpp_recv(struct ib_mad_agent_private *agent,
+		 struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct mad_rmpp_recv *rmpp_recv;
+	struct ib_mad_hdr *mad_hdr;
+
+	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
+	if (!rmpp_recv)
+		return NULL;
+
+	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
+					     mad_recv_wc->wc,
+					     mad_recv_wc->recv_buf.grh,
+					     agent->agent.port_num);
+	if (IS_ERR(rmpp_recv->ah))
+		goto error;
+
+	rmpp_recv->agent = agent;
+	init_waitqueue_head(&rmpp_recv->wait);
+	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
+	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
+	spin_lock_init(&rmpp_recv->lock);
+	rmpp_recv->state = RMPP_STATE_ACTIVE;
+	atomic_set(&rmpp_recv->refcount, 1);
+
+	rmpp_recv->rmpp_wc = mad_recv_wc;
+	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
+	rmpp_recv->newwin = 1;
+	rmpp_recv->seg_num = 1;
+	rmpp_recv->last_ack = 0;
+
+	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
+	rmpp_recv->tid = mad_hdr->tid;
+	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
+	rmpp_recv->slid = mad_recv_wc->wc->slid;
+	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
+	rmpp_recv->class_version = mad_hdr->class_version;
+	rmpp_recv->method = mad_hdr->method;
+	return rmpp_recv;
+
+error:	kfree(rmpp_recv);
+	return NULL;
+}
+
+static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
+{
+	if (atomic_dec_and_test(&rmpp_recv->refcount))
+		wake_up(&rmpp_recv->wait);
+}
+
+static struct mad_rmpp_recv *
+find_rmpp_recv(struct ib_mad_agent_private *agent,
+	       struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct mad_rmpp_recv *rmpp_recv;
+	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
+
+	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
+		if (rmpp_recv->tid == mad_hdr->tid &&
+		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
+		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
+		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
+		    rmpp_recv->class_version == mad_hdr->class_version &&
+		    rmpp_recv->method == mad_hdr->method)
+			return rmpp_recv;
+	}
+	return NULL;
+}
+
+static struct mad_rmpp_recv *
+acquire_rmpp_recv(struct ib_mad_agent_private *agent,
+		  struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct mad_rmpp_recv *rmpp_recv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&agent->lock, flags);
+	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
+	if (rmpp_recv)
+		atomic_inc(&rmpp_recv->refcount);
+	spin_unlock_irqrestore(&agent->lock, flags);
+	return rmpp_recv;
+}
+
+static struct mad_rmpp_recv *
+insert_rmpp_recv(struct ib_mad_agent_private *agent,
+		 struct mad_rmpp_recv *rmpp_recv)
+{
+	struct mad_rmpp_recv *cur_rmpp_recv;
+
+	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
+	if (!cur_rmpp_recv)
+		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);
+
+	return cur_rmpp_recv;
+}
+
+static int data_offset(u8 mgmt_class)
+{
+	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+		return offsetof(struct ib_sa_mad, data);
+	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+		return offsetof(struct ib_vendor_mad, data);
+	else
+		return offsetof(struct ib_rmpp_mad, data);
+}
+
+static void format_ack(struct ib_rmpp_mad *ack,
+		       struct ib_rmpp_mad *data,
+		       struct mad_rmpp_recv *rmpp_recv)
+{
+	unsigned long flags;
+
+	memcpy(&ack->mad_hdr, &data->mad_hdr,
+	       data_offset(data->mad_hdr.mgmt_class));
+
+	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
+	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+
+	spin_lock_irqsave(&rmpp_recv->lock, flags);
+	rmpp_recv->last_ack = rmpp_recv->seg_num;
+	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
+	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
+	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+}
+
+static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
+		     struct ib_mad_recv_wc *recv_wc)
+{
+	struct ib_mad_send_buf *msg;
+	struct ib_send_wr *bad_send_wr;
+	int hdr_len, ret;
+
+	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
+				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
+				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
+				 GFP_KERNEL);
+	if (!msg)
+		return;
+
+	format_ack((struct ib_rmpp_mad *) msg->mad,
+		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
+	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
+			       &bad_send_wr);
+	if (ret)
+		ib_free_send_mad(msg);
+}
+
+static inline int get_last_flag(struct ib_mad_recv_buf *seg)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+
+	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
+	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
+}
+
+static inline int get_seg_num(struct ib_mad_recv_buf *seg)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+
+	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
+	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
+}
+
+static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
+						    struct ib_mad_recv_buf *seg)
+{
+	if (seg->list.next == rmpp_list)
+		return NULL;
+
+	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
+}
+
+static inline int window_size(struct ib_mad_agent_private *agent)
+{
+	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
+}
+
+static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
+						  int seg_num)
+{
+	struct ib_mad_recv_buf *seg_buf;
+	int cur_seg_num;
+
+	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
+		cur_seg_num = get_seg_num(seg_buf);
+		if (seg_num > cur_seg_num)
+			return seg_buf;
+		if (seg_num == cur_seg_num)
+			break;
+	}
+	return NULL;
+}
+
+static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
+			   struct ib_mad_recv_buf *new_buf)
+{
+	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;
+
+	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
+		rmpp_recv->cur_seg_buf = new_buf;
+		rmpp_recv->seg_num++;
+		new_buf = get_next_seg(rmpp_list, new_buf);
+	}
+}
+
+static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+	int hdr_size, data_size, pad;
+
+	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
+
+	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
+	pad = data_size - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+	if (pad > data_size || pad < 0)
+		pad = 0;
+
+	return hdr_size + rmpp_recv->seg_num * data_size - pad;
+}
+
+static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
+{
+	struct ib_mad_recv_wc *rmpp_wc;
+
+	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
+	if (rmpp_recv->seg_num > 1)
+		cancel_delayed_work(&rmpp_recv->timeout_work);
+
+	rmpp_wc = rmpp_recv->rmpp_wc;
+	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
+	/* 10 seconds until we can find the packet lifetime */
+	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
+			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
+	return rmpp_wc;
+}
+
+void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf)
+{
+	struct ib_mad_recv_buf *seg_buf;
+	struct ib_rmpp_mad *rmpp_mad;
+	void *data;
+	int size, len, offset;
+	u8 flags;
+
+	len = mad_recv_wc->mad_len;
+	if (len <= sizeof(struct ib_mad)) {
+		memcpy(buf, mad_recv_wc->recv_buf.mad, len);
+		return;
+	}
+
+	offset = data_offset(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+
+	list_for_each_entry(seg_buf, &mad_recv_wc->rmpp_list, list) {
+		rmpp_mad = (struct ib_rmpp_mad *)seg_buf->mad;
+		flags = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr);
+
+		if (flags & IB_MGMT_RMPP_FLAG_FIRST) {
+			data = rmpp_mad;
+			size = sizeof(*rmpp_mad);
+		} else {
+			data = (void *) rmpp_mad + offset;
+			if (flags & IB_MGMT_RMPP_FLAG_LAST)
+				size = len;
+			else
+				size = sizeof(*rmpp_mad) - offset;
+		}
+
+		memcpy(buf, data, size);
+		len -= size;
+		buf += size;
+	}
+}
+EXPORT_SYMBOL(ib_coalesce_recv_mad);
+
+static struct ib_mad_recv_wc *
+continue_rmpp(struct ib_mad_agent_private *agent,
+	      struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct mad_rmpp_recv *rmpp_recv;
+	struct ib_mad_recv_buf *prev_buf;
+	struct ib_mad_recv_wc *done_wc;
+	int seg_num;
+	unsigned long flags;
+
+	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
+	if (!rmpp_recv)
+		goto drop1;
+
+	seg_num = get_seg_num(&mad_recv_wc->recv_buf);
+
+	spin_lock_irqsave(&rmpp_recv->lock, flags);
+	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
+	    (seg_num > rmpp_recv->newwin))
+		goto drop3;
+
+	if ((seg_num <= rmpp_recv->last_ack) ||
+	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
+		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+		ack_recv(rmpp_recv, mad_recv_wc);
+		goto drop2;
+	}
+
+	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
+	if (!prev_buf)
+		goto drop3;
+
+	done_wc = NULL;
+	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
+	if (rmpp_recv->cur_seg_buf == prev_buf) {
+		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
+		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
+			rmpp_recv->state = RMPP_STATE_COMPLETE;
+			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+			done_wc = complete_rmpp(rmpp_recv);
+			goto out;
+		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
+			rmpp_recv->newwin += window_size(agent);
+			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+			ack_recv(rmpp_recv, mad_recv_wc);
+			goto out;
+		}
+	}
+	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+out:
+	deref_rmpp_recv(rmpp_recv);
+	return done_wc;
+
+drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+drop2:	deref_rmpp_recv(rmpp_recv);
+drop1:	ib_free_recv_mad(mad_recv_wc);
+	return NULL;
+}
+
+static struct ib_mad_recv_wc *
+start_rmpp(struct ib_mad_agent_private *agent,
+	   struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct mad_rmpp_recv *rmpp_recv;
+	unsigned long flags;
+
+	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
+	if (!rmpp_recv) {
+		ib_free_recv_mad(mad_recv_wc);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&agent->lock, flags);
+	if (insert_rmpp_recv(agent, rmpp_recv)) {
+		spin_unlock_irqrestore(&agent->lock, flags);
+		/* duplicate first MAD */
+		destroy_rmpp_recv(rmpp_recv);
+		return continue_rmpp(agent, mad_recv_wc);
+	}
+	atomic_inc(&rmpp_recv->refcount);
+
+	if (get_last_flag(&mad_recv_wc->recv_buf)) {
+		rmpp_recv->state = RMPP_STATE_COMPLETE;
+		spin_unlock_irqrestore(&agent->lock, flags);
+		complete_rmpp(rmpp_recv);
+	} else {
+		spin_unlock_irqrestore(&agent->lock, flags);
+		/* 40 seconds until we can find the packet lifetimes */
+		queue_delayed_work(agent->qp_info->port_priv->wq,
+				   &rmpp_recv->timeout_work,
+				   msecs_to_jiffies(40000));
+		rmpp_recv->newwin += window_size(agent);
+		ack_recv(rmpp_recv, mad_recv_wc);
+		mad_recv_wc = NULL;
+	}
+	deref_rmpp_recv(rmpp_recv);
+	return mad_recv_wc;
+}
+
+static inline u64 get_seg_addr(struct ib_mad_send_wr_private *mad_send_wr)
+{
+	return mad_send_wr->sg_list[0].addr + mad_send_wr->data_offset +
+	       (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset) *
+	       (mad_send_wr->seg_num - 1);
+}
+
+static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+	int timeout;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num);
+
+	if (mad_send_wr->seg_num == 1) {
+		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
+		rmpp_mad->rmpp_hdr.paylen_newwin =
+			cpu_to_be32(mad_send_wr->total_seg *
+				    (sizeof(struct ib_rmpp_mad) -
+				     offsetof(struct ib_rmpp_mad, data)));
+		mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad);
+	} else {
+		mad_send_wr->send_wr.num_sge = 2;
+		mad_send_wr->sg_list[0].length = mad_send_wr->data_offset;
+		mad_send_wr->sg_list[1].addr = get_seg_addr(mad_send_wr);
+		mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) -
+						 mad_send_wr->data_offset;
+		mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey;
+	}
+
+	if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
+		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
+		rmpp_mad->rmpp_hdr.paylen_newwin =
+			cpu_to_be32(sizeof(struct ib_rmpp_mad) -
+				    offsetof(struct ib_rmpp_mad, data) -
+				    mad_send_wr->pad);
+	}
+
+	/* 2 seconds for an ACK until we can find the packet lifetime */
+	timeout = mad_send_wr->send_wr.wr.ud.timeout_ms;
+	if (!timeout || timeout > 2000)
+		mad_send_wr->timeout = msecs_to_jiffies(2000);
+	mad_send_wr->seg_num++;
+	return ib_send_mad(mad_send_wr);
+}
+
+static void process_rmpp_ack(struct ib_mad_agent_private *agent,
+			     struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_mad_send_wr_private *mad_send_wr;
+	struct ib_rmpp_mad *rmpp_mad;
+	unsigned long flags;
+	int seg_num, newwin, ret;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+	if (rmpp_mad->rmpp_hdr.rmpp_status)
+		return;
+
+	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
+	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+
+	spin_lock_irqsave(&agent->lock, flags);
+	mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
+	if (!mad_send_wr)
+		goto out;	/* Unmatched ACK */
+
+	if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
+	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+		goto out;	/* Send is already done */
+
+	if (seg_num > mad_send_wr->total_seg)
+		goto out;	/* Bad ACK */
+
+	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
+		goto out;	/* Old ACK */
+
+	if (seg_num > mad_send_wr->last_ack) {
+		mad_send_wr->last_ack = seg_num;
+		mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
+	}
+	mad_send_wr->newwin = newwin;
+	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
+		/* If no response is expected, the ACK completes the send */
+		if (!mad_send_wr->send_wr.wr.ud.timeout_ms) {
+			struct ib_mad_send_wc wc;
+
+			ib_mark_mad_done(mad_send_wr);
+			spin_unlock_irqrestore(&agent->lock, flags);
+
+			wc.status = IB_WC_SUCCESS;
+			wc.vendor_err = 0;
+			wc.wr_id = mad_send_wr->wr_id;
+			ib_mad_complete_send_wr(mad_send_wr, &wc);
+			return;
+		}
+		if (mad_send_wr->refcount == 1)
+			ib_reset_mad_timeout(mad_send_wr, mad_send_wr->
+					     send_wr.wr.ud.timeout_ms);
+	} else if (mad_send_wr->refcount == 1 &&
+		   mad_send_wr->seg_num < mad_send_wr->newwin &&
+		   mad_send_wr->seg_num <= mad_send_wr->total_seg) {
+		/* Send failure will just result in a timeout/retry */
+		ret = send_next_seg(mad_send_wr);
+		if (ret)
+			goto out;
+
+		mad_send_wr->refcount++;
+		list_del(&mad_send_wr->agent_list);
+		list_add_tail(&mad_send_wr->agent_list,
+			      &mad_send_wr->mad_agent_priv->send_list);
+	}
+out:
+	spin_unlock_irqrestore(&agent->lock, flags);
+}
+
+struct ib_mad_recv_wc *
+ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
+			struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
+		return mad_recv_wc;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
+		goto out;
+
+	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
+	case IB_MGMT_RMPP_TYPE_DATA:
+		if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
+			return start_rmpp(agent, mad_recv_wc);
+		else
+			return continue_rmpp(agent, mad_recv_wc);
+	case IB_MGMT_RMPP_TYPE_ACK:
+		process_rmpp_ack(agent, mad_recv_wc);
+		break;
+	case IB_MGMT_RMPP_TYPE_STOP:
+	case IB_MGMT_RMPP_TYPE_ABORT:
+		/* TODO: process_rmpp_nack(agent, mad_recv_wc); */
+		break;
+	default:
+		break;
+	}
+out:
+	ib_free_recv_mad(mad_recv_wc);
+	return NULL;
+}
+
+int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+	int i, total_len, ret;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+	      IB_MGMT_RMPP_FLAG_ACTIVE))
+		return IB_RMPP_RESULT_UNHANDLED;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
+		return IB_RMPP_RESULT_INTERNAL;
+
+	if (mad_send_wr->send_wr.num_sge > 1)
+		return -EINVAL;		/* TODO: support num_sge > 1 */
+
+	mad_send_wr->seg_num = 1;
+	mad_send_wr->newwin = 1;
+	mad_send_wr->data_offset = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+
+	total_len = 0;
+	for (i = 0; i < mad_send_wr->send_wr.num_sge; i++)
+		total_len += mad_send_wr->send_wr.sg_list[i].length;
+
+	mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) /
+			(sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset);
+	mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) -
+			   be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+
+	/* We need to wait for the final ACK even if there isn't a response */
+	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
+	ret = send_next_seg(mad_send_wr);
+	if (!ret)
+		return IB_RMPP_RESULT_CONSUMED;
+	return ret;
+}
+
+int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
+			    struct ib_mad_send_wc *mad_send_wc)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+	struct ib_mad_send_buf *msg;
+	int ret;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+	      IB_MGMT_RMPP_FLAG_ACTIVE))
+		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
+
+	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
+		msg = (struct ib_mad_send_buf *) (unsigned long)
+		      mad_send_wc->wr_id;
+		ib_free_send_mad(msg);
+		return IB_RMPP_RESULT_INTERNAL;	/* ACK, STOP, or ABORT */
+	}
+
+	if (mad_send_wc->status != IB_WC_SUCCESS ||
+	    mad_send_wr->status != IB_WC_SUCCESS)
+		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */
+
+	if (!mad_send_wr->timeout)
+		return IB_RMPP_RESULT_PROCESSED; /* Response received */
+
+	if (mad_send_wr->last_ack == mad_send_wr->total_seg) {
+		mad_send_wr->timeout =
+			msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms);
+		return IB_RMPP_RESULT_PROCESSED; /* Send done */
+	}
+
+	if (mad_send_wr->seg_num > mad_send_wr->newwin ||
+	    mad_send_wr->seg_num > mad_send_wr->total_seg)
+		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */
+
+	ret = send_next_seg(mad_send_wr);
+	if (ret) {
+		mad_send_wc->status = IB_WC_GENERAL_ERR;
+		return IB_RMPP_RESULT_PROCESSED;
+	}
+	return IB_RMPP_RESULT_CONSUMED;
+}
+
+int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+	int ret;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr;
+	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+	      IB_MGMT_RMPP_FLAG_ACTIVE))
+		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
+
+	if (mad_send_wr->last_ack == mad_send_wr->total_seg)
+		return IB_RMPP_RESULT_PROCESSED;
+
+	mad_send_wr->seg_num = mad_send_wr->last_ack + 1;
+	ret = send_next_seg(mad_send_wr);
+	if (ret)
+		return IB_RMPP_RESULT_PROCESSED;
+
+	return IB_RMPP_RESULT_CONSUMED;
+}
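A worked example of the segmentation arithmetic in ib_send_rmpp_mad() and
ib_create_send_mad() above (the sizes are assumptions for illustration: a
MAD is 256 bytes, SA MAD data starts at offset 56, RMPP MAD data at offset
36; hdr_len and data_len are hypothetical caller values):

	int mad_size    = 256;	/* sizeof(struct ib_rmpp_mad) */
	int sa_offset   = 56;	/* offsetof(struct ib_sa_mad, data) */
	int rmpp_offset = 36;	/* offsetof(struct ib_rmpp_mad, data) */
	int hdr_len = 56, data_len = 1000;
	int total_len = hdr_len + data_len;	/* 1056, from the sg list */

	/* ib_create_send_mad() stores hdr_len - rmpp_offset + data_len
	 * in paylen_newwin: 56 - 36 + 1000 = 1020 */
	int paylen = hdr_len - rmpp_offset + data_len;

	/* ib_send_rmpp_mad() then computes, for an SA-class MAD:
	 * total_seg = (1056 - 56) / (256 - 56) = 5 segments
	 * pad       = 1056 - 36 - 1020        = 0, since 1000 payload
	 *             bytes exactly fill five 200-byte segments */
	int total_seg = (total_len - sa_offset) / (mad_size - sa_offset);
	int pad = total_len - rmpp_offset - paylen;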
diff --git a/drivers/infiniband/core/mad_rmpp.h b/drivers/infiniband/core/mad_rmpp.h
new file mode 100644
index 000000000000..c4924dfb8e75
--- /dev/null
+++ b/drivers/infiniband/core/mad_rmpp.h
@@ -0,0 +1,58 @@
1/*
2 * Copyright (c) 2005 Intel Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: mad_rmpp.h 1921 2005-02-25 22:58:44Z sean.hefty $
33 */
34
35#ifndef __MAD_RMPP_H__
36#define __MAD_RMPP_H__
37
38enum {
39 IB_RMPP_RESULT_PROCESSED,
40 IB_RMPP_RESULT_CONSUMED,
41 IB_RMPP_RESULT_INTERNAL,
42 IB_RMPP_RESULT_UNHANDLED
43};
44
45int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr);
46
47struct ib_mad_recv_wc *
48ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
49 struct ib_mad_recv_wc *mad_recv_wc);
50
51int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
52 struct ib_mad_send_wc *mad_send_wc);
53
54void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent);
55
56int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr);
57
58#endif /* __MAD_RMPP_H__ */
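On the receive side, a hypothetical client recv_handler (not part of this
patch; the kmalloc use and handler wiring are illustrative) can copy a
reassembled transfer into one flat buffer with the newly exported
ib_coalesce_recv_mad():

	static void my_recv_handler(struct ib_mad_agent *agent,
				    struct ib_mad_recv_wc *mad_recv_wc)
	{
		void *flat = kmalloc(mad_recv_wc->mad_len, GFP_KERNEL);

		if (flat) {
			/* walks rmpp_list, copying the first segment whole
			 * and only the data area of later segments */
			ib_coalesce_recv_mad(mad_recv_wc, flat);
			/* ... consume the flat MAD ... */
			kfree(flat);
		}
		/* frees every segment buffer on rmpp_list */
		ib_free_recv_mad(mad_recv_wc);
	}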