author     Sean Hefty <sean.hefty@intel.com>        2006-08-28 18:10:32 -0400
committer  Roland Dreier <rolandd@cisco.com>        2006-09-22 18:22:44 -0400
commit     75ab13443e4575c00788ba9861105745b9dda05c
tree       f1244f0a1e819e2bf31ddef2e9cac5a349da2146 /drivers/infiniband
parent     76842405fca5f8b8e08d91558ecd3b922265034a
IB/mad: Add support for dual-sided RMPP transfers.
The implementation assumes that any RMPP request that requires a
response uses DS RMPP. Based on the RMPP start-up scenarios defined
by the spec, this should be a valid assumption. That is, there is no
start-up scenario defined where an RMPP request is followed by a
non-RMPP response. Making this assumption lets us avoid any API
changes.
In order for a node that supports DS RMPP to communicate with one that
does not, RMPP responses assume a new window size of 1 if a DS ACK has
not been received. (By DS ACK, I'm referring to the turn-around ACK
after the final ACK of the request.) This is a slight spec deviation,
but is necessary to allow communication with nodes that do not
generate the DS ACK. It also handles the case when a response is sent
after the request state has been discarded.
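
For readers skimming the diff, the window selection added by init_newwin()
reduces to the sketch below. This is only an illustration, not code from the
patch; the helper name and its flattened parameters are made up for clarity.

	/*
	 * Condensed view of the init_newwin() logic in the patch below:
	 * a response reuses the window the peer advertised in its DS ACK
	 * (rmpp_recv->repwin).  Requests, and responses to peers that never
	 * sent a DS ACK, start with a window of 1.
	 */
	static int initial_send_window(int sending_response, int ds_ack_seen,
				       int advertised_repwin)
	{
		if (!sending_response || !ds_ack_seen)
			return 1;
		return advertised_repwin;
	}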
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/core/mad_rmpp.c | 90
 1 file changed, 87 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index ebcd5b181770..74fe1af9b18a 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -60,6 +60,7 @@ struct mad_rmpp_recv {
 	int last_ack;
 	int seg_num;
 	int newwin;
+	int repwin;
 
 	__be64 tid;
 	u32 src_qp;
@@ -170,6 +171,32 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
 	return msg;
 }
 
+static void ack_ds_ack(struct ib_mad_agent_private *agent,
+		       struct ib_mad_recv_wc *recv_wc)
+{
+	struct ib_mad_send_buf *msg;
+	struct ib_rmpp_mad *rmpp_mad;
+	int ret;
+
+	msg = alloc_response_msg(&agent->agent, recv_wc);
+	if (IS_ERR(msg))
+		return;
+
+	rmpp_mad = msg->mad;
+	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);
+
+	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+	rmpp_mad->rmpp_hdr.seg_num = 0;
+	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);
+
+	ret = ib_post_send_mad(msg, NULL);
+	if (ret) {
+		ib_destroy_ah(msg->ah);
+		ib_free_send_mad(msg);
+	}
+}
+
 void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
 {
 	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
@@ -271,6 +298,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
 	rmpp_recv->newwin = 1;
 	rmpp_recv->seg_num = 1;
 	rmpp_recv->last_ack = 0;
+	rmpp_recv->repwin = 1;
 
 	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
 	rmpp_recv->tid = mad_hdr->tid;
@@ -591,6 +619,16 @@ static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
 			break;
 }
 
+static void process_ds_ack(struct ib_mad_agent_private *agent,
+			   struct ib_mad_recv_wc *mad_recv_wc, int newwin)
+{
+	struct mad_rmpp_recv *rmpp_recv;
+
+	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
+	if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
+		rmpp_recv->repwin = newwin;
+}
+
 static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 			     struct ib_mad_recv_wc *mad_recv_wc)
 {
@@ -616,8 +654,18 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 	spin_lock_irqsave(&agent->lock, flags);
 	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
-	if (!mad_send_wr)
-		goto out;	/* Unmatched ACK */
+	if (!mad_send_wr) {
+		if (!seg_num)
+			process_ds_ack(agent, mad_recv_wc, newwin);
+		goto out;	/* Unmatched or DS RMPP ACK */
+	}
+
+	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
+	    (mad_send_wr->timeout)) {
+		spin_unlock_irqrestore(&agent->lock, flags);
+		ack_ds_ack(agent, mad_recv_wc);
+		return;	/* Repeated ACK for DS RMPP transaction */
+	}
 
 	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
 	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
@@ -656,6 +704,9 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 		if (mad_send_wr->refcount == 1)
 			ib_reset_mad_timeout(mad_send_wr,
 				mad_send_wr->send_buf.timeout_ms);
+		spin_unlock_irqrestore(&agent->lock, flags);
+		ack_ds_ack(agent, mad_recv_wc);
+		return;
 	} else if (mad_send_wr->refcount == 1 &&
 		   mad_send_wr->seg_num < mad_send_wr->newwin &&
 		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
@@ -772,6 +823,39 @@ out:
 	return NULL;
 }
 
+static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
+{
+	struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
+	struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
+	struct mad_rmpp_recv *rmpp_recv;
+	struct ib_ah_attr ah_attr;
+	unsigned long flags;
+	int newwin = 1;
+
+	if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
+		goto out;
+
+	spin_lock_irqsave(&agent->lock, flags);
+	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
+		if (rmpp_recv->tid != mad_hdr->tid ||
+		    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
+		    rmpp_recv->class_version != mad_hdr->class_version ||
+		    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
+			continue;
+
+		if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
+			continue;
+
+		if (rmpp_recv->slid == ah_attr.dlid) {
+			newwin = rmpp_recv->repwin;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&agent->lock, flags);
+out:
+	return newwin;
+}
+
 int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
 	struct ib_rmpp_mad *rmpp_mad;
@@ -787,7 +871,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
 		return IB_RMPP_RESULT_INTERNAL;
 	}
 
-	mad_send_wr->newwin = 1;
+	mad_send_wr->newwin = init_newwin(mad_send_wr);
 
 	/* We need to wait for the final ACK even if there isn't a response */
 	mad_send_wr->refcount += (mad_send_wr->timeout == 0);