author     Sean Hefty <sean.hefty@intel.com>       2005-08-19 16:50:33 -0400
committer  Roland Dreier <rolandd@cisco.com>       2005-08-26 23:37:38 -0400
commit     fe9e08e17af414a5fd8f3141b0fd88677f81a883 (patch)
tree       45c7277413c61cdc2797f2245d010161446c91c9 /drivers/infiniband
parent     b9ef520f9caf20aba8ac7cb2bbba45b52ff19d53 (diff)
[PATCH] IB: Add handling for ABORT and STOP RMPP MADs.
Add handling for ABORT and STOP RMPP MADs.

An incoming STOP or ABORT MAD now terminates the matching in-progress
send: the RMPP status it carries is validated and reported to the local
consumer through the send completion (IB_WC_REM_ABORT_ERR, with the
RMPP status in vendor_err). Protocol errors detected locally
(unsupported RMPP version, bad segment number, a window smaller than
the segment number, a receive timeout, or an unrecognized RMPP type)
abort the affected send and return an ABORT MAD carrying the
corresponding RMPP status to the remote node.
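A minimal sketch of how a consumer's registered send handler might
observe an aborted send, assuming the usual ib_mad.h declarations; the
handler name and message are illustrative only, not part of this patch:

	/* Illustrative send-completion handler; not part of this patch. */
	static void example_send_handler(struct ib_mad_agent *mad_agent,
					 struct ib_mad_send_wc *mad_send_wc)
	{
		/* After this change, RMPP sends terminated by a remote
		 * ABORT/STOP MAD or by a local protocol error complete
		 * with IB_WC_REM_ABORT_ERR; vendor_err carries the RMPP
		 * status code.
		 */
		if (mad_send_wc->status == IB_WC_REM_ABORT_ERR)
			printk(KERN_INFO "RMPP send aborted, rmpp status %u\n",
			       mad_send_wc->vendor_err);
	}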
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/core/mad_rmpp.c    309
-rw-r--r--   drivers/infiniband/include/ib_mad.h     2
2 files changed, 246 insertions(+), 65 deletions(-)
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index d68bf7e220f9..43fd805e0265 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -100,6 +100,121 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 	}
 }
 
+static int data_offset(u8 mgmt_class)
+{
+	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+		return offsetof(struct ib_sa_mad, data);
+	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+		return offsetof(struct ib_vendor_mad, data);
+	else
+		return offsetof(struct ib_rmpp_mad, data);
+}
+
+static void format_ack(struct ib_rmpp_mad *ack,
+		       struct ib_rmpp_mad *data,
+		       struct mad_rmpp_recv *rmpp_recv)
+{
+	unsigned long flags;
+
+	memcpy(&ack->mad_hdr, &data->mad_hdr,
+	       data_offset(data->mad_hdr.mgmt_class));
+
+	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
+	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+
+	spin_lock_irqsave(&rmpp_recv->lock, flags);
+	rmpp_recv->last_ack = rmpp_recv->seg_num;
+	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
+	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
+	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+}
+
+static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
+		     struct ib_mad_recv_wc *recv_wc)
+{
+	struct ib_mad_send_buf *msg;
+	struct ib_send_wr *bad_send_wr;
+	int hdr_len, ret;
+
+	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
+				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
+				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
+				 GFP_KERNEL);
+	if (!msg)
+		return;
+
+	format_ack((struct ib_rmpp_mad *) msg->mad,
+		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
+	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
+			       &bad_send_wr);
+	if (ret)
+		ib_free_send_mad(msg);
+}
+
+static int alloc_response_msg(struct ib_mad_agent *agent,
+			      struct ib_mad_recv_wc *recv_wc,
+			      struct ib_mad_send_buf **msg)
+{
+	struct ib_mad_send_buf *m;
+	struct ib_ah *ah;
+	int hdr_len;
+
+	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
+				  recv_wc->recv_buf.grh, agent->port_num);
+	if (IS_ERR(ah))
+		return PTR_ERR(ah);
+
+	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+	m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
+			       recv_wc->wc->pkey_index, ah, 1, hdr_len,
+			       sizeof(struct ib_rmpp_mad) - hdr_len,
+			       GFP_KERNEL);
+	if (IS_ERR(m)) {
+		ib_destroy_ah(ah);
+		return PTR_ERR(m);
+	}
+	*msg = m;
+	return 0;
+}
+
+static void free_msg(struct ib_mad_send_buf *msg)
+{
+	ib_destroy_ah(msg->send_wr.wr.ud.ah);
+	ib_free_send_mad(msg);
+}
+
+static void nack_recv(struct ib_mad_agent_private *agent,
+		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
+{
+	struct ib_mad_send_buf *msg;
+	struct ib_rmpp_mad *rmpp_mad;
+	struct ib_send_wr *bad_send_wr;
+	int ret;
+
+	ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
+	if (ret)
+		return;
+
+	rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+	memcpy(rmpp_mad, recv_wc->recv_buf.mad,
+	       data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
+
+	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
+	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
+	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
+	rmpp_mad->rmpp_hdr.seg_num = 0;
+	rmpp_mad->rmpp_hdr.paylen_newwin = 0;
+
+	ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
+	if (ret)
+		free_msg(msg);
+}
+
 static void recv_timeout_handler(void *data)
 {
 	struct mad_rmpp_recv *rmpp_recv = data;
@@ -115,8 +230,8 @@ static void recv_timeout_handler(void *data)
 	list_del(&rmpp_recv->list);
 	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
 
-	/* TODO: send abort. */
 	rmpp_wc = rmpp_recv->rmpp_wc;
+	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
 	destroy_rmpp_recv(rmpp_recv);
 	ib_free_recv_mad(rmpp_wc);
 }
@@ -230,60 +345,6 @@ insert_rmpp_recv(struct ib_mad_agent_private *agent,
 	return cur_rmpp_recv;
 }
 
-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return offsetof(struct ib_sa_mad, data);
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return offsetof(struct ib_vendor_mad, data);
-	else
-		return offsetof(struct ib_rmpp_mad, data);
-}
-
-static void format_ack(struct ib_rmpp_mad *ack,
-		       struct ib_rmpp_mad *data,
-		       struct mad_rmpp_recv *rmpp_recv)
-{
-	unsigned long flags;
-
-	memcpy(&ack->mad_hdr, &data->mad_hdr,
-	       data_offset(data->mad_hdr.mgmt_class));
-
-	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
-	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
-	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
-
-	spin_lock_irqsave(&rmpp_recv->lock, flags);
-	rmpp_recv->last_ack = rmpp_recv->seg_num;
-	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
-	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
-	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
-}
-
-static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
-		     struct ib_mad_recv_wc *recv_wc)
-{
-	struct ib_mad_send_buf *msg;
-	struct ib_send_wr *bad_send_wr;
-	int hdr_len, ret;
-
-	hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
-	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
-				 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
-				 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
-				 GFP_KERNEL);
-	if (!msg)
-		return;
-
-	format_ack((struct ib_rmpp_mad *) msg->mad,
-		   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
-	ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
-			       &bad_send_wr);
-	if (ret)
-		ib_free_send_mad(msg);
-}
-
 static inline int get_last_flag(struct ib_mad_recv_buf *seg)
 {
 	struct ib_rmpp_mad *rmpp_mad;
@@ -559,6 +620,34 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	return ib_send_mad(mad_send_wr);
 }
 
+static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
+		       u8 rmpp_status)
+{
+	struct ib_mad_send_wr_private *mad_send_wr;
+	struct ib_mad_send_wc wc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&agent->lock, flags);
+	mad_send_wr = ib_find_send_mad(agent, tid);
+	if (!mad_send_wr)
+		goto out;	/* Unmatched send */
+
+	if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
+	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+		goto out;	/* Send is already done */
+
+	ib_mark_mad_done(mad_send_wr);
+	spin_unlock_irqrestore(&agent->lock, flags);
+
+	wc.status = IB_WC_REM_ABORT_ERR;
+	wc.vendor_err = rmpp_status;
+	wc.wr_id = mad_send_wr->wr_id;
+	ib_mad_complete_send_wr(mad_send_wr, &wc);
+	return;
+out:
+	spin_unlock_irqrestore(&agent->lock, flags);
+}
+
 static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 			     struct ib_mad_recv_wc *mad_recv_wc)
 {
@@ -568,11 +657,21 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 	int seg_num, newwin, ret;
 
 	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
-	if (rmpp_mad->rmpp_hdr.rmpp_status)
+	if (rmpp_mad->rmpp_hdr.rmpp_status) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		return;
+	}
 
 	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
 	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+	if (newwin < seg_num) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_W2S);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
+		return;
+	}
 
 	spin_lock_irqsave(&agent->lock, flags);
 	mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
@@ -583,8 +682,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
 		goto out;	/* Send is already done */
 
-	if (seg_num > mad_send_wr->total_seg)
-		goto out;	/* Bad ACK */
+	if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
+		spin_unlock_irqrestore(&agent->lock, flags);
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_S2B);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
+		return;
+	}
 
 	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
 		goto out;	/* Old ACK */
@@ -628,6 +732,72 @@ out:
 	spin_unlock_irqrestore(&agent->lock, flags);
 }
 
+static struct ib_mad_recv_wc *
+process_rmpp_data(struct ib_mad_agent_private *agent,
+		  struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_rmpp_hdr *rmpp_hdr;
+	u8 rmpp_status;
+
+	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;
+
+	if (rmpp_hdr->rmpp_status) {
+		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
+		goto bad;
+	}
+
+	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
+			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
+			goto bad;
+		}
+		return start_rmpp(agent, mad_recv_wc);
+	} else {
+		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
+			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
+			goto bad;
+		}
+		return continue_rmpp(agent, mad_recv_wc);
+	}
+bad:
+	nack_recv(agent, mad_recv_wc, rmpp_status);
+	ib_free_recv_mad(mad_recv_wc);
+	return NULL;
+}
+
+static void process_rmpp_stop(struct ib_mad_agent_private *agent,
+			      struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
+	} else
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   rmpp_mad->rmpp_hdr.rmpp_status);
+}
+
+static void process_rmpp_abort(struct ib_mad_agent_private *agent,
+			       struct ib_mad_recv_wc *mad_recv_wc)
+{
+	struct ib_rmpp_mad *rmpp_mad;
+
+	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+
+	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
+	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
+	} else
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   rmpp_mad->rmpp_hdr.rmpp_status);
+}
+
 struct ib_mad_recv_wc *
 ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 			struct ib_mad_recv_wc *mad_recv_wc)
@@ -638,23 +808,29 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
 		return mad_recv_wc;
 
-	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
+	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_UNV);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
 		goto out;
+	}
 
 	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
 	case IB_MGMT_RMPP_TYPE_DATA:
-		if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
-			return start_rmpp(agent, mad_recv_wc);
-		else
-			return continue_rmpp(agent, mad_recv_wc);
+		return process_rmpp_data(agent, mad_recv_wc);
 	case IB_MGMT_RMPP_TYPE_ACK:
 		process_rmpp_ack(agent, mad_recv_wc);
 		break;
 	case IB_MGMT_RMPP_TYPE_STOP:
+		process_rmpp_stop(agent, mad_recv_wc);
+		break;
 	case IB_MGMT_RMPP_TYPE_ABORT:
-		/* TODO: process_rmpp_nack(agent, mad_recv_wc); */
+		process_rmpp_abort(agent, mad_recv_wc);
 		break;
 	default:
+		abort_send(agent, rmpp_mad->mad_hdr.tid,
+			   IB_MGMT_RMPP_STATUS_BADT);
+		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
 		break;
 	}
 out:
@@ -714,7 +890,10 @@ int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
 	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
 		msg = (struct ib_mad_send_buf *) (unsigned long)
 			mad_send_wc->wr_id;
-		ib_free_send_mad(msg);
+		if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
+			ib_free_send_mad(msg);
+		else
+			free_msg(msg);
 		return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
 	}
 
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
index 63237805d6af..9fcf6fc09035 100644
--- a/drivers/infiniband/include/ib_mad.h
+++ b/drivers/infiniband/include/ib_mad.h
@@ -90,6 +90,7 @@
 
 #define IB_MGMT_RMPP_STATUS_SUCCESS	0
 #define IB_MGMT_RMPP_STATUS_RESX	1
+#define IB_MGMT_RMPP_STATUS_ABORT_MIN	118
 #define IB_MGMT_RMPP_STATUS_T2L		118
 #define IB_MGMT_RMPP_STATUS_BAD_LEN	119
 #define IB_MGMT_RMPP_STATUS_BAD_SEG	120
@@ -100,6 +101,7 @@
 #define IB_MGMT_RMPP_STATUS_UNV		125
 #define IB_MGMT_RMPP_STATUS_TMR		126
 #define IB_MGMT_RMPP_STATUS_UNSPEC	127
+#define IB_MGMT_RMPP_STATUS_ABORT_MAX	127
 
 #define IB_QP0		0
 #define IB_QP1		__constant_htonl(1)