author	Jeff Garzik <jeff@garzik.org>	2006-04-12 16:54:16 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-04-12 16:54:16 -0400
commit	875999c5539999f61a45620aae0c3e5fb1d2b035 (patch)
tree	4535032a8a10f5782c0aef6a620b1a624ea9f863	/drivers/infiniband/core/mad_rmpp.c
parent	79072f38909e3d9883317238887460c39ddcc4cb (diff)
parent	26ec634c31a11a003040e10b4d650495158632fd (diff)
Merge branch 'upstream'
Diffstat (limited to 'drivers/infiniband/core/mad_rmpp.c')
-rw-r--r--	drivers/infiniband/core/mad_rmpp.c	54
1 file changed, 17 insertions, 37 deletions
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index bacfdd5bddad..dfd4e588ce03 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005 Intel Inc. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -100,17 +100,6 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 	}
 }
 
-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return IB_MGMT_SA_HDR;
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return IB_MGMT_VENDOR_HDR;
-	else
-		return IB_MGMT_RMPP_HDR;
-}
-
 static void format_ack(struct ib_mad_send_buf *msg,
 		       struct ib_rmpp_mad *data,
 		       struct mad_rmpp_recv *rmpp_recv)
@@ -137,7 +126,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 	struct ib_mad_send_buf *msg;
 	int ret, hdr_len;
 
-	hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1, hdr_len,
 				 0, GFP_KERNEL);
@@ -163,7 +152,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
 	if (IS_ERR(ah))
 		return (void *) ah;
 
-	hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
 	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1,
 				 hdr_len, 0, GFP_KERNEL);
@@ -408,7 +397,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 
 	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
 
-	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
 	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
 	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
@@ -562,15 +551,15 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	return ib_send_mad(mad_send_wr);
 }
 
-static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
-		       u8 rmpp_status)
+static void abort_send(struct ib_mad_agent_private *agent,
+		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc wc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&agent->lock, flags);
-	mad_send_wr = ib_find_send_mad(agent, tid);
+	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
 	if (!mad_send_wr)
 		goto out;	/* Unmatched send */
 
@@ -612,8 +601,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
 	if (rmpp_mad->rmpp_hdr.rmpp_status) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		return;
 	}
@@ -621,14 +609,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
 	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 	if (newwin < seg_num) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_W2S);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
 		return;
 	}
 
 	spin_lock_irqsave(&agent->lock, flags);
-	mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
+	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
 	if (!mad_send_wr)
 		goto out;	/* Unmatched ACK */
 
@@ -639,8 +626,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 	if (seg_num > mad_send_wr->send_buf.seg_count ||
 	    seg_num > mad_send_wr->newwin) {
 		spin_unlock_irqrestore(&agent->lock, flags);
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_S2B);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
 		return;
 	}
@@ -728,12 +714,10 @@ static void process_rmpp_stop(struct ib_mad_agent_private *agent,
 	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
 
 	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 	} else
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   rmpp_mad->rmpp_hdr.rmpp_status);
+		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
 }
 
 static void process_rmpp_abort(struct ib_mad_agent_private *agent,
@@ -745,12 +729,10 @@ static void process_rmpp_abort(struct ib_mad_agent_private *agent,
 
 	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
 	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 	} else
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   rmpp_mad->rmpp_hdr.rmpp_status);
+		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
 }
 
 struct ib_mad_recv_wc *
@@ -764,8 +746,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 		return mad_recv_wc;
 
 	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_UNV);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
 		goto out;
 	}
@@ -783,8 +764,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 		process_rmpp_abort(agent, mad_recv_wc);
 		break;
 	default:
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_BADT);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
 		break;
 	}
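
Two substitutions repeat throughout this diff. First, the file-local data_offset() helper removed in the -100,17 hunk mapped a MAD management class to the size of its class-specific header, and every caller now goes through ib_get_mad_data_offset() from the shared MAD core instead. The sketch below restates the removed helper's mapping as a standalone illustration of that logic; it is not the body of ib_get_mad_data_offset() itself, which presumably centralizes the same lookup and may handle additional management classes. The helper name is made up here to avoid clashing with the real one.

#include <rdma/ib_mad.h>

/*
 * Illustration only: the class-to-header-size mapping the removed
 * data_offset() performed, as visible in the hunk above.  The offset
 * tells RMPP code where class data starts inside a 256-byte MAD.
 */
static int example_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;		/* SA MADs carry a larger class header */
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;	/* vendor range 2 classes add an OUI field */
	else
		return IB_MGMT_RMPP_HDR;	/* base MAD header plus RMPP header */
}

Second, abort_send() and ib_find_send_mad() now take the full struct ib_mad_recv_wc rather than the 64-bit transaction ID, so the code matching an incoming ACK/STOP/ABORT to an outstanding send presumably has the whole received MAD (management class, requester address) available rather than the TID alone.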