about summary refs log tree commit diff stats
path: root/drivers/infiniband/core
diff options
context:
space:
mode:
author    Jack Morgenstein <jackm@mellanox.co.il>  2006-03-28 19:39:07 -0500
committer Roland Dreier <rolandd@cisco.com>        2006-03-30 10:19:48 -0500
commit    fa9656bbd9af5b95adc43eaa0a143992346378cb (patch)
tree      100d516324208e1a4a65b77e86bcbc94f5ef0db9 /drivers/infiniband/core
parent    e1f7868c80947d40ef8e1fd37323d939a9bf311c (diff)
IB/mad: include GID/class when matching receives
Received responses are currently matched against sent requests based on TID only. According to the spec, responses should match based on the combination of TID, management class, and requester LID/GID. Without the additional qualification, an agent that is responding to two requests, both of which have the same TID, can match RMPP ACKs with the incorrect transaction. This problem can occur on the SM node when responding to SA queries.

Signed-off-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/mad.c       58
-rw-r--r--  drivers/infiniband/core/mad_priv.h   3
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c  35
3 files changed, 67 insertions(+), 29 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index f7854b65fd55..d4d07012a5ca 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1618,14 +1618,59 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
1618 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); 1618 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
1619} 1619}
1620 1620
/* NOTE(review): the lines below are added lines from a cgit-rendered diff;
 * the leading decimal on each line is the new-file line number, not code. */
/* Returns nonzero when the management class of the received MAD matches the
 * class of the MAD that was sent in 'wr' — one of the two extra match
 * qualifiers (class + GID/LID) this patch adds on top of the TID check. */
1621static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
1622 struct ib_mad_recv_wc *rwc)
1623{
1624 return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
1625 rwc->recv_buf.mad->mad_hdr.mgmt_class;
1626}
1627
/* NOTE(review): the lines below are added lines from a cgit-rendered diff;
 * the leading decimal on each line is the new-file line number, not code. */
/* Returns nonzero when the received MAD plausibly comes from the peer the
 * sent MAD in 'wr' was addressed to.  Compares the request/response
 * direction bits first, then the address: DLID vs. source LID when neither
 * side used a GRH, or destination GID vs. source GID when both did.
 * On any ambiguity (ib_query_ah failure, GRH on only one side) it returns
 * 0 so an uncertain match is treated as "different". */
1628static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr,
1629 struct ib_mad_recv_wc *rwc )
1630{
1631 struct ib_ah_attr attr;
1632 u8 send_resp, rcv_resp;
1633
 /* Extract the response bit of each MAD's method field. */
1634 send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
1635 mad_hdr.method & IB_MGMT_METHOD_RESP;
1636 rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
1637
1638 if (!send_resp && rcv_resp)
1639 /* is request/response. GID/LIDs are both local (same). */
1640 return 1;
1641
1642 if (send_resp == rcv_resp)
1643 /* both requests, or both responses. GIDs different */
1644 return 0;
1645
 /* Recover the address the sent MAD was directed to from its AH. */
1646 if (ib_query_ah(wr->send_buf.ah, &attr))
1647 /* Assume not equal, to avoid false positives. */
1648 return 0;
1649
1650 if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH))
1651 return attr.dlid == rwc->wc->slid;
1652 else if ((attr.ah_flags & IB_AH_GRH) &&
1653 (rwc->wc->wc_flags & IB_WC_GRH))
1654 return memcmp(attr.grh.dgid.raw,
1655 rwc->recv_buf.grh->sgid.raw, 16) == 0;
1656 else
1657 /* one has GID, other does not. Assume different */
1658 return 0;
1659}
1621struct ib_mad_send_wr_private* 1660struct ib_mad_send_wr_private*
1622ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid) 1661ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
1662 struct ib_mad_recv_wc *mad_recv_wc)
1623{ 1663{
1624 struct ib_mad_send_wr_private *mad_send_wr; 1664 struct ib_mad_send_wr_private *mad_send_wr;
1665 struct ib_mad *mad;
1666
1667 mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad;
1625 1668
1626 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 1669 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
1627 agent_list) { 1670 agent_list) {
1628 if (mad_send_wr->tid == tid) 1671 if ((mad_send_wr->tid == mad->mad_hdr.tid) &&
1672 rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
1673 rcv_has_same_gid(mad_send_wr, mad_recv_wc))
1629 return mad_send_wr; 1674 return mad_send_wr;
1630 } 1675 }
1631 1676
@@ -1636,7 +1681,10 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
1636 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 1681 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
1637 agent_list) { 1682 agent_list) {
1638 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && 1683 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
1639 mad_send_wr->tid == tid && mad_send_wr->timeout) { 1684 mad_send_wr->tid == mad->mad_hdr.tid &&
1685 mad_send_wr->timeout &&
1686 rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
1687 rcv_has_same_gid(mad_send_wr, mad_recv_wc)) {
1640 /* Verify request has not been canceled */ 1688 /* Verify request has not been canceled */
1641 return (mad_send_wr->status == IB_WC_SUCCESS) ? 1689 return (mad_send_wr->status == IB_WC_SUCCESS) ?
1642 mad_send_wr : NULL; 1690 mad_send_wr : NULL;
@@ -1661,7 +1709,6 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1661 struct ib_mad_send_wr_private *mad_send_wr; 1709 struct ib_mad_send_wr_private *mad_send_wr;
1662 struct ib_mad_send_wc mad_send_wc; 1710 struct ib_mad_send_wc mad_send_wc;
1663 unsigned long flags; 1711 unsigned long flags;
1664 __be64 tid;
1665 1712
1666 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1713 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
1667 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); 1714 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -1677,9 +1724,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
1677 1724
1678 /* Complete corresponding request */ 1725 /* Complete corresponding request */
1679 if (response_mad(mad_recv_wc->recv_buf.mad)) { 1726 if (response_mad(mad_recv_wc->recv_buf.mad)) {
1680 tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
1681 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1727 spin_lock_irqsave(&mad_agent_priv->lock, flags);
1682 mad_send_wr = ib_find_send_mad(mad_agent_priv, tid); 1728 mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
1683 if (!mad_send_wr) { 1729 if (!mad_send_wr) {
1684 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1730 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1685 ib_free_recv_mad(mad_recv_wc); 1731 ib_free_recv_mad(mad_recv_wc);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index a7125d4b5ccf..6c9c133d71ef 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -216,7 +216,8 @@ extern kmem_cache_t *ib_mad_cache;
216int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); 216int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
217 217
218struct ib_mad_send_wr_private * 218struct ib_mad_send_wr_private *
219ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid); 219ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
220 struct ib_mad_recv_wc *mad_recv_wc);
220 221
221void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 222void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
222 struct ib_mad_send_wc *mad_send_wc); 223 struct ib_mad_send_wc *mad_send_wc);
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index bacfdd5bddad..a6405079c285 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -562,15 +562,15 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
562 return ib_send_mad(mad_send_wr); 562 return ib_send_mad(mad_send_wr);
563} 563}
564 564
565static void abort_send(struct ib_mad_agent_private *agent, __be64 tid, 565static void abort_send(struct ib_mad_agent_private *agent,
566 u8 rmpp_status) 566 struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
567{ 567{
568 struct ib_mad_send_wr_private *mad_send_wr; 568 struct ib_mad_send_wr_private *mad_send_wr;
569 struct ib_mad_send_wc wc; 569 struct ib_mad_send_wc wc;
570 unsigned long flags; 570 unsigned long flags;
571 571
572 spin_lock_irqsave(&agent->lock, flags); 572 spin_lock_irqsave(&agent->lock, flags);
573 mad_send_wr = ib_find_send_mad(agent, tid); 573 mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
574 if (!mad_send_wr) 574 if (!mad_send_wr)
575 goto out; /* Unmatched send */ 575 goto out; /* Unmatched send */
576 576
@@ -612,8 +612,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
612 612
613 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 613 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
614 if (rmpp_mad->rmpp_hdr.rmpp_status) { 614 if (rmpp_mad->rmpp_hdr.rmpp_status) {
615 abort_send(agent, rmpp_mad->mad_hdr.tid, 615 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
616 IB_MGMT_RMPP_STATUS_BAD_STATUS);
617 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 616 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
618 return; 617 return;
619 } 618 }
@@ -621,14 +620,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
621 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); 620 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
622 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 621 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
623 if (newwin < seg_num) { 622 if (newwin < seg_num) {
624 abort_send(agent, rmpp_mad->mad_hdr.tid, 623 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
625 IB_MGMT_RMPP_STATUS_W2S);
626 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); 624 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
627 return; 625 return;
628 } 626 }
629 627
630 spin_lock_irqsave(&agent->lock, flags); 628 spin_lock_irqsave(&agent->lock, flags);
631 mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid); 629 mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
632 if (!mad_send_wr) 630 if (!mad_send_wr)
633 goto out; /* Unmatched ACK */ 631 goto out; /* Unmatched ACK */
634 632
@@ -639,8 +637,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
639 if (seg_num > mad_send_wr->send_buf.seg_count || 637 if (seg_num > mad_send_wr->send_buf.seg_count ||
640 seg_num > mad_send_wr->newwin) { 638 seg_num > mad_send_wr->newwin) {
641 spin_unlock_irqrestore(&agent->lock, flags); 639 spin_unlock_irqrestore(&agent->lock, flags);
642 abort_send(agent, rmpp_mad->mad_hdr.tid, 640 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
643 IB_MGMT_RMPP_STATUS_S2B);
644 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); 641 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
645 return; 642 return;
646 } 643 }
@@ -728,12 +725,10 @@ static void process_rmpp_stop(struct ib_mad_agent_private *agent,
728 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 725 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
729 726
730 if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { 727 if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
731 abort_send(agent, rmpp_mad->mad_hdr.tid, 728 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
732 IB_MGMT_RMPP_STATUS_BAD_STATUS);
733 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 729 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
734 } else 730 } else
735 abort_send(agent, rmpp_mad->mad_hdr.tid, 731 abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
736 rmpp_mad->rmpp_hdr.rmpp_status);
737} 732}
738 733
739static void process_rmpp_abort(struct ib_mad_agent_private *agent, 734static void process_rmpp_abort(struct ib_mad_agent_private *agent,
@@ -745,12 +740,10 @@ static void process_rmpp_abort(struct ib_mad_agent_private *agent,
745 740
746 if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || 741 if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
747 rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { 742 rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
748 abort_send(agent, rmpp_mad->mad_hdr.tid, 743 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
749 IB_MGMT_RMPP_STATUS_BAD_STATUS);
750 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 744 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
751 } else 745 } else
752 abort_send(agent, rmpp_mad->mad_hdr.tid, 746 abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
753 rmpp_mad->rmpp_hdr.rmpp_status);
754} 747}
755 748
756struct ib_mad_recv_wc * 749struct ib_mad_recv_wc *
@@ -764,8 +757,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
764 return mad_recv_wc; 757 return mad_recv_wc;
765 758
766 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { 759 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
767 abort_send(agent, rmpp_mad->mad_hdr.tid, 760 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
768 IB_MGMT_RMPP_STATUS_UNV);
769 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); 761 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
770 goto out; 762 goto out;
771 } 763 }
@@ -783,8 +775,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
783 process_rmpp_abort(agent, mad_recv_wc); 775 process_rmpp_abort(agent, mad_recv_wc);
784 break; 776 break;
785 default: 777 default:
786 abort_send(agent, rmpp_mad->mad_hdr.tid, 778 abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
787 IB_MGMT_RMPP_STATUS_BADT);
788 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); 779 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
789 break; 780 break;
790 } 781 }