author	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-30 17:32:38 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-30 17:32:38 -0500
commit	064c94f9da8845f12446ab37142aa10f3c6f66ac (patch)
tree	974a69a06e7f9218ad987e41fc6627d62eac091e
parent	256414dee44eaa0983f5ab1a71877de23c4e9ce7 (diff)
parent	618a3c03fcfdf1ac4543247c8ddfb0c9d775ff33 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/mad: RMPP support for additional classes
  IB/mad: include GID/class when matching receives
  IB/mthca: Fix section mismatch problems
  IPoIB: Fix oops with raw sockets
  IB/mthca: Fix check of size in SRQ creation
  IB/srp: Fix unmapping of fake scatterlist
-rw-r--r--  drivers/infiniband/core/mad.c              112
-rw-r--r--  drivers/infiniband/core/mad_priv.h           3
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c          54
-rw-r--r--  drivers/infiniband/core/user_mad.c          30
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_av.c       2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c       2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c       6
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c      2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c      2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c       4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_pd.c       2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c      4
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c    2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c          4
-rw-r--r--  include/rdma/ib_mad.h                       27
16 files changed, 168 insertions(+), 90 deletions(-)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index f7854b65fd55..ba54c856b0e5 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -227,6 +227,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 			if (!is_vendor_oui(mad_reg_req->oui))
 				goto error1;
 		}
+		/* Make sure class supplied is consistent with RMPP */
+		if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
+			if (!rmpp_version)
+				goto error1;
+		} else {
+			if (rmpp_version)
+				goto error1;
+		}
 		/* Make sure class supplied is consistent with QP type */
 		if (qp_type == IB_QPT_SMI) {
 			if ((mad_reg_req->mgmt_class !=
@@ -890,6 +898,35 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 }
 EXPORT_SYMBOL(ib_create_send_mad);
 
+int ib_get_mad_data_offset(u8 mgmt_class)
+{
+	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+		return IB_MGMT_SA_HDR;
+	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+		 (mgmt_class == IB_MGMT_CLASS_BIS))
+		return IB_MGMT_DEVICE_HDR;
+	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+		return IB_MGMT_VENDOR_HDR;
+	else
+		return IB_MGMT_MAD_HDR;
+}
+EXPORT_SYMBOL(ib_get_mad_data_offset);
+
+int ib_is_mad_class_rmpp(u8 mgmt_class)
+{
+	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
+	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
+	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
+	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
+	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
+		return 1;
+	return 0;
+}
+EXPORT_SYMBOL(ib_is_mad_class_rmpp);
+
 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
@@ -1022,6 +1059,13 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
 		goto error;
 	}
 
+	if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
+		if (mad_agent_priv->agent.rmpp_version) {
+			ret = -EINVAL;
+			goto error;
+		}
+	}
+
 	/*
 	 * Save pointer to next work request to post in case the
 	 * current one completes, and the user modifies the work
@@ -1618,14 +1662,59 @@ static int is_data_mad(struct ib_mad_agent_private *mad_agent_priv,
 		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
 }
 
+static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr,
+				     struct ib_mad_recv_wc *rwc)
+{
+	return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class ==
+		rwc->recv_buf.mad->mad_hdr.mgmt_class;
+}
+
+static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr,
+				   struct ib_mad_recv_wc *rwc)
+{
+	struct ib_ah_attr attr;
+	u8 send_resp, rcv_resp;
+
+	send_resp = ((struct ib_mad *)(wr->send_buf.mad))->
+		     mad_hdr.method & IB_MGMT_METHOD_RESP;
+	rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP;
+
+	if (!send_resp && rcv_resp)
+		/* is request/response. GID/LIDs are both local (same). */
+		return 1;
+
+	if (send_resp == rcv_resp)
+		/* both requests, or both responses. GIDs different */
+		return 0;
+
+	if (ib_query_ah(wr->send_buf.ah, &attr))
+		/* Assume not equal, to avoid false positives. */
+		return 0;
+
+	if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH))
+		return attr.dlid == rwc->wc->slid;
+	else if ((attr.ah_flags & IB_AH_GRH) &&
+		 (rwc->wc->wc_flags & IB_WC_GRH))
+		return memcmp(attr.grh.dgid.raw,
+			      rwc->recv_buf.grh->sgid.raw, 16) == 0;
+	else
+		/* one has GID, other does not. Assume different */
+		return 0;
+}
 struct ib_mad_send_wr_private*
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
+		 struct ib_mad_recv_wc *mad_recv_wc)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
+	struct ib_mad *mad;
+
+	mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad;
 
 	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
 			    agent_list) {
-		if (mad_send_wr->tid == tid)
+		if ((mad_send_wr->tid == mad->mad_hdr.tid) &&
+		    rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
+		    rcv_has_same_gid(mad_send_wr, mad_recv_wc))
 			return mad_send_wr;
 	}
 
@@ -1636,7 +1725,10 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
 	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
 			    agent_list) {
 		if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
-		    mad_send_wr->tid == tid && mad_send_wr->timeout) {
+		    mad_send_wr->tid == mad->mad_hdr.tid &&
+		    mad_send_wr->timeout &&
+		    rcv_has_same_class(mad_send_wr, mad_recv_wc) &&
+		    rcv_has_same_gid(mad_send_wr, mad_recv_wc)) {
 			/* Verify request has not been canceled */
 			return (mad_send_wr->status == IB_WC_SUCCESS) ?
 				mad_send_wr : NULL;
@@ -1661,7 +1753,6 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc mad_send_wc;
 	unsigned long flags;
-	__be64 tid;
 
 	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
 	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
@@ -1677,9 +1768,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 
 	/* Complete corresponding request */
 	if (response_mad(mad_recv_wc->recv_buf.mad)) {
-		tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid;
 		spin_lock_irqsave(&mad_agent_priv->lock, flags);
-		mad_send_wr = ib_find_send_mad(mad_agent_priv, tid);
+		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
 		if (!mad_send_wr) {
 			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 			ib_free_recv_mad(mad_recv_wc);
@@ -2408,11 +2498,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 		}
 	}
 	sg_list.addr = dma_map_single(qp_info->port_priv->
 					device->dma_device,
 				      &mad_priv->grh,
 				      sizeof *mad_priv -
 					sizeof mad_priv->header,
 				      DMA_FROM_DEVICE);
 	pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
 	recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 	mad_priv->header.mad_list.mad_queue = recv_queue;
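
Note: the two helpers exported above, ib_get_mad_data_offset() and ib_is_mad_class_rmpp(), replace the per-module data_offset() copies removed from mad_rmpp.c and user_mad.c below. A minimal sketch of the registration-time invariant the first hunk enforces; this predicate is illustrative only, not part of the patch:

	/* Sketch: a MAD agent registration is consistent only if an
	 * RMPP-capable class comes with a non-zero rmpp_version and a
	 * non-RMPP class comes without one.
	 */
	static int rmpp_consistent(u8 mgmt_class, u8 rmpp_version)
	{
		return ib_is_mad_class_rmpp(mgmt_class) ?
			(rmpp_version != 0) : (rmpp_version == 0);
	}
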
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index a7125d4b5ccf..6c9c133d71ef 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -216,7 +216,8 @@ extern kmem_cache_t *ib_mad_cache;
 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr);
 
 struct ib_mad_send_wr_private *
-ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid);
+ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv,
+		 struct ib_mad_recv_wc *mad_recv_wc);
 
 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 			     struct ib_mad_send_wc *mad_send_wc);
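
With the widened signature, ib_find_send_mad() now matches a receive against a queued send on three criteria rather than the transaction ID alone. A condensed sketch of the predicate, where rcv_has_same_class() and rcv_has_same_gid() are the helpers added in mad.c above:

	/* Sketch: the match applied to each entry on the wait list. */
	static int recv_matches_send(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_recv_wc *rwc)
	{
		struct ib_mad *mad = (struct ib_mad *)rwc->recv_buf.mad;

		return wr->tid == mad->mad_hdr.tid &&
		       rcv_has_same_class(wr, rwc) &&
		       rcv_has_same_gid(wr, rwc);
	}
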
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index bacfdd5bddad..dfd4e588ce03 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2005 Intel Inc. All rights reserved.
- * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -100,17 +100,6 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
 	}
 }
 
-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return IB_MGMT_SA_HDR;
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return IB_MGMT_VENDOR_HDR;
-	else
-		return IB_MGMT_RMPP_HDR;
-}
-
 static void format_ack(struct ib_mad_send_buf *msg,
 		       struct ib_rmpp_mad *data,
 		       struct mad_rmpp_recv *rmpp_recv)
@@ -137,7 +126,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 	struct ib_mad_send_buf *msg;
 	int ret, hdr_len;
 
-	hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1, hdr_len,
 				 0, GFP_KERNEL);
@@ -163,7 +152,7 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
 	if (IS_ERR(ah))
 		return (void *) ah;
 
-	hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
+	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
 	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1,
 				 hdr_len, 0, GFP_KERNEL);
@@ -408,7 +397,7 @@ static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
 
 	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;
 
-	hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
 	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
 	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
@@ -562,15 +551,15 @@ static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
 	return ib_send_mad(mad_send_wr);
 }
 
-static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
-		       u8 rmpp_status)
+static void abort_send(struct ib_mad_agent_private *agent,
+		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
 {
 	struct ib_mad_send_wr_private *mad_send_wr;
 	struct ib_mad_send_wc wc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&agent->lock, flags);
-	mad_send_wr = ib_find_send_mad(agent, tid);
+	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
 	if (!mad_send_wr)
 		goto out;	/* Unmatched send */
 
@@ -612,8 +601,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 
 	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
 	if (rmpp_mad->rmpp_hdr.rmpp_status) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		return;
 	}
@@ -621,14 +609,13 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
 	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
 	if (newwin < seg_num) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_W2S);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
 		return;
 	}
 
 	spin_lock_irqsave(&agent->lock, flags);
-	mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
+	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
 	if (!mad_send_wr)
 		goto out;	/* Unmatched ACK */
 
@@ -639,8 +626,7 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
 	if (seg_num > mad_send_wr->send_buf.seg_count ||
 	    seg_num > mad_send_wr->newwin) {
 		spin_unlock_irqrestore(&agent->lock, flags);
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_S2B);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
 		return;
 	}
@@ -728,12 +714,10 @@ static void process_rmpp_stop(struct ib_mad_agent_private *agent,
 	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
 
 	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 	} else
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   rmpp_mad->rmpp_hdr.rmpp_status);
+		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
 }
 
 static void process_rmpp_abort(struct ib_mad_agent_private *agent,
@@ -745,12 +729,10 @@ static void process_rmpp_abort(struct ib_mad_agent_private *agent,
 
 	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
 	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_BAD_STATUS);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
 	} else
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   rmpp_mad->rmpp_hdr.rmpp_status);
+		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
 }
 
 struct ib_mad_recv_wc *
@@ -764,8 +746,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 		return mad_recv_wc;
 
 	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_UNV);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
 		goto out;
 	}
@@ -783,8 +764,7 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
 		process_rmpp_abort(agent, mad_recv_wc);
 		break;
 	default:
-		abort_send(agent, rmpp_mad->mad_hdr.tid,
-			   IB_MGMT_RMPP_STATUS_BADT);
+		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
 		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
 		break;
 	}
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index fb6cd42601f9..afe70a549c2f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -177,17 +177,6 @@ static int queue_packet(struct ib_umad_file *file,
 	return ret;
 }
 
-static int data_offset(u8 mgmt_class)
-{
-	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-		return IB_MGMT_SA_HDR;
-	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-		return IB_MGMT_VENDOR_HDR;
-	else
-		return IB_MGMT_RMPP_HDR;
-}
-
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
@@ -283,7 +272,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
 		 */
 		return -ENOSPC;
 	}
-	offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
+	offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
 	max_seg_payload = sizeof (struct ib_mad) - offset;
 
 	for (left = packet->length - seg_payload, buf += seg_payload;
@@ -441,21 +430,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	}
 
 	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
-		hdr_len = IB_MGMT_SA_HDR;
-		copy_offset = IB_MGMT_RMPP_HDR;
-		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
-			      IB_MGMT_RMPP_FLAG_ACTIVE;
-	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
-		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
-		hdr_len = IB_MGMT_VENDOR_HDR;
+	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
+	if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
+		copy_offset = IB_MGMT_MAD_HDR;
+		rmpp_active = 0;
+	} else {
 		copy_offset = IB_MGMT_RMPP_HDR;
 		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
 			      IB_MGMT_RMPP_FLAG_ACTIVE;
-	} else {
-		hdr_len = IB_MGMT_MAD_HDR;
-		copy_offset = IB_MGMT_MAD_HDR;
-		rmpp_active = 0;
 	}
 
 	data_len = count - sizeof (struct ib_user_mad) - hdr_len;
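
A userspace write must supply sizeof(struct ib_user_mad) bytes of header plus at least the class header; the class header/data constants added to ib_mad.h below pair up so each MAD stays 256 bytes. A hedged compile-time check illustrating the arithmetic (not part of the patch; BUILD_BUG_ON must sit inside a function body):

	/* Each class's hdr + data spans one 256-byte MAD. */
	BUILD_BUG_ON(IB_MGMT_SA_HDR + IB_MGMT_SA_DATA !=
		     sizeof(struct ib_mad));		/* 56 + 200 */
	BUILD_BUG_ON(IB_MGMT_DEVICE_HDR + IB_MGMT_DEVICE_DATA !=
		     sizeof(struct ib_mad));		/* 64 + 192 */
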
diff --git a/drivers/infiniband/hw/mthca/mthca_av.c b/drivers/infiniband/hw/mthca/mthca_av.c
index f023d3936518..bc5bdcbe51b5 100644
--- a/drivers/infiniband/hw/mthca/mthca_av.c
+++ b/drivers/infiniband/hw/mthca/mthca_av.c
@@ -265,7 +265,7 @@ int __devinit mthca_init_av_table(struct mthca_dev *dev)
 	return -ENOMEM;
 }
 
-void __devexit mthca_cleanup_av_table(struct mthca_dev *dev)
+void mthca_cleanup_av_table(struct mthca_dev *dev)
 {
 	if (mthca_is_memfree(dev))
 		return;
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 76aabc5bf371..312cf90731ea 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -973,7 +973,7 @@ int __devinit mthca_init_cq_table(struct mthca_dev *dev)
 	return err;
 }
 
-void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
+void mthca_cleanup_cq_table(struct mthca_dev *dev)
 {
 	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
 	mthca_alloc_cleanup(&dev->cq_table.alloc);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index cbdc348fb689..99f109c3815d 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -765,7 +765,7 @@ static int __devinit mthca_map_eq_regs(struct mthca_dev *dev)
 
 }
 
-static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev)
+static void mthca_unmap_eq_regs(struct mthca_dev *dev)
 {
 	if (mthca_is_memfree(dev)) {
 		mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
@@ -821,7 +821,7 @@ int __devinit mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 	return ret;
 }
 
-void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev)
+void mthca_unmap_eq_icm(struct mthca_dev *dev)
 {
 	u8 status;
 
@@ -954,7 +954,7 @@ err_out_free:
 	return err;
 }
 
-void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev)
+void mthca_cleanup_eq_table(struct mthca_dev *dev)
 {
 	u8 status;
 	int i;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 4ace6a392f41..dfb482eac9a2 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -271,7 +271,7 @@ err:
 	return PTR_ERR(agent);
 }
 
-void mthca_free_agents(struct mthca_dev *dev)
+void __devexit mthca_free_agents(struct mthca_dev *dev)
 {
 	struct ib_mad_agent *agent;
 	int p, q;
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 9965bda9afed..47ca8a9b7247 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -388,7 +388,7 @@ int __devinit mthca_init_mcg_table(struct mthca_dev *dev)
 	return 0;
 }
 
-void __devexit mthca_cleanup_mcg_table(struct mthca_dev *dev)
+void mthca_cleanup_mcg_table(struct mthca_dev *dev)
 {
 	mthca_alloc_cleanup(&dev->mcg_table.alloc);
 }
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 698b62125765..25e1c1db9a40 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -170,7 +170,7 @@ err_out:
 	return -ENOMEM;
 }
 
-static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy)
+static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
 {
 	int i;
 
@@ -866,7 +866,7 @@ err_mtt_buddy:
 	return err;
 }
 
-void __devexit mthca_cleanup_mr_table(struct mthca_dev *dev)
+void mthca_cleanup_mr_table(struct mthca_dev *dev)
 {
 	/* XXX check if any MRs are still allocated? */
 	if (dev->limits.fmr_reserved_mtts)
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index 105fc5faaddf..59df51614c85 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -77,7 +77,7 @@ int __devinit mthca_init_pd_table(struct mthca_dev *dev)
 				      dev->limits.reserved_pds);
 }
 
-void __devexit mthca_cleanup_pd_table(struct mthca_dev *dev)
+void mthca_cleanup_pd_table(struct mthca_dev *dev)
 {
 	/* XXX check if any PDs are still allocated? */
 	mthca_alloc_cleanup(&dev->pd_table.alloc);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 1bc2678c2fae..057c8e6af87b 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -2204,7 +2204,7 @@ int __devinit mthca_init_qp_table(struct mthca_dev *dev)
 	return err;
 }
 
-void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
+void mthca_cleanup_qp_table(struct mthca_dev *dev)
 {
 	int i;
 	u8 status;
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 0cfd15802217..2dd3aea05341 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -206,7 +206,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		      roundup_pow_of_two(sizeof (struct mthca_next_seg) +
 					 srq->max_gs * sizeof (struct mthca_data_seg)));
 
-	if (ds > dev->limits.max_desc_sz)
+	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
 		return -EINVAL;
 
 	srq->wqe_shift = long_log2(ds);
@@ -684,7 +684,7 @@ int __devinit mthca_init_srq_table(struct mthca_dev *dev)
 	return err;
 }
 
-void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
+void mthca_cleanup_srq_table(struct mthca_dev *dev)
 {
 	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
 		return;
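
The SRQ fix above limits the descriptor-size check to Tavor-mode devices; mem-free (Arbel) hardware sizes its work queue entries differently, and the old unconditional check could spuriously reject valid SRQs. A worked example of the computation, assuming the usual 16-byte mthca_next_seg and mthca_data_seg layouts:

	/* max_gs = 16 scatter entries:
	 *   ds = roundup_pow_of_two(16 + 16 * 16) = 512 bytes
	 *   srq->wqe_shift = long_log2(512) = 9
	 * Only !mthca_is_memfree(dev) now compares ds against
	 * dev->limits.max_desc_sz.
	 */
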
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 53a32f65788d..9b0bd7c746ca 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -723,7 +723,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
 	 * destination address onto the front of the skb so we can
 	 * figure out where to send the packet later.
 	 */
-	if (!skb->dst || !skb->dst->neighbour) {
+	if ((!skb->dst || !skb->dst->neighbour) && daddr) {
 		struct ipoib_pseudoheader *phdr =
 			(struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
 		memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 61924cc30e55..fd8a95a9c5d3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -607,10 +607,10 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
 	 */
 	if (likely(scmnd->use_sg)) {
 		nents = scmnd->use_sg;
-		scat  = (struct scatterlist *) scmnd->request_buffer;
+		scat  = scmnd->request_buffer;
 	} else {
 		nents = 1;
-		scat  = (struct scatterlist *) scmnd->request_buffer;
+		scat  = &req->fake_sg;
 	}
 
 	dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
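
The unmap path now mirrors the map path: when the SCSI midlayer hands down a flat buffer (use_sg == 0), srp_map_data() builds a one-entry scatterlist in req->fake_sg rather than treating request_buffer as a scatterlist, so the unmap must reference that same entry. A sketch of the assumed map-side counterpart (the exact 2.6.16 body may differ slightly):

	/* Map side: construct the single-entry list that
	 * srp_unmap_data() above now references via &req->fake_sg.
	 */
	if (scmnd->use_sg) {
		scat  = scmnd->request_buffer;
		nents = scmnd->use_sg;
	} else {
		sg_init_one(&req->fake_sg, scmnd->request_buffer,
			    scmnd->request_bufflen);
		scat  = &req->fake_sg;
		nents = 1;
	}
	dma_map_sg(target->srp_host->dev->dma_device, scat, nents,
		   scmnd->sc_data_direction);
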
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 51ab8eddb295..5ff77558013b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -3,7 +3,7 @@
  * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
  * Copyright (c) 2004 Intel Corporation. All rights reserved.
  * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
+ * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -55,6 +55,10 @@
 #define IB_MGMT_CLASS_DEVICE_MGMT		0x06
 #define IB_MGMT_CLASS_CM			0x07
 #define IB_MGMT_CLASS_SNMP			0x08
+#define IB_MGMT_CLASS_DEVICE_ADM		0x10
+#define IB_MGMT_CLASS_BOOT_MGMT			0x11
+#define IB_MGMT_CLASS_BIS			0x12
+#define IB_MGMT_CLASS_CONG_MGMT			0x21
 #define IB_MGMT_CLASS_VENDOR_RANGE2_START	0x30
 #define IB_MGMT_CLASS_VENDOR_RANGE2_END		0x4F
 
@@ -117,6 +121,8 @@ enum {
 	IB_MGMT_VENDOR_DATA = 216,
 	IB_MGMT_SA_HDR = 56,
 	IB_MGMT_SA_DATA = 200,
+	IB_MGMT_DEVICE_HDR = 64,
+	IB_MGMT_DEVICE_DATA = 192,
 };
 
 struct ib_mad_hdr {
@@ -603,6 +609,25 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 				    gfp_t gfp_mask);
 
 /**
+ * ib_is_mad_class_rmpp - returns whether given management class
+ * supports RMPP.
+ * @mgmt_class: management class
+ *
+ * This routine returns whether the management class supports RMPP.
+ */
+int ib_is_mad_class_rmpp(u8 mgmt_class);
+
+/**
+ * ib_get_mad_data_offset - returns the data offset for a given
+ * management class.
+ * @mgmt_class: management class
+ *
+ * This routine returns the data offset in the MAD for the management
+ * class requested.
+ */
+int ib_get_mad_data_offset(u8 mgmt_class);
+
+/**
  * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
  * @send_buf: Previously allocated send data buffer.
  * @seg_num: number of segment to return