Diffstat (limited to 'drivers/infiniband/core/mad.c')
-rw-r--r--  drivers/infiniband/core/mad.c  335
1 files changed, 152 insertions, 183 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index a14ca87fda1..88f9f8c9eac 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -579,7 +579,7 @@ static void dequeue_mad(struct ib_mad_list_head *mad_list)
 }
 
 static void snoop_send(struct ib_mad_qp_info *qp_info,
-                       struct ib_send_wr *send_wr,
+                       struct ib_mad_send_buf *send_buf,
                        struct ib_mad_send_wc *mad_send_wc,
                        int mad_snoop_flags)
 {
@@ -597,7 +597,7 @@ static void snoop_send(struct ib_mad_qp_info *qp_info,
                 atomic_inc(&mad_snoop_priv->refcount);
                 spin_unlock_irqrestore(&qp_info->snoop_lock, flags);
                 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent,
-                                                    send_wr, mad_send_wc);
+                                                    send_buf, mad_send_wc);
                 if (atomic_dec_and_test(&mad_snoop_priv->refcount))
                         wake_up(&mad_snoop_priv->wait);
                 spin_lock_irqsave(&qp_info->snoop_lock, flags);
@@ -654,10 +654,10 @@ static void build_smp_wc(u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
  * Return < 0 if error
  */
 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
-                                  struct ib_smp *smp,
-                                  struct ib_send_wr *send_wr)
+                                  struct ib_mad_send_wr_private *mad_send_wr)
 {
         int ret;
+        struct ib_smp *smp = mad_send_wr->send_buf.mad;
         unsigned long flags;
         struct ib_mad_local_private *local;
         struct ib_mad_private *mad_priv;
@@ -666,6 +666,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
         struct ib_device *device = mad_agent_priv->agent.device;
         u8 port_num = mad_agent_priv->agent.port_num;
         struct ib_wc mad_wc;
+        struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
 
         if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
                 ret = -EINVAL;
@@ -745,13 +746,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
                 goto out;
         }
 
-        local->send_wr = *send_wr;
-        local->send_wr.sg_list = local->sg_list;
-        memcpy(local->sg_list, send_wr->sg_list,
-               sizeof *send_wr->sg_list * send_wr->num_sge);
-        local->send_wr.next = NULL;
-        local->tid = send_wr->wr.ud.mad_hdr->tid;
-        local->wr_id = send_wr->wr_id;
+        local->mad_send_wr = mad_send_wr;
         /* Reference MAD agent until send side of local completion handled */
         atomic_inc(&mad_agent_priv->refcount);
         /* Queue local completion to local list */
@@ -781,17 +776,17 @@ static int get_buf_length(int hdr_len, int data_len)
 
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
                                             u32 remote_qpn, u16 pkey_index,
-                                            struct ib_ah *ah, int rmpp_active,
+                                            int rmpp_active,
                                             int hdr_len, int data_len,
                                             gfp_t gfp_mask)
 {
         struct ib_mad_agent_private *mad_agent_priv;
-        struct ib_mad_send_buf *send_buf;
+        struct ib_mad_send_wr_private *mad_send_wr;
         int buf_size;
         void *buf;
 
-        mad_agent_priv = container_of(mad_agent,
-                                      struct ib_mad_agent_private, agent);
+        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
+                                      agent);
         buf_size = get_buf_length(hdr_len, data_len);
 
         if ((!mad_agent->rmpp_version &&
@@ -799,45 +794,40 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
              (!rmpp_active && buf_size > sizeof(struct ib_mad)))
                 return ERR_PTR(-EINVAL);
 
-        buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask);
+        buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
         if (!buf)
                 return ERR_PTR(-ENOMEM);
-        memset(buf, 0, sizeof *send_buf + buf_size);
+        memset(buf, 0, sizeof *mad_send_wr + buf_size);
 
-        send_buf = buf + buf_size;
-        send_buf->mad = buf;
+        mad_send_wr = buf + buf_size;
+        mad_send_wr->send_buf.mad = buf;
 
-        send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device,
-                                            buf, buf_size, DMA_TO_DEVICE);
-        pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr);
-        send_buf->sge.length = buf_size;
-        send_buf->sge.lkey = mad_agent->mr->lkey;
-
-        send_buf->send_wr.wr_id = (unsigned long) send_buf;
-        send_buf->send_wr.sg_list = &send_buf->sge;
-        send_buf->send_wr.num_sge = 1;
-        send_buf->send_wr.opcode = IB_WR_SEND;
-        send_buf->send_wr.send_flags = IB_SEND_SIGNALED;
-        send_buf->send_wr.wr.ud.ah = ah;
-        send_buf->send_wr.wr.ud.mad_hdr = &send_buf->mad->mad_hdr;
-        send_buf->send_wr.wr.ud.remote_qpn = remote_qpn;
-        send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
-        send_buf->send_wr.wr.ud.pkey_index = pkey_index;
+        mad_send_wr->mad_agent_priv = mad_agent_priv;
+        mad_send_wr->sg_list[0].length = buf_size;
+        mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey;
+
+        mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr;
+        mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
+        mad_send_wr->send_wr.num_sge = 1;
+        mad_send_wr->send_wr.opcode = IB_WR_SEND;
+        mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED;
+        mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn;
+        mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY;
+        mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index;
 
         if (rmpp_active) {
-                struct ib_rmpp_mad *rmpp_mad;
-                rmpp_mad = (struct ib_rmpp_mad *)send_buf->mad;
+                struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad;
                 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len -
-                        offsetof(struct ib_rmpp_mad, data) + data_len);
+                                                   IB_MGMT_RMPP_HDR + data_len);
                 rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version;
                 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
                 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr,
                                   IB_MGMT_RMPP_FLAG_ACTIVE);
         }
 
-        send_buf->mad_agent = mad_agent;
+        mad_send_wr->send_buf.mad_agent = mad_agent;
         atomic_inc(&mad_agent_priv->refcount);
-        return send_buf;
+        return &mad_send_wr->send_buf;
 }
 EXPORT_SYMBOL(ib_create_send_mad);
 
@@ -847,10 +837,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
 
         mad_agent_priv = container_of(send_buf->mad_agent,
                                       struct ib_mad_agent_private, agent);
-
-        dma_unmap_single(send_buf->mad_agent->device->dma_device,
-                         pci_unmap_addr(send_buf, mapping),
-                         send_buf->sge.length, DMA_TO_DEVICE);
         kfree(send_buf->mad);
 
         if (atomic_dec_and_test(&mad_agent_priv->refcount))
@@ -861,8 +847,10 @@ EXPORT_SYMBOL(ib_free_send_mad);
 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 {
         struct ib_mad_qp_info *qp_info;
-        struct ib_send_wr *bad_send_wr;
         struct list_head *list;
+        struct ib_send_wr *bad_send_wr;
+        struct ib_mad_agent *mad_agent;
+        struct ib_sge *sge;
         unsigned long flags;
         int ret;
 
@@ -871,10 +859,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
         mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list;
         mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
 
+        mad_agent = mad_send_wr->send_buf.mad_agent;
+        sge = mad_send_wr->sg_list;
+        sge->addr = dma_map_single(mad_agent->device->dma_device,
+                                   mad_send_wr->send_buf.mad, sge->length,
+                                   DMA_TO_DEVICE);
+        pci_unmap_addr_set(mad_send_wr, mapping, sge->addr);
+
         spin_lock_irqsave(&qp_info->send_queue.lock, flags);
         if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
-                ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp,
-                                   &mad_send_wr->send_wr, &bad_send_wr);
+                ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr,
+                                   &bad_send_wr);
                 list = &qp_info->send_queue.list;
         } else {
                 ret = 0;
@@ -886,6 +881,11 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
                 list_add_tail(&mad_send_wr->mad_list.list, list);
         }
         spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
+        if (ret)
+                dma_unmap_single(mad_agent->device->dma_device,
+                                 pci_unmap_addr(mad_send_wr, mapping),
+                                 sge->length, DMA_TO_DEVICE);
+
         return ret;
 }
 
@@ -893,45 +893,28 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
  * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
  * with the registered client
  */
-int ib_post_send_mad(struct ib_mad_agent *mad_agent,
-                     struct ib_send_wr *send_wr,
-                     struct ib_send_wr **bad_send_wr)
+int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
+                     struct ib_mad_send_buf **bad_send_buf)
 {
-        int ret = -EINVAL;
         struct ib_mad_agent_private *mad_agent_priv;
-
-        /* Validate supplied parameters */
-        if (!bad_send_wr)
-                goto error1;
-
-        if (!mad_agent || !send_wr)
-                goto error2;
-
-        if (!mad_agent->send_handler)
-                goto error2;
-
-        mad_agent_priv = container_of(mad_agent,
-                                      struct ib_mad_agent_private,
-                                      agent);
+        struct ib_mad_send_buf *next_send_buf;
+        struct ib_mad_send_wr_private *mad_send_wr;
+        unsigned long flags;
+        int ret = -EINVAL;
 
         /* Walk list of send WRs and post each on send list */
-        while (send_wr) {
-                unsigned long flags;
-                struct ib_send_wr *next_send_wr;
-                struct ib_mad_send_wr_private *mad_send_wr;
-                struct ib_smp *smp;
-
-                /* Validate more parameters */
-                if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG)
-                        goto error2;
+        for (; send_buf; send_buf = next_send_buf) {
 
-                if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler)
-                        goto error2;
-
-                if (!send_wr->wr.ud.mad_hdr) {
-                        printk(KERN_ERR PFX "MAD header must be supplied "
-                               "in WR %p\n", send_wr);
-                        goto error2;
+                mad_send_wr = container_of(send_buf,
+                                           struct ib_mad_send_wr_private,
+                                           send_buf);
+                mad_agent_priv = mad_send_wr->mad_agent_priv;
+
+                if (!send_buf->mad_agent->send_handler ||
+                    (send_buf->timeout_ms &&
+                     !send_buf->mad_agent->recv_handler)) {
+                        ret = -EINVAL;
+                        goto error;
                 }
 
                 /*
@@ -939,40 +922,24 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent,
                  * current one completes, and the user modifies the work
                  * request associated with the completion
                  */
-                next_send_wr = (struct ib_send_wr *)send_wr->next;
+                next_send_buf = send_buf->next;
+                mad_send_wr->send_wr.wr.ud.ah = send_buf->ah;
 
-                smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr;
-                if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-                        ret = handle_outgoing_dr_smp(mad_agent_priv, smp,
-                                                     send_wr);
+                if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
+                    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+                        ret = handle_outgoing_dr_smp(mad_agent_priv,
+                                                     mad_send_wr);
                         if (ret < 0)            /* error */
-                                goto error2;
+                                goto error;
                         else if (ret == 1)      /* locally consumed */
-                                goto next;
+                                continue;
                 }
 
-                /* Allocate MAD send WR tracking structure */
-                mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC);
-                if (!mad_send_wr) {
-                        printk(KERN_ERR PFX "No memory for "
-                               "ib_mad_send_wr_private\n");
-                        ret = -ENOMEM;
-                        goto error2;
-                }
-                memset(mad_send_wr, 0, sizeof *mad_send_wr);
-
-                mad_send_wr->send_wr = *send_wr;
-                mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list;
-                memcpy(mad_send_wr->sg_list, send_wr->sg_list,
-                       sizeof *send_wr->sg_list * send_wr->num_sge);
-                mad_send_wr->wr_id = send_wr->wr_id;
-                mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid;
-                mad_send_wr->mad_agent_priv = mad_agent_priv;
+                mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
                 /* Timeout will be updated after send completes */
-                mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr.
-                                                        ud.timeout_ms);
-                mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries;
-                /* One reference for each work request to QP + response */
+                mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
+                mad_send_wr->retries = send_buf->retries;
+                /* Reference for work request to QP + response */
                 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
                 mad_send_wr->status = IB_WC_SUCCESS;
 
@@ -995,16 +962,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent,
                         list_del(&mad_send_wr->agent_list);
                         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                         atomic_dec(&mad_agent_priv->refcount);
-                        goto error2;
+                        goto error;
                 }
-next:
-                send_wr = next_send_wr;
         }
         return 0;
-
-error2:
-        *bad_send_wr = send_wr;
-error1:
+error:
+        if (bad_send_buf)
+                *bad_send_buf = send_buf;
         return ret;
 }
 EXPORT_SYMBOL(ib_post_send_mad);
@@ -1447,8 +1411,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
                  * of MAD.
                  */
                 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32;
-                list_for_each_entry(entry, &port_priv->agent_list,
-                                    agent_list) {
+                list_for_each_entry(entry, &port_priv->agent_list, agent_list) {
                         if (entry->agent.hi_tid == hi_tid) {
                                 mad_agent = entry;
                                 break;
@@ -1571,8 +1534,7 @@ ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid)
          */
         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
                             agent_list) {
-                if (is_data_mad(mad_agent_priv,
-                                mad_send_wr->send_wr.wr.ud.mad_hdr) &&
+                if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
                     mad_send_wr->tid == tid && mad_send_wr->timeout) {
                         /* Verify request has not been canceled */
                         return (mad_send_wr->status == IB_WC_SUCCESS) ?
@@ -1628,14 +1590,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
                 /* Defined behavior is to complete response before request */
-                mad_recv_wc->wc->wr_id = mad_send_wr->wr_id;
+                mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
                                                    mad_recv_wc);
                 atomic_dec(&mad_agent_priv->refcount);
 
                 mad_send_wc.status = IB_WC_SUCCESS;
                 mad_send_wc.vendor_err = 0;
-                mad_send_wc.wr_id = mad_send_wr->wr_id;
+                mad_send_wc.send_buf = &mad_send_wr->send_buf;
                 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
         } else {
                 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
@@ -1728,11 +1690,11 @@ local:
                 if (ret & IB_MAD_RESULT_CONSUMED)
                         goto out;
                 if (ret & IB_MAD_RESULT_REPLY) {
-                        /* Send response */
-                        if (!agent_send(response, &recv->grh, wc,
-                                        port_priv->device,
-                                        port_priv->port_num))
-                                response = NULL;
+                        agent_send_response(&response->mad.mad,
+                                            &recv->grh, wc,
+                                            port_priv->device,
+                                            port_priv->port_num,
+                                            qp_info->qp->qp_num);
                         goto out;
                 }
         }
@@ -1866,15 +1828,15 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
 
         if (mad_send_wr->status != IB_WC_SUCCESS )
                 mad_send_wc->status = mad_send_wr->status;
-        if (ret != IB_RMPP_RESULT_INTERNAL)
+        if (ret == IB_RMPP_RESULT_INTERNAL)
+                ib_rmpp_send_handler(mad_send_wc);
+        else
                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                    mad_send_wc);
 
         /* Release reference on agent taken when sending */
         if (atomic_dec_and_test(&mad_agent_priv->refcount))
                 wake_up(&mad_agent_priv->wait);
-
-        kfree(mad_send_wr);
         return;
 done:
         spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
@@ -1888,6 +1850,7 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
         struct ib_mad_qp_info *qp_info;
         struct ib_mad_queue *send_queue;
         struct ib_send_wr *bad_send_wr;
+        struct ib_mad_send_wc mad_send_wc;
         unsigned long flags;
         int ret;
 
@@ -1898,6 +1861,9 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
         qp_info = send_queue->qp_info;
 
 retry:
+        dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
+                         pci_unmap_addr(mad_send_wr, mapping),
+                         mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
         queued_send_wr = NULL;
         spin_lock_irqsave(&send_queue->lock, flags);
         list_del(&mad_list->list);
@@ -1914,17 +1880,17 @@ retry:
         }
         spin_unlock_irqrestore(&send_queue->lock, flags);
 
-        /* Restore client wr_id in WC and complete send */
-        wc->wr_id = mad_send_wr->wr_id;
+        mad_send_wc.send_buf = &mad_send_wr->send_buf;
+        mad_send_wc.status = wc->status;
+        mad_send_wc.vendor_err = wc->vendor_err;
         if (atomic_read(&qp_info->snoop_count))
-                snoop_send(qp_info, &mad_send_wr->send_wr,
-                           (struct ib_mad_send_wc *)wc,
+                snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc,
                            IB_MAD_SNOOP_SEND_COMPLETIONS);
-        ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc);
+        ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
 
         if (queued_send_wr) {
                 ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr,
                                    &bad_send_wr);
                 if (ret) {
                         printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret);
                         mad_send_wr = queued_send_wr;
@@ -2066,38 +2032,37 @@ static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
 
         list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
                                  &cancel_list, agent_list) {
-                mad_send_wc.wr_id = mad_send_wr->wr_id;
+                mad_send_wc.send_buf = &mad_send_wr->send_buf;
+                list_del(&mad_send_wr->agent_list);
                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                    &mad_send_wc);
-
-                list_del(&mad_send_wr->agent_list);
-                kfree(mad_send_wr);
                 atomic_dec(&mad_agent_priv->refcount);
         }
 }
 
 static struct ib_mad_send_wr_private*
-find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id)
+find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
+             struct ib_mad_send_buf *send_buf)
 {
         struct ib_mad_send_wr_private *mad_send_wr;
 
         list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
                             agent_list) {
-                if (mad_send_wr->wr_id == wr_id)
+                if (&mad_send_wr->send_buf == send_buf)
                         return mad_send_wr;
         }
 
         list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
                             agent_list) {
-                if (is_data_mad(mad_agent_priv,
-                                mad_send_wr->send_wr.wr.ud.mad_hdr) &&
-                    mad_send_wr->wr_id == wr_id)
+                if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) &&
+                    &mad_send_wr->send_buf == send_buf)
                         return mad_send_wr;
         }
         return NULL;
 }
 
-int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
+int ib_modify_mad(struct ib_mad_agent *mad_agent,
+                  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
 {
         struct ib_mad_agent_private *mad_agent_priv;
         struct ib_mad_send_wr_private *mad_send_wr;
@@ -2107,7 +2072,7 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
         mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
                                       agent);
         spin_lock_irqsave(&mad_agent_priv->lock, flags);
-        mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id);
+        mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
                 spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
                 return -EINVAL;
@@ -2119,7 +2084,7 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
                 mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
         }
 
-        mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms;
+        mad_send_wr->send_buf.timeout_ms = timeout_ms;
         if (active)
                 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
         else
@@ -2130,9 +2095,10 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms)
 }
 EXPORT_SYMBOL(ib_modify_mad);
 
-void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id)
+void ib_cancel_mad(struct ib_mad_agent *mad_agent,
+                   struct ib_mad_send_buf *send_buf)
 {
-        ib_modify_mad(mad_agent, wr_id, 0);
+        ib_modify_mad(mad_agent, send_buf, 0);
 }
 EXPORT_SYMBOL(ib_cancel_mad);
 
@@ -2166,10 +2132,9 @@ static void local_completions(void *data)
                          * Defined behavior is to complete response
                          * before request
                          */
-                        build_smp_wc(local->wr_id,
+                        build_smp_wc((unsigned long) local->mad_send_wr,
                                      be16_to_cpu(IB_LID_PERMISSIVE),
-                                     0 /* pkey index */,
-                                     recv_mad_agent->agent.port_num, &wc);
+                                     0, recv_mad_agent->agent.port_num, &wc);
 
                         local->mad_priv->header.recv_wc.wc = &wc;
                         local->mad_priv->header.recv_wc.mad_len =
@@ -2196,11 +2161,11 @@ local_send_completion:
                 /* Complete send */
                 mad_send_wc.status = IB_WC_SUCCESS;
                 mad_send_wc.vendor_err = 0;
-                mad_send_wc.wr_id = local->wr_id;
+                mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
                 if (atomic_read(&mad_agent_priv->qp_info->snoop_count))
-                        snoop_send(mad_agent_priv->qp_info, &local->send_wr,
-                                   &mad_send_wc,
-                                   IB_MAD_SNOOP_SEND_COMPLETIONS);
+                        snoop_send(mad_agent_priv->qp_info,
+                                   &local->mad_send_wr->send_buf,
+                                   &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS);
                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                    &mad_send_wc);
 
@@ -2221,8 +2186,7 @@ static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
         if (!mad_send_wr->retries--)
                 return -ETIMEDOUT;
 
-        mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr.
-                                                wr.ud.timeout_ms);
+        mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
 
         if (mad_send_wr->mad_agent_priv->agent.rmpp_version) {
                 ret = ib_retry_rmpp(mad_send_wr);
@@ -2285,11 +2249,10 @@ static void timeout_sends(void *data)
                         mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
                 else
                         mad_send_wc.status = mad_send_wr->status;
-                mad_send_wc.wr_id = mad_send_wr->wr_id;
+                mad_send_wc.send_buf = &mad_send_wr->send_buf;
                 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
                                                    &mad_send_wc);
 
-                kfree(mad_send_wr);
                 atomic_dec(&mad_agent_priv->refcount);
                 spin_lock_irqsave(&mad_agent_priv->lock, flags);
         }
@@ -2683,40 +2646,47 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
 
 static void ib_mad_init_device(struct ib_device *device)
 {
-        int num_ports, cur_port, i;
+        int start, end, i;
 
         if (device->node_type == IB_NODE_SWITCH) {
-                num_ports = 1;
-                cur_port = 0;
+                start = 0;
+                end = 0;
         } else {
-                num_ports = device->phys_port_cnt;
-                cur_port = 1;
+                start = 1;
+                end = device->phys_port_cnt;
         }
-        for (i = 0; i < num_ports; i++, cur_port++) {
-                if (ib_mad_port_open(device, cur_port)) {
+
+        for (i = start; i <= end; i++) {
+                if (ib_mad_port_open(device, i)) {
                         printk(KERN_ERR PFX "Couldn't open %s port %d\n",
-                               device->name, cur_port);
-                        goto error_device_open;
+                               device->name, i);
+                        goto error;
                 }
-                if (ib_agent_port_open(device, cur_port)) {
+                if (ib_agent_port_open(device, i)) {
                         printk(KERN_ERR PFX "Couldn't open %s port %d "
                                "for agents\n",
-                               device->name, cur_port);
-                        goto error_device_open;
+                               device->name, i);
+                        goto error_agent;
                 }
         }
         return;
 
-error_device_open:
-        while (i > 0) {
-                cur_port--;
-                if (ib_agent_port_close(device, cur_port))
+error_agent:
+        if (ib_mad_port_close(device, i))
+                printk(KERN_ERR PFX "Couldn't close %s port %d\n",
+                       device->name, i);
+
+error:
+        i--;
+
+        while (i >= start) {
+                if (ib_agent_port_close(device, i))
                         printk(KERN_ERR PFX "Couldn't close %s port %d "
                                "for agents\n",
-                               device->name, cur_port);
-                if (ib_mad_port_close(device, cur_port))
+                               device->name, i);
+                if (ib_mad_port_close(device, i))
                         printk(KERN_ERR PFX "Couldn't close %s port %d\n",
-                               device->name, cur_port);
+                               device->name, i);
                 i--;
         }
 }
@@ -2754,7 +2724,6 @@ static int __init ib_mad_init_module(void)
         int ret;
 
         spin_lock_init(&ib_mad_port_list_lock);
-        spin_lock_init(&ib_agent_port_list_lock);
 
         ib_mad_cache = kmem_cache_create("ib_mad",
                                          sizeof(struct ib_mad_private),
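
The diff above moves MAD send clients from building an ib_send_wr to working with struct ib_mad_send_buf directly. A minimal caller sketch follows; it is illustrative only and not part of this commit. It assumes a client that already holds a registered ib_mad_agent and an address handle, with hdr_len/data_len chosen by the caller's management class; my_send_one_mad() is a placeholder name, while the ib_mad_* calls and the ah/timeout_ms/retries/mad fields are the ones visible in the hunks above.

/*
 * Illustrative sketch of the reworked send path (not part of this diff).
 * my_send_one_mad(), hdr_len and data_len are caller-supplied placeholders.
 */
static int my_send_one_mad(struct ib_mad_agent *agent, struct ib_ah *ah,
                           u32 remote_qpn, u16 pkey_index,
                           int hdr_len, int data_len)
{
        struct ib_mad_send_buf *send_buf;
        int ret;

        /* The AH is no longer an argument of ib_create_send_mad()... */
        send_buf = ib_create_send_mad(agent, remote_qpn, pkey_index,
                                      0, hdr_len, data_len, GFP_KERNEL);
        if (IS_ERR(send_buf))
                return PTR_ERR(send_buf);

        /* ...it is carried in the send buffer, along with timeout/retries. */
        send_buf->ah = ah;
        send_buf->timeout_ms = 1000;
        send_buf->retries = 3;

        /* Fill in the MAD contents through send_buf->mad before posting. */

        ret = ib_post_send_mad(send_buf, NULL);
        if (ret)
                ib_free_send_mad(send_buf);
        return ret;
}

Compared with the old interface, the client no longer allocates a tracking structure or matches completions by wr_id: the send completion hands back the same buffer via mad_send_wc.send_buf, and ib_cancel_mad()/ib_modify_mad() take the ib_mad_send_buf pointer directly.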