author		Eli Cohen <eli@mellanox.co.il>		2010-09-27 20:51:11 -0400
committer	Roland Dreier <rolandd@cisco.com>	2010-10-13 12:38:11 -0400
commit		fac70d51914674ce8ae742ed73441ddb4770ad20 (patch)
tree		9469e5497a237ad7b5fd25213ecaa796745890d1 /drivers/infiniband
parent		7b4c876961ad6ddcfacd69b25fe7e13ff41fe322 (diff)
IB/mad: IBoE supports only QP1 (no QP0)
Since IBoE uses Ethernet as its link layer, there is no central management entity, so there is no need for QP0. QP1 is still needed, since it handles communication between CM agents. This patch skips QP0 and creates only QP1 for IBoE ports.

Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
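As a rough illustration (not part of the patch), the gating pattern applied throughout these four files reduces to checking the port's link layer before touching any QP0/SMI resource. In the sketch below, setup_port_qps() is a hypothetical caller; rdma_port_get_link_layer() and IB_LINK_LAYER_INFINIBAND are the real verbs-layer helpers the patch uses:

	static int setup_port_qps(struct ib_device *device, int port_num)
	{
		/*
		 * IBoE (Ethernet link layer) ports have no subnet manager
		 * and hence no QP0; only IB link-layer ports get SMI
		 * resources.
		 */
		int has_smi = rdma_port_get_link_layer(device, port_num) ==
			      IB_LINK_LAYER_INFINIBAND;

		if (has_smi) {
			/* register/create QP0 (SMI) resources here */
		}

		/* QP1 (GSI) resources are created for IB and IBoE alike */
		return 0;
	}

The teardown paths mirror this: on IBoE ports the QP0-side objects stay NULL, so every destroy or unregister of agent[0] or qp_info[0].qp below is guarded by a NULL check or a link-layer check.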
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/core/agent.c		| 29
-rw-r--r--	drivers/infiniband/core/mad.c		| 27
-rw-r--r--	drivers/infiniband/core/multicast.c	| 23
-rw-r--r--	drivers/infiniband/core/sa_query.c	| 25
4 files changed, 78 insertions(+), 26 deletions(-)
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index ae7c2880e624..91916a8d5de4 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -59,8 +59,8 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
 	struct ib_agent_port_private *entry;
 
 	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-		if (entry->agent[0]->device == device &&
-		    entry->agent[0]->port_num == port_num)
+		if (entry->agent[1]->device == device &&
+		    entry->agent[1]->port_num == port_num)
 			return entry;
 	}
 	return NULL;
@@ -155,14 +155,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		goto error1;
 	}
 
-	/* Obtain send only MAD agent for SMI QP */
-	port_priv->agent[0] = ib_register_mad_agent(device, port_num,
-						    IB_QPT_SMI, NULL, 0,
-						    &agent_send_handler,
-						    NULL, NULL);
-	if (IS_ERR(port_priv->agent[0])) {
-		ret = PTR_ERR(port_priv->agent[0]);
-		goto error2;
+	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+		/* Obtain send only MAD agent for SMI QP */
+		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+							    IB_QPT_SMI, NULL, 0,
+							    &agent_send_handler,
+							    NULL, NULL);
+		if (IS_ERR(port_priv->agent[0])) {
+			ret = PTR_ERR(port_priv->agent[0]);
+			goto error2;
+		}
 	}
 
 	/* Obtain send only MAD agent for GSI QP */
@@ -182,7 +184,8 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	return 0;
 
 error3:
-	ib_unregister_mad_agent(port_priv->agent[0]);
+	if (port_priv->agent[0])
+		ib_unregister_mad_agent(port_priv->agent[0]);
 error2:
 	kfree(port_priv);
 error1:
@@ -205,7 +208,9 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
 	ib_unregister_mad_agent(port_priv->agent[1]);
-	ib_unregister_mad_agent(port_priv->agent[0]);
+	if (port_priv->agent[0])
+		ib_unregister_mad_agent(port_priv->agent[0]);
+
 	kfree(port_priv);
 	return 0;
 }
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef1304f151dc..822cfdcd9f78 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2598,6 +2598,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
 	struct ib_mad_private *recv;
 	struct ib_mad_list_head *mad_list;
 
+	if (!qp_info->qp)
+		return;
+
 	while (!list_empty(&qp_info->recv_queue.list)) {
 
 		mad_list = list_entry(qp_info->recv_queue.list.next,
@@ -2639,6 +2642,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
 		qp = port_priv->qp_info[i].qp;
+		if (!qp)
+			continue;
+
 		/*
 		 * PKey index for QP1 is irrelevant but
 		 * one is needed for the Reset to Init transition
@@ -2680,6 +2686,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	}
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
+		if (!port_priv->qp_info[i].qp)
+			continue;
+
 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 		if (ret) {
 			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
@@ -2758,6 +2767,9 @@ error:
 
 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
 {
+	if (!qp_info->qp)
+		return;
+
 	ib_destroy_qp(qp_info->qp);
 	kfree(qp_info->snoop_table);
 }
@@ -2773,6 +2785,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	struct ib_mad_port_private *port_priv;
 	unsigned long flags;
 	char name[sizeof "ib_mad123"];
+	int has_smi;
 
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2788,7 +2801,11 @@ static int ib_mad_port_open(struct ib_device *device,
 	init_mad_qp(port_priv, &port_priv->qp_info[0]);
 	init_mad_qp(port_priv, &port_priv->qp_info[1]);
 
-	cq_size = (mad_sendq_size + mad_recvq_size) * 2;
+	cq_size = mad_sendq_size + mad_recvq_size;
+	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+	if (has_smi)
+		cq_size *= 2;
+
 	port_priv->cq = ib_create_cq(port_priv->device,
 				     ib_mad_thread_completion_handler,
 				     NULL, port_priv, cq_size, 0);
@@ -2812,9 +2829,11 @@ static int ib_mad_port_open(struct ib_device *device,
 		goto error5;
 	}
 
-	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
-	if (ret)
-		goto error6;
+	if (has_smi) {
+		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
+		if (ret)
+			goto error6;
+	}
 	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
 	if (ret)
 		goto error7;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a519801dcfb7..68b4162fd9d2 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -774,6 +774,10 @@ static void mcast_event_handler(struct ib_event_handler *handler,
 	int index;
 
 	dev = container_of(handler, struct mcast_device, event_handler);
+	if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
+	    IB_LINK_LAYER_INFINIBAND)
+		return;
+
 	index = event->element.port_num - dev->start_port;
 
 	switch (event->event) {
@@ -796,6 +800,7 @@ static void mcast_add_one(struct ib_device *device)
 	struct mcast_device *dev;
 	struct mcast_port *port;
 	int i;
+	int count = 0;
 
 	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
 		return;
@@ -813,6 +818,9 @@ static void mcast_add_one(struct ib_device *device)
 	}
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
+		if (rdma_port_get_link_layer(device, dev->start_port + i) !=
+		    IB_LINK_LAYER_INFINIBAND)
+			continue;
 		port = &dev->port[i];
 		port->dev = dev;
 		port->port_num = dev->start_port + i;
@@ -820,6 +828,12 @@ static void mcast_add_one(struct ib_device *device)
 		port->table = RB_ROOT;
 		init_completion(&port->comp);
 		atomic_set(&port->refcount, 1);
+		++count;
+	}
+
+	if (!count) {
+		kfree(dev);
+		return;
 	}
 
 	dev->device = device;
@@ -843,9 +857,12 @@ static void mcast_remove_one(struct ib_device *device)
 	flush_workqueue(mcast_wq);
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-		port = &dev->port[i];
-		deref_port(port);
-		wait_for_completion(&port->comp);
+		if (rdma_port_get_link_layer(device, dev->start_port + i) ==
+		    IB_LINK_LAYER_INFINIBAND) {
+			port = &dev->port[i];
+			deref_port(port);
+			wait_for_completion(&port->comp);
+		}
 	}
 
 	kfree(dev);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7e1ffd8ccd5c..27674c790a73 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -416,6 +416,9 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
 	struct ib_sa_port *port =
 		&sa_dev->port[event->element.port_num - sa_dev->start_port];
 
+	if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+		return;
+
 	spin_lock_irqsave(&port->ah_lock, flags);
 	if (port->sm_ah)
 		kref_put(&port->sm_ah->ref, free_sm_ah);
@@ -1007,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
 		e = device->phys_port_cnt;
 	}
 
-	sa_dev = kmalloc(sizeof *sa_dev +
+	sa_dev = kzalloc(sizeof *sa_dev +
 			 (e - s + 1) * sizeof (struct ib_sa_port),
 			 GFP_KERNEL);
 	if (!sa_dev)
@@ -1017,9 +1020,12 @@ static void ib_sa_add_one(struct ib_device *device)
 	sa_dev->end_port = e;
 
 	for (i = 0; i <= e - s; ++i) {
+		spin_lock_init(&sa_dev->port[i].ah_lock);
+		if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+			continue;
+
 		sa_dev->port[i].sm_ah = NULL;
 		sa_dev->port[i].port_num = i + s;
-		spin_lock_init(&sa_dev->port[i].ah_lock);
 
 		sa_dev->port[i].agent =
 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
@@ -1045,13 +1051,15 @@ static void ib_sa_add_one(struct ib_device *device)
 		goto err;
 
 	for (i = 0; i <= e - s; ++i)
-		update_sm_ah(&sa_dev->port[i].update_task);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+			update_sm_ah(&sa_dev->port[i].update_task);
 
 	return;
 
 err:
 	while (--i >= 0)
-		ib_unregister_mad_agent(sa_dev->port[i].agent);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+			ib_unregister_mad_agent(sa_dev->port[i].agent);
 
 	kfree(sa_dev);
 
@@ -1071,9 +1079,12 @@ static void ib_sa_remove_one(struct ib_device *device)
 	flush_scheduled_work();
 
 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
-		ib_unregister_mad_agent(sa_dev->port[i].agent);
-		if (sa_dev->port[i].sm_ah)
-			kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+			ib_unregister_mad_agent(sa_dev->port[i].agent);
+			if (sa_dev->port[i].sm_ah)
+				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+		}
+
 	}
 
 	kfree(sa_dev);