 31 files changed, 633 insertions(+), 159 deletions(-)
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 6e35eccc9caa..0f9a84c1046a 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -2,6 +2,7 @@ menuconfig INFINIBAND
 	tristate "InfiniBand support"
 	depends on PCI || BROKEN
 	depends on HAS_IOMEM
+	depends on NET
 	---help---
 	  Core support for InfiniBand (IB). Make sure to also select
 	  any protocols you wish to use as well as drivers for your
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index cb1ab3ea4998..c8bbaef1becb 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
 				$(user_access-y)
 
 ib_core-y :=			packer.o ud_header.o verbs.o sysfs.o \
-				device.o fmr_pool.o cache.o
+				device.o fmr_pool.o cache.o netlink.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 
 ib_mad-y :=			mad.o smi.o agent.o mad_rmpp.o
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f804e28e1ebb..f62f52fb9ece 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3639,8 +3639,16 @@ static struct kobj_type cm_port_obj_type = {
 	.release = cm_release_port_obj
 };
 
+static char *cm_devnode(struct device *dev, mode_t *mode)
+{
+	*mode = 0666;
+	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
 struct class cm_class = {
+	.owner   = THIS_MODULE,
 	.name    = "infiniband_cm",
+	.devnode = cm_devnode,
 };
 EXPORT_SYMBOL(cm_class);
 
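Note: the cm_devnode() hook added above is what places the CM device node under /dev/infiniband/ and relaxes its permissions to 0666. A minimal sketch of the same class-devnode pattern, with purely illustrative names (example_class and example_devnode are not part of this patch):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Called by the driver core when a node for a device in this class is
 * created; the returned string is the node path relative to /dev. */
static char *example_devnode(struct device *dev, mode_t *mode)
{
	if (mode)
		*mode = 0666;	/* world read/write, as for the CM node above */
	return kasprintf(GFP_KERNEL, "example/%s", dev_name(dev));
}

static struct class example_class = {
	.owner   = THIS_MODULE,
	.name    = "example",
	.devnode = example_devnode,
};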
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 99dde874fbbd..b6a33b3c516d 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -47,6 +47,7 @@
 
 #include <rdma/rdma_cm.h>
 #include <rdma/rdma_cm_ib.h>
+#include <rdma/rdma_netlink.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_sa.h>
@@ -89,20 +90,6 @@ struct cma_device {
 	struct list_head	id_list;
 };
 
-enum cma_state {
-	CMA_IDLE,
-	CMA_ADDR_QUERY,
-	CMA_ADDR_RESOLVED,
-	CMA_ROUTE_QUERY,
-	CMA_ROUTE_RESOLVED,
-	CMA_CONNECT,
-	CMA_DISCONNECT,
-	CMA_ADDR_BOUND,
-	CMA_LISTEN,
-	CMA_DEVICE_REMOVAL,
-	CMA_DESTROYING
-};
-
 struct rdma_bind_list {
 	struct idr		*ps;
 	struct hlist_head	owners;
@@ -126,7 +113,7 @@ struct rdma_id_private {
 	struct list_head	mc_list;
 
 	int			internal_id;
-	enum cma_state		state;
+	enum rdma_cm_state	state;
 	spinlock_t		lock;
 	struct mutex		qp_mutex;
 
@@ -146,6 +133,7 @@ struct rdma_id_private {
 	u32			seq_num;
 	u32			qkey;
 	u32			qp_num;
+	pid_t			owner;
 	u8			srq;
 	u8			tos;
 	u8			reuseaddr;
@@ -165,8 +153,8 @@ struct cma_multicast {
 struct cma_work {
 	struct work_struct	work;
 	struct rdma_id_private	*id;
-	enum cma_state		old_state;
-	enum cma_state		new_state;
+	enum rdma_cm_state	old_state;
+	enum rdma_cm_state	new_state;
 	struct rdma_cm_event	event;
 };
 
@@ -217,7 +205,7 @@ struct sdp_hah {
 #define CMA_VERSION 0x00
 #define SDP_MAJ_VERSION 0x2
 
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
 	unsigned long flags;
 	int ret;
@@ -229,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
 }
 
 static int cma_comp_exch(struct rdma_id_private *id_priv,
-			 enum cma_state comp, enum cma_state exch)
+			 enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
 	unsigned long flags;
 	int ret;
@@ -241,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	return ret;
 }
 
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
-			       enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+				   enum rdma_cm_state exch)
 {
 	unsigned long flags;
-	enum cma_state old;
+	enum rdma_cm_state old;
 
 	spin_lock_irqsave(&id_priv->lock, flags);
 	old = id_priv->state;
@@ -279,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
 	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
 }
 
-static inline int cma_is_ud_ps(enum rdma_port_space ps)
-{
-	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
-}
-
 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 			      struct cma_device *cma_dev)
 {
@@ -413,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 }
 
 static int cma_disable_callback(struct rdma_id_private *id_priv,
-				enum cma_state state)
+				enum rdma_cm_state state)
 {
 	mutex_lock(&id_priv->handler_mutex);
 	if (id_priv->state != state) {
@@ -429,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
 }
 
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
-				  void *context, enum rdma_port_space ps)
+				  void *context, enum rdma_port_space ps,
+				  enum ib_qp_type qp_type)
 {
 	struct rdma_id_private *id_priv;
 
@@ -437,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	if (!id_priv)
 		return ERR_PTR(-ENOMEM);
 
-	id_priv->state = CMA_IDLE;
+	id_priv->owner = task_pid_nr(current);
+	id_priv->state = RDMA_CM_IDLE;
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
+	id_priv->id.qp_type = qp_type;
 	spin_lock_init(&id_priv->lock);
 	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
@@ -508,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 	if (IS_ERR(qp))
 		return PTR_ERR(qp);
 
-	if (cma_is_ud_ps(id_priv->id.ps))
+	if (id->qp_type == IB_QPT_UD)
 		ret = cma_init_ud_qp(id_priv, qp);
 	else
 		ret = cma_init_conn_qp(id_priv, qp);
@@ -636,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	qp_attr->port_num = id_priv->id.port_num;
 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
-	if (cma_is_ud_ps(id_priv->id.ps)) {
+	if (id_priv->id.qp_type == IB_QPT_UD) {
 		ret = cma_set_qkey(id_priv);
 		if (ret)
 			return ret;
@@ -659,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 	id_priv = container_of(id, struct rdma_id_private, id);
 	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
+		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 }
 
 static void cma_cancel_operation(struct rdma_id_private *id_priv,
-				 enum cma_state state)
+				 enum rdma_cm_state state)
 {
 	switch (state) {
-	case CMA_ADDR_QUERY:
+	case RDMA_CM_ADDR_QUERY:
 		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
 		break;
-	case CMA_ROUTE_QUERY:
+	case RDMA_CM_ROUTE_QUERY:
 		cma_cancel_route(id_priv);
 		break;
-	case CMA_LISTEN:
+	case RDMA_CM_LISTEN:
 		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
 				&& !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
 	struct rdma_id_private *id_priv;
-	enum cma_state state;
+	enum rdma_cm_state state;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	state = cma_exch(id_priv, CMA_DESTROYING);
+	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
 	/*
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int ret = 0;
 
 	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_CONNECT)) ||
+		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
 	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_DISCONNECT)))
+		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.status = -ETIMEDOUT; /* fall through */
 	case IB_CM_DREQ_RECEIVED:
 	case IB_CM_DREP_RECEIVED:
-		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+				   RDMA_CM_DISCONNECT))
 			goto out;
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1101,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		goto err;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
 	if (IS_ERR(id))
 		goto err;
 
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 
 destroy_id:
@@ -1152,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	int ret;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps, IB_QPT_UD);
 	if (IS_ERR(id))
 		return NULL;
 
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	}
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 err:
 	rdma_destroy_id(id);
@@ -1201,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (cma_is_ud_ps(listen_id->id.ps)) {
+	if (listen_id->id.qp_type == IB_QPT_UD) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
@@ -1243,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (cma_comp(conn_id, CMA_CONNECT) &&
-	    !cma_is_ud_ps(conn_id->id.ps))
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 	mutex_unlock(&lock);
 	mutex_unlock(&conn_id->handler_mutex);
@@ -1257,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	conn_id->cm_id.ib = NULL;
 
 release_conn_id:
-	cma_exch(conn_id, CMA_DESTROYING);
+	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
 	rdma_destroy_id(&conn_id->id);
 
@@ -1328,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1371,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1393,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct ib_device_attr attr;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
 				   listen_id->id.context,
-				   RDMA_PS_TCP);
+				   RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(new_cm_id)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	conn_id->state = CMA_CONNECT;
+	conn_id->state = RDMA_CM_CONNECT;
 
 	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
@@ -1461,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
+		cma_exch(conn_id, RDMA_CM_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
 		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
@@ -1548,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	struct rdma_cm_id *id;
 	int ret;
 
-	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
+	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
+			    id_priv->id.qp_type);
 	if (IS_ERR(id))
 		return;
 
 	dev_id_priv = container_of(id, struct rdma_id_private, id);
 
-	dev_id_priv->state = CMA_ADDR_BOUND;
+	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
 	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 		route->num_paths = 1;
 		*route->path_rec = *path_rec;
 	} else {
-		work->old_state = CMA_ROUTE_QUERY;
-		work->new_state = CMA_ADDR_RESOLVED;
+		work->old_state = RDMA_CM_ROUTE_QUERY;
+		work->new_state = RDMA_CM_ADDR_RESOLVED;
 		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
 		work->event.status = status;
 	}
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 out:
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
 	int destroy = 0;
 
 	mutex_lock(&id_priv->handler_mutex);
-	if (id_priv->state == CMA_DESTROYING ||
-	    id_priv->state == CMA_DEVICE_REMOVAL)
+	if (id_priv->state == RDMA_CM_DESTROYING ||
+	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
 	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+			   RDMA_CM_ROUTE_RESOLVED))
 		return -EINVAL;
 
 	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	id->route.num_paths = num_paths;
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	work->event.status = 0;
 
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
 
 	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+				   RDMA_CM_ADDR_BOUND))
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	}
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ADDR_QUERY;
-	work->new_state = CMA_ADDR_RESOLVED;
+	work->old_state = RDMA_CM_ADDR_QUERY;
+	work->new_state = RDMA_CM_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		ret = cma_bind_addr(id, src_addr, dst_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
 	id_priv = container_of(id, struct rdma_id_private, id);
 	spin_lock_irqsave(&id_priv->lock, flags);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		id_priv->reuseaddr = reuse;
 		ret = 0;
 	} else {
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 		if (id_priv == cur_id)
 			continue;
 
-		if ((cur_id->state == CMA_LISTEN) ||
+		if ((cur_id->state == RDMA_CM_LISTEN) ||
 		    !reuseaddr || !cur_id->reuseaddr) {
 			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
 			if (cma_any_addr(cur_addr))
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
 		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
 		return -EINVAL;
 
 	if (id_priv->reuseaddr) {
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
 	return 0;
 err:
 	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_listen);
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		return -EAFNOSUPPORT;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
 		return -EINVAL;
 
 	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2360,7 +2350,7 @@ err2:
 	if (id_priv->cma_dev)
 		cma_release_dev(id_priv);
 err1:
-	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_bind_addr);
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp) {
@@ -2655,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_connect);
@@ -2758,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+
+	id_priv->owner = task_pid_nr(current);
+
+	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp && conn_param) {
@@ -2768,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 						conn_param->private_data,
 						conn_param->private_data_len);
@@ -2829,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
 						private_data, private_data_len);
 		else
@@ -2887,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
-	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return 0;
 
 	mutex_lock(&id_priv->qp_mutex);
@@ -2912,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
@@ -3095,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return -EINVAL;
 
 	mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3261,19 +3254,19 @@ static void cma_add_one(struct ib_device *device)
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event;
-	enum cma_state state;
+	enum rdma_cm_state state;
 	int ret = 0;
 
 	/* Record that we want to remove the device */
-	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
-	if (state == CMA_DESTROYING)
+	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+	if (state == RDMA_CM_DESTROYING)
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
 	mutex_lock(&id_priv->handler_mutex);
 
 	/* Check for destruction from another callback. */
-	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
 		goto out;
 
 	memset(&event, 0, sizeof event);
@@ -3328,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device)
 	kfree(cma_dev);
 }
 
+static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlmsghdr *nlh;
+	struct rdma_cm_id_stats *id_stats;
+	struct rdma_id_private *id_priv;
+	struct rdma_cm_id *id = NULL;
+	struct cma_device *cma_dev;
+	int i_dev = 0, i_id = 0;
+
+	/*
+	 * We export all of the IDs as a sequence of messages.  Each
+	 * ID gets its own netlink message.
+	 */
+	mutex_lock(&lock);
+
+	list_for_each_entry(cma_dev, &dev_list, list) {
+		if (i_dev < cb->args[0]) {
+			i_dev++;
+			continue;
+		}
+
+		i_id = 0;
+		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+			if (i_id < cb->args[1]) {
+				i_id++;
+				continue;
+			}
+
+			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
+						sizeof *id_stats, RDMA_NL_RDMA_CM,
+						RDMA_NL_RDMA_CM_ID_STATS);
+			if (!id_stats)
+				goto out;
+
+			memset(id_stats, 0, sizeof *id_stats);
+			id = &id_priv->id;
+			id_stats->node_type = id->route.addr.dev_addr.dev_type;
+			id_stats->port_num = id->port_num;
+			id_stats->bound_dev_if =
+				id->route.addr.dev_addr.bound_dev_if;
+
+			if (id->route.addr.src_addr.ss_family == AF_INET) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			} else if (id->route.addr.src_addr.ss_family == AF_INET6) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			}
+
+			id_stats->pid		= id_priv->owner;
+			id_stats->port_space	= id->ps;
+			id_stats->cm_state	= id_priv->state;
+			id_stats->qp_num	= id_priv->qp_num;
+			id_stats->qp_type	= id->qp_type;
+
+			i_id++;
+		}
+
+		cb->args[1] = 0;
+		i_dev++;
+	}
+
+out:
+	mutex_unlock(&lock);
+	cb->args[0] = i_dev;
+	cb->args[1] = i_id;
+
+	return skb->len;
+}
+
+static const struct ibnl_client_cbs cma_cb_table[] = {
+	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
+};
+
 static int __init cma_init(void)
 {
 	int ret;
@@ -3343,6 +3430,10 @@ static int __init cma_init(void)
 	ret = ib_register_client(&cma_client);
 	if (ret)
 		goto err;
+
+	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+
 	return 0;
 
 err:
@@ -3355,6 +3446,7 @@ err:
 
 static void __exit cma_cleanup(void)
 {
+	ibnl_remove_client(RDMA_NL_RDMA_CM);
 	ib_unregister_client(&cma_client);
 	unregister_netdevice_notifier(&cma_nb);
 	rdma_addr_unregister_client(&addr_client);
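Taken together, the cma.c changes replace the file-private cma_state enum with the shared rdma_cm_state one, record the owning PID so the new netlink dump can report it, and move the UD-versus-connected decision from the port space (the deleted cma_is_ud_ps()) to an explicit QP type supplied at ID creation. That last point changes the rdma_create_id() prototype for every consumer; a sketch of an updated caller (the handler and function names below are illustrative, not from the patch):

#include <rdma/rdma_cm.h>

static int example_event_handler(struct rdma_cm_id *id,
				 struct rdma_cm_event *event)
{
	/* Returning nonzero from a handler destroys the id */
	return 0;
}

static struct rdma_cm_id *example_create_rc_id(void *context)
{
	/* Before this patch: rdma_create_id(handler, context, RDMA_PS_TCP).
	 * The QP type is now explicit: IB_QPT_RC here, or IB_QPT_UD for
	 * RDMA_PS_UDP/RDMA_PS_IPOIB style users. */
	return rdma_create_id(example_event_handler, context,
			      RDMA_PS_TCP, IB_QPT_RC);
}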
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index f793bf2f5da7..4007f721d25d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <rdma/rdma_netlink.h>
 
 #include "core_priv.h"
 
@@ -725,22 +726,40 @@ static int __init ib_core_init(void)
 		return -ENOMEM;
 
 	ret = ib_sysfs_setup();
-	if (ret)
+	if (ret) {
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+		goto err;
+	}
+
+	ret = ibnl_init();
+	if (ret) {
+		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+		goto err_sysfs;
+	}
 
 	ret = ib_cache_setup();
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
-		ib_sysfs_cleanup();
-		destroy_workqueue(ib_wq);
+		goto err_nl;
 	}
 
+	return 0;
+
+err_nl:
+	ibnl_cleanup();
+
+err_sysfs:
+	ib_sysfs_cleanup();
+
+err:
+	destroy_workqueue(ib_wq);
 	return ret;
 }
 
 static void __exit ib_core_cleanup(void)
 {
 	ib_cache_cleanup();
+	ibnl_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
 	destroy_workqueue(ib_wq);
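ib_core_init() previously warned on a sysfs failure but carried on, and only partially unwound on a cache failure; the rewrite above converts it to the usual goto ladder, where each failure tears down exactly what was set up, in reverse order. A compact sketch of that idiom with stub initializers (all names illustrative):

#include <linux/init.h>

/* Stubs standing in for ib_sysfs_setup()/ibnl_init()/ib_cache_setup() */
static int setup_first(void)  { return 0; }
static int setup_second(void) { return 0; }
static void teardown_first(void) { }

static int __init example_init(void)
{
	int ret;

	ret = setup_first();
	if (ret)
		goto err;		/* nothing else to unwind yet */

	ret = setup_second();
	if (ret)
		goto err_first;		/* undo setup_first() only */

	return 0;

err_first:
	teardown_first();
err:
	return ret;
}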
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 822cfdcd9f78..b4d8672a3e4e 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -276,6 +276,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		goto error1;
 	}
 
+	/* Verify the QP requested is supported.  For example, Ethernet devices
+	 * will not have QP0 */
+	if (!port_priv->qp_info[qpn].qp) {
+		ret = ERR_PTR(-EPROTONOSUPPORT);
+		goto error1;
+	}
+
 	/* Allocate structures */
 	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 	if (!mad_agent_priv) {
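With the check above, asking for a MAD agent on a QP the port does not implement (QP0 on a RoCE/Ethernet port, for instance) fails up front with -EPROTONOSUPPORT instead of tripping over a NULL QP later. A hedged sketch of a caller distinguishing that case (the function name is illustrative, and the handlers are simply omitted here for brevity):

#include <rdma/ib_mad.h>

static int example_probe_smi(struct ib_device *device, u8 port_num)
{
	struct ib_mad_agent *agent;

	agent = ib_register_mad_agent(device, port_num, IB_QPT_SMI,
				      NULL, 0, NULL, NULL, NULL);
	if (IS_ERR(agent)) {
		/* A RoCE port has no QP0, so this is now a clean failure */
		if (PTR_ERR(agent) == -EPROTONOSUPPORT)
			printk(KERN_INFO "port %u: no QP0, skipping SMI\n",
			       port_num);
		return PTR_ERR(agent);
	}

	ib_unregister_mad_agent(agent);
	return 0;
}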
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c new file mode 100644 index 000000000000..4a5abaf0a25c --- /dev/null +++ b/drivers/infiniband/core/netlink.c | |||
@@ -0,0 +1,190 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010 Voltaire Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ | ||
34 | |||
35 | #include <net/netlink.h> | ||
36 | #include <net/net_namespace.h> | ||
37 | #include <net/sock.h> | ||
38 | #include <rdma/rdma_netlink.h> | ||
39 | |||
40 | struct ibnl_client { | ||
41 | struct list_head list; | ||
42 | int index; | ||
43 | int nops; | ||
44 | const struct ibnl_client_cbs *cb_table; | ||
45 | }; | ||
46 | |||
47 | static DEFINE_MUTEX(ibnl_mutex); | ||
48 | static struct sock *nls; | ||
49 | static LIST_HEAD(client_list); | ||
50 | |||
51 | int ibnl_add_client(int index, int nops, | ||
52 | const struct ibnl_client_cbs cb_table[]) | ||
53 | { | ||
54 | struct ibnl_client *cur; | ||
55 | struct ibnl_client *nl_client; | ||
56 | |||
57 | nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL); | ||
58 | if (!nl_client) | ||
59 | return -ENOMEM; | ||
60 | |||
61 | nl_client->index = index; | ||
62 | nl_client->nops = nops; | ||
63 | nl_client->cb_table = cb_table; | ||
64 | |||
65 | mutex_lock(&ibnl_mutex); | ||
66 | |||
67 | list_for_each_entry(cur, &client_list, list) { | ||
68 | if (cur->index == index) { | ||
69 | pr_warn("Client for %d already exists\n", index); | ||
70 | mutex_unlock(&ibnl_mutex); | ||
71 | kfree(nl_client); | ||
72 | return -EINVAL; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | list_add_tail(&nl_client->list, &client_list); | ||
77 | |||
78 | mutex_unlock(&ibnl_mutex); | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | EXPORT_SYMBOL(ibnl_add_client); | ||
83 | |||
84 | int ibnl_remove_client(int index) | ||
85 | { | ||
86 | struct ibnl_client *cur, *next; | ||
87 | |||
88 | mutex_lock(&ibnl_mutex); | ||
89 | list_for_each_entry_safe(cur, next, &client_list, list) { | ||
90 | if (cur->index == index) { | ||
91 | list_del(&(cur->list)); | ||
92 | mutex_unlock(&ibnl_mutex); | ||
93 | kfree(cur); | ||
94 | return 0; | ||
95 | } | ||
96 | } | ||
97 | pr_warn("Can't remove callback for client idx %d. Not found\n", index); | ||
98 | mutex_unlock(&ibnl_mutex); | ||
99 | |||
100 | return -EINVAL; | ||
101 | } | ||
102 | EXPORT_SYMBOL(ibnl_remove_client); | ||
103 | |||
104 | void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | ||
105 | int len, int client, int op) | ||
106 | { | ||
107 | unsigned char *prev_tail; | ||
108 | |||
109 | prev_tail = skb_tail_pointer(skb); | ||
110 | *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), | ||
111 | len, NLM_F_MULTI); | ||
112 | (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; | ||
113 | return NLMSG_DATA(*nlh); | ||
114 | |||
115 | nlmsg_failure: | ||
116 | nlmsg_trim(skb, prev_tail); | ||
117 | return NULL; | ||
118 | } | ||
119 | EXPORT_SYMBOL(ibnl_put_msg); | ||
120 | |||
121 | int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
122 | int len, void *data, int type) | ||
123 | { | ||
124 | unsigned char *prev_tail; | ||
125 | |||
126 | prev_tail = skb_tail_pointer(skb); | ||
127 | NLA_PUT(skb, type, len, data); | ||
128 | nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; | ||
129 | return 0; | ||
130 | |||
131 | nla_put_failure: | ||
132 | nlmsg_trim(skb, prev_tail - nlh->nlmsg_len); | ||
133 | return -EMSGSIZE; | ||
134 | } | ||
135 | EXPORT_SYMBOL(ibnl_put_attr); | ||
136 | |||
137 | static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | ||
138 | { | ||
139 | struct ibnl_client *client; | ||
140 | int type = nlh->nlmsg_type; | ||
141 | int index = RDMA_NL_GET_CLIENT(type); | ||
142 | int op = RDMA_NL_GET_OP(type); | ||
143 | |||
144 | list_for_each_entry(client, &client_list, list) { | ||
145 | if (client->index == index) { | ||
146 | if (op < 0 || op >= client->nops || | ||
147 | !client->cb_table[op].dump) | ||
148 | return -EINVAL; | ||
149 | return netlink_dump_start(nls, skb, nlh, | ||
150 | client->cb_table[op].dump, | ||
151 | NULL); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | pr_info("Index %d wasn't found in client list\n", index); | ||
156 | return -EINVAL; | ||
157 | } | ||
158 | |||
159 | static void ibnl_rcv(struct sk_buff *skb) | ||
160 | { | ||
161 | mutex_lock(&ibnl_mutex); | ||
162 | netlink_rcv_skb(skb, &ibnl_rcv_msg); | ||
163 | mutex_unlock(&ibnl_mutex); | ||
164 | } | ||
165 | |||
166 | int __init ibnl_init(void) | ||
167 | { | ||
168 | nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv, | ||
169 | NULL, THIS_MODULE); | ||
170 | if (!nls) { | ||
171 | pr_warn("Failed to create netlink socket\n"); | ||
172 | return -ENOMEM; | ||
173 | } | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | void ibnl_cleanup(void) | ||
179 | { | ||
180 | struct ibnl_client *cur, *next; | ||
181 | |||
182 | mutex_lock(&ibnl_mutex); | ||
183 | list_for_each_entry_safe(cur, next, &client_list, list) { | ||
184 | list_del(&(cur->list)); | ||
185 | kfree(cur); | ||
186 | } | ||
187 | mutex_unlock(&ibnl_mutex); | ||
188 | |||
189 | netlink_kernel_release(nls); | ||
190 | } | ||
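
The file above is the entire netlink core: a mutex-protected list of clients, each pairing a client index with a table of ops, with ibnl_rcv_msg() decoding nlmsg_type back into (client, op) and starting a netlink dump. A minimal sketch of how a client module plugs into it, assuming the RDMA_NL_RDMA_CM constants from the new <rdma/rdma_netlink.h> below; my_cm_dump() is a hypothetical callback, not code from this patch:

#include <rdma/rdma_netlink.h>

/* Hypothetical dump callback: fills @skb via ibnl_put_msg()/ibnl_put_attr()
 * and returns skb->len, so netlink_dump_start() calls back until done. */
static int my_cm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return skb->len;
}

static const struct ibnl_client_cbs my_cm_cb_table[] = {
	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = my_cm_dump },
};

static int __init my_cm_init(void)
{
	return ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS,
			       my_cm_cb_table);
}

static void __exit my_cm_exit(void)
{
	ibnl_remove_client(RDMA_NL_RDMA_CM);
}

Registering the same index twice fails with -EINVAL, so a client pairs ibnl_add_client() at load with ibnl_remove_client() at unload.
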
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index b3fa798525b2..71be5eebd683 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -367,13 +367,28 @@ done: | |||
367 | return ret; | 367 | return ret; |
368 | } | 368 | } |
369 | 369 | ||
370 | static ssize_t ucma_create_id(struct ucma_file *file, | 370 | static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type) |
371 | const char __user *inbuf, | 371 | { |
372 | int in_len, int out_len) | 372 | switch (cmd->ps) { |
373 | case RDMA_PS_TCP: | ||
374 | *qp_type = IB_QPT_RC; | ||
375 | return 0; | ||
376 | case RDMA_PS_UDP: | ||
377 | case RDMA_PS_IPOIB: | ||
378 | *qp_type = IB_QPT_UD; | ||
379 | return 0; | ||
380 | default: | ||
381 | return -EINVAL; | ||
382 | } | ||
383 | } | ||
384 | |||
385 | static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, | ||
386 | int in_len, int out_len) | ||
373 | { | 387 | { |
374 | struct rdma_ucm_create_id cmd; | 388 | struct rdma_ucm_create_id cmd; |
375 | struct rdma_ucm_create_id_resp resp; | 389 | struct rdma_ucm_create_id_resp resp; |
376 | struct ucma_context *ctx; | 390 | struct ucma_context *ctx; |
391 | enum ib_qp_type qp_type; | ||
377 | int ret; | 392 | int ret; |
378 | 393 | ||
379 | if (out_len < sizeof(resp)) | 394 | if (out_len < sizeof(resp)) |
@@ -382,6 +397,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, | |||
382 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | 397 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
383 | return -EFAULT; | 398 | return -EFAULT; |
384 | 399 | ||
400 | ret = ucma_get_qp_type(&cmd, &qp_type); | ||
401 | if (ret) | ||
402 | return ret; | ||
403 | |||
385 | mutex_lock(&file->mut); | 404 | mutex_lock(&file->mut); |
386 | ctx = ucma_alloc_ctx(file); | 405 | ctx = ucma_alloc_ctx(file); |
387 | mutex_unlock(&file->mut); | 406 | mutex_unlock(&file->mut); |
@@ -389,7 +408,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, | |||
389 | return -ENOMEM; | 408 | return -ENOMEM; |
390 | 409 | ||
391 | ctx->uid = cmd.uid; | 410 | ctx->uid = cmd.uid; |
392 | ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); | 411 | ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type); |
393 | if (IS_ERR(ctx->cm_id)) { | 412 | if (IS_ERR(ctx->cm_id)) { |
394 | ret = PTR_ERR(ctx->cm_id); | 413 | ret = PTR_ERR(ctx->cm_id); |
395 | goto err1; | 414 | goto err1; |
@@ -1338,9 +1357,11 @@ static const struct file_operations ucma_fops = { | |||
1338 | }; | 1357 | }; |
1339 | 1358 | ||
1340 | static struct miscdevice ucma_misc = { | 1359 | static struct miscdevice ucma_misc = { |
1341 | .minor = MISC_DYNAMIC_MINOR, | 1360 | .minor = MISC_DYNAMIC_MINOR, |
1342 | .name = "rdma_cm", | 1361 | .name = "rdma_cm", |
1343 | .fops = &ucma_fops, | 1362 | .nodename = "infiniband/rdma_cm", |
1363 | .mode = 0666, | ||
1364 | .fops = &ucma_fops, | ||
1344 | }; | 1365 | }; |
1345 | 1366 | ||
1346 | static ssize_t show_abi_version(struct device *dev, | 1367 | static ssize_t show_abi_version(struct device *dev, |
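
Besides validating the port space before any context is allocated, the hunk above relocates the rdma_cm node to /dev/infiniband/rdma_cm with world read/write permissions via the miscdevice nodename and mode fields. That pattern in isolation, as a sketch with placeholder names (my_dev, my_fops):

#include <linux/fs.h>
#include <linux/miscdevice.h>

static const struct file_operations my_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice my_misc = {
	.minor    = MISC_DYNAMIC_MINOR,
	.name     = "my_dev",
	.nodename = "infiniband/my_dev",	/* node: /dev/infiniband/my_dev */
	.mode     = 0666,			/* default node permissions */
	.fops     = &my_fops,
};

/* misc_register(&my_misc) in module init; misc_deregister() on exit. */
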
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index cd1996d0ad08..8d261b6ea5fe 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
@@ -1176,6 +1176,11 @@ static void ib_umad_remove_one(struct ib_device *device) | |||
1176 | kref_put(&umad_dev->ref, ib_umad_release_dev); | 1176 | kref_put(&umad_dev->ref, ib_umad_release_dev); |
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | static char *umad_devnode(struct device *dev, mode_t *mode) | ||
1180 | { | ||
1181 | return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | ||
1182 | } | ||
1183 | |||
1179 | static int __init ib_umad_init(void) | 1184 | static int __init ib_umad_init(void) |
1180 | { | 1185 | { |
1181 | int ret; | 1186 | int ret; |
@@ -1194,6 +1199,8 @@ static int __init ib_umad_init(void) | |||
1194 | goto out_chrdev; | 1199 | goto out_chrdev; |
1195 | } | 1200 | } |
1196 | 1201 | ||
1202 | umad_class->devnode = umad_devnode; | ||
1203 | |||
1197 | ret = class_create_file(umad_class, &class_attr_abi_version.attr); | 1204 | ret = class_create_file(umad_class, &class_attr_abi_version.attr); |
1198 | if (ret) { | 1205 | if (ret) { |
1199 | printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); | 1206 | printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index ec83e9fe387b..e49a85f8a44d 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -824,6 +824,12 @@ static void ib_uverbs_remove_one(struct ib_device *device) | |||
824 | kfree(uverbs_dev); | 824 | kfree(uverbs_dev); |
825 | } | 825 | } |
826 | 826 | ||
827 | static char *uverbs_devnode(struct device *dev, mode_t *mode) | ||
828 | { | ||
829 | *mode = 0666; | ||
830 | return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | ||
831 | } | ||
832 | |||
827 | static int __init ib_uverbs_init(void) | 833 | static int __init ib_uverbs_init(void) |
828 | { | 834 | { |
829 | int ret; | 835 | int ret; |
@@ -842,6 +848,8 @@ static int __init ib_uverbs_init(void) | |||
842 | goto out_chrdev; | 848 | goto out_chrdev; |
843 | } | 849 | } |
844 | 850 | ||
851 | uverbs_class->devnode = uverbs_devnode; | ||
852 | |||
845 | ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); | 853 | ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); |
846 | if (ret) { | 854 | if (ret) { |
847 | printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); | 855 | printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); |
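
user_mad and uverbs reach the same /dev/infiniband/ layout through the class devnode hook instead, since their nodes are created per device by the driver core; uverbs also forces mode 0666, while umad keeps the class default. The pattern condensed, with hypothetical names (my_class, my_devnode):

#include <linux/device.h>
#include <linux/kernel.h>

static char *my_devnode(struct device *dev, mode_t *mode)
{
	*mode = 0666;	/* drop this line to keep the default, as umad does */
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

/* init path: */
my_class = class_create(THIS_MODULE, "my_class");
if (IS_ERR(my_class))
	return PTR_ERR(my_class);
my_class->devnode = my_devnode;
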
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 239184138994..0a5008fbebac 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -914,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb) | |||
914 | goto err; | 914 | goto err; |
915 | 915 | ||
916 | if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { | 916 | if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { |
917 | iwch_post_zb_read(ep->com.qp); | 917 | iwch_post_zb_read(ep); |
918 | } | 918 | } |
919 | 919 | ||
920 | goto out; | 920 | goto out; |
@@ -1078,6 +1078,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1078 | struct iwch_ep *ep = ctx; | 1078 | struct iwch_ep *ep = ctx; |
1079 | struct cpl_wr_ack *hdr = cplhdr(skb); | 1079 | struct cpl_wr_ack *hdr = cplhdr(skb); |
1080 | unsigned int credits = ntohs(hdr->credits); | 1080 | unsigned int credits = ntohs(hdr->credits); |
1081 | unsigned long flags; | ||
1082 | int post_zb = 0; | ||
1081 | 1083 | ||
1082 | PDBG("%s ep %p credits %u\n", __func__, ep, credits); | 1084 | PDBG("%s ep %p credits %u\n", __func__, ep, credits); |
1083 | 1085 | ||
@@ -1087,28 +1089,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1087 | return CPL_RET_BUF_DONE; | 1089 | return CPL_RET_BUF_DONE; |
1088 | } | 1090 | } |
1089 | 1091 | ||
1092 | spin_lock_irqsave(&ep->com.lock, flags); | ||
1090 | BUG_ON(credits != 1); | 1093 | BUG_ON(credits != 1); |
1091 | dst_confirm(ep->dst); | 1094 | dst_confirm(ep->dst); |
1092 | if (!ep->mpa_skb) { | 1095 | if (!ep->mpa_skb) { |
1093 | PDBG("%s rdma_init wr_ack ep %p state %u\n", | 1096 | PDBG("%s rdma_init wr_ack ep %p state %u\n", |
1094 | __func__, ep, state_read(&ep->com)); | 1097 | __func__, ep, ep->com.state); |
1095 | if (ep->mpa_attr.initiator) { | 1098 | if (ep->mpa_attr.initiator) { |
1096 | PDBG("%s initiator ep %p state %u\n", | 1099 | PDBG("%s initiator ep %p state %u\n", |
1097 | __func__, ep, state_read(&ep->com)); | 1100 | __func__, ep, ep->com.state); |
1098 | if (peer2peer) | 1101 | if (peer2peer && ep->com.state == FPDU_MODE) |
1099 | iwch_post_zb_read(ep->com.qp); | 1102 | post_zb = 1; |
1100 | } else { | 1103 | } else { |
1101 | PDBG("%s responder ep %p state %u\n", | 1104 | PDBG("%s responder ep %p state %u\n", |
1102 | __func__, ep, state_read(&ep->com)); | 1105 | __func__, ep, ep->com.state); |
1103 | ep->com.rpl_done = 1; | 1106 | if (ep->com.state == MPA_REQ_RCVD) { |
1104 | wake_up(&ep->com.waitq); | 1107 | ep->com.rpl_done = 1; |
1108 | wake_up(&ep->com.waitq); | ||
1109 | } | ||
1105 | } | 1110 | } |
1106 | } else { | 1111 | } else { |
1107 | PDBG("%s lsm ack ep %p state %u freeing skb\n", | 1112 | PDBG("%s lsm ack ep %p state %u freeing skb\n", |
1108 | __func__, ep, state_read(&ep->com)); | 1113 | __func__, ep, ep->com.state); |
1109 | kfree_skb(ep->mpa_skb); | 1114 | kfree_skb(ep->mpa_skb); |
1110 | ep->mpa_skb = NULL; | 1115 | ep->mpa_skb = NULL; |
1111 | } | 1116 | } |
1117 | spin_unlock_irqrestore(&ep->com.lock, flags); | ||
1118 | if (post_zb) | ||
1119 | iwch_post_zb_read(ep); | ||
1112 | return CPL_RET_BUF_DONE; | 1120 | return CPL_RET_BUF_DONE; |
1113 | } | 1121 | } |
1114 | 1122 | ||
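
The tx_ack() rework above takes ep->com.lock around the state checks and defers the zero-byte read until the lock is dropped, recording the decision in post_zb. Reduced to its shape (my_obj, READY, and post_zb_read() are illustrative stand-ins, not driver symbols):

struct my_obj {
	spinlock_t lock;
	int state;
};

static void handle_ack(struct my_obj *obj)
{
	unsigned long flags;
	int post_zb = 0;

	spin_lock_irqsave(&obj->lock, flags);
	if (obj->state == READY)	/* decide while the state is stable */
		post_zb = 1;
	spin_unlock_irqrestore(&obj->lock, flags);

	if (post_zb)			/* issue the work outside the lock */
		post_zb_read(obj);
}
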
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h index c5406da3f4cd..9a342c9b220d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.h +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h | |||
@@ -332,7 +332,7 @@ int iwch_bind_mw(struct ib_qp *qp, | |||
332 | struct ib_mw_bind *mw_bind); | 332 | struct ib_mw_bind *mw_bind); |
333 | int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); | 333 | int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
334 | int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); | 334 | int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); |
335 | int iwch_post_zb_read(struct iwch_qp *qhp); | 335 | int iwch_post_zb_read(struct iwch_ep *ep); |
336 | int iwch_register_device(struct iwch_dev *dev); | 336 | int iwch_register_device(struct iwch_dev *dev); |
337 | void iwch_unregister_device(struct iwch_dev *dev); | 337 | void iwch_unregister_device(struct iwch_dev *dev); |
338 | void stop_read_rep_timer(struct iwch_qp *qhp); | 338 | void stop_read_rep_timer(struct iwch_qp *qhp); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 1b4cd09f74dc..ecd313f359a4 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
@@ -738,7 +738,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg, | |||
738 | } | 738 | } |
739 | } | 739 | } |
740 | 740 | ||
741 | int iwch_post_zb_read(struct iwch_qp *qhp) | 741 | int iwch_post_zb_read(struct iwch_ep *ep) |
742 | { | 742 | { |
743 | union t3_wr *wqe; | 743 | union t3_wr *wqe; |
744 | struct sk_buff *skb; | 744 | struct sk_buff *skb; |
@@ -761,10 +761,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp) | |||
761 | wqe->read.local_len = cpu_to_be32(0); | 761 | wqe->read.local_len = cpu_to_be32(0); |
762 | wqe->read.local_to = cpu_to_be64(1); | 762 | wqe->read.local_to = cpu_to_be64(1); |
763 | wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ)); | 763 | wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ)); |
764 | wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| | 764 | wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)| |
765 | V_FW_RIWR_LEN(flit_cnt)); | 765 | V_FW_RIWR_LEN(flit_cnt)); |
766 | skb->priority = CPL_PRIORITY_DATA; | 766 | skb->priority = CPL_PRIORITY_DATA; |
767 | return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); | 767 | return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); |
768 | } | 768 | } |
769 | 769 | ||
770 | /* | 770 | /* |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 35d2a5dd9bb4..4f045375c8e2 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/list.h> | 35 | #include <linux/list.h> |
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/idr.h> | 37 | #include <linux/idr.h> |
38 | #include <linux/workqueue.h> | 38 | #include <linux/completion.h> |
39 | #include <linux/netdevice.h> | 39 | #include <linux/netdevice.h> |
40 | #include <linux/sched.h> | 40 | #include <linux/sched.h> |
41 | #include <linux/pci.h> | 41 | #include <linux/pci.h> |
@@ -131,28 +131,21 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) | |||
131 | 131 | ||
132 | #define C4IW_WR_TO (10*HZ) | 132 | #define C4IW_WR_TO (10*HZ) |
133 | 133 | ||
134 | enum { | ||
135 | REPLY_READY = 0, | ||
136 | }; | ||
137 | |||
138 | struct c4iw_wr_wait { | 134 | struct c4iw_wr_wait { |
139 | wait_queue_head_t wait; | 135 | struct completion completion; |
140 | unsigned long status; | ||
141 | int ret; | 136 | int ret; |
142 | }; | 137 | }; |
143 | 138 | ||
144 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) | 139 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) |
145 | { | 140 | { |
146 | wr_waitp->ret = 0; | 141 | wr_waitp->ret = 0; |
147 | wr_waitp->status = 0; | 142 | init_completion(&wr_waitp->completion); |
148 | init_waitqueue_head(&wr_waitp->wait); | ||
149 | } | 143 | } |
150 | 144 | ||
151 | static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) | 145 | static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) |
152 | { | 146 | { |
153 | wr_waitp->ret = ret; | 147 | wr_waitp->ret = ret; |
154 | set_bit(REPLY_READY, &wr_waitp->status); | 148 | complete(&wr_waitp->completion); |
155 | wake_up(&wr_waitp->wait); | ||
156 | } | 149 | } |
157 | 150 | ||
158 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, | 151 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, |
@@ -164,8 +157,7 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, | |||
164 | int ret; | 157 | int ret; |
165 | 158 | ||
166 | do { | 159 | do { |
167 | ret = wait_event_timeout(wr_waitp->wait, | 160 | ret = wait_for_completion_timeout(&wr_waitp->completion, to); |
168 | test_and_clear_bit(REPLY_READY, &wr_waitp->status), to); | ||
169 | if (!ret) { | 161 | if (!ret) { |
170 | printk(KERN_ERR MOD "%s - Device %s not responding - " | 162 | printk(KERN_ERR MOD "%s - Device %s not responding - " |
171 | "tid %u qpid %u\n", func, | 163 | "tid %u qpid %u\n", func, |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 13de1192927c..2d668c69f6d9 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -1138,7 +1138,9 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp, | |||
1138 | u32 i = 0; | 1138 | u32 i = 0; |
1139 | struct nes_device *nesdev; | 1139 | struct nes_device *nesdev; |
1140 | 1140 | ||
1141 | strict_strtoul(buf, 0, &wqm_quanta_value); | 1141 | if (kstrtoul(buf, 0, &wqm_quanta_value) < 0) |
1142 | return -EINVAL; | ||
1143 | |||
1142 | list_for_each_entry(nesdev, &nes_dev_list, list) { | 1144 | list_for_each_entry(nesdev, &nes_dev_list, list) { |
1143 | if (i == ee_flsh_adapter) { | 1145 | if (i == ee_flsh_adapter) { |
1144 | nesdev->nesadapter->wqm_quanta = wqm_quanta_value; | 1146 | nesdev->nesadapter->wqm_quanta = wqm_quanta_value; |
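
strict_strtoul()'s return value was ignored here, so a malformed write silently reused whatever wqm_quanta_value already held; kstrtoul() is __must_check and the failure now reaches the writer. The usual sysfs store shape, sketched with a placeholder name (foo_store):

static ssize_t foo_store(struct device_driver *ddp, const char *buf,
			 size_t count)
{
	unsigned long val;

	if (kstrtoul(buf, 0, &val) < 0)
		return -EINVAL;	/* reject garbage instead of using stale data */

	/* ... apply val ... */
	return count;
}
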
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig index 7c03a70c55a2..8349f9c5064c 100644 --- a/drivers/infiniband/hw/qib/Kconfig +++ b/drivers/infiniband/hw/qib/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config INFINIBAND_QIB | 1 | config INFINIBAND_QIB |
2 | tristate "QLogic PCIe HCA support" | 2 | tristate "QLogic PCIe HCA support" |
3 | depends on 64BIT && NET | 3 | depends on 64BIT |
4 | ---help--- | 4 | ---help--- |
5 | This is a low-level driver for QLogic PCIe QLE InfiniBand host | 5 | This is a low-level driver for QLogic PCIe QLE InfiniBand host |
6 | channel adapters. This driver does not support the QLogic | 6 | channel adapters. This driver does not support the QLogic |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 9876865732f7..ede1475bee09 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -548,7 +548,7 @@ int iser_connect(struct iser_conn *ib_conn, | |||
548 | iser_conn_get(ib_conn); /* ref ib conn's cma id */ | 548 | iser_conn_get(ib_conn); /* ref ib conn's cma id */ |
549 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, | 549 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, |
550 | (void *)ib_conn, | 550 | (void *)ib_conn, |
551 | RDMA_PS_TCP); | 551 | RDMA_PS_TCP, IB_QPT_RC); |
552 | if (IS_ERR(ib_conn->cma_id)) { | 552 | if (IS_ERR(ib_conn->cma_id)) { |
553 | err = PTR_ERR(ib_conn->cma_id); | 553 | err = PTR_ERR(ib_conn->cma_id); |
554 | iser_err("rdma_create_id failed: %d\n", err); | 554 | iser_err("rdma_create_id failed: %d\n", err); |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 376d640487d2..ee165fdcb596 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -1147,7 +1147,7 @@ static void srp_process_aer_req(struct srp_target_port *target, | |||
1147 | static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | 1147 | static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) |
1148 | { | 1148 | { |
1149 | struct ib_device *dev = target->srp_host->srp_dev->dev; | 1149 | struct ib_device *dev = target->srp_host->srp_dev->dev; |
1150 | struct srp_iu *iu = (struct srp_iu *) wc->wr_id; | 1150 | struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; |
1151 | int res; | 1151 | int res; |
1152 | u8 opcode; | 1152 | u8 opcode; |
1153 | 1153 | ||
@@ -1231,7 +1231,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) | |||
1231 | break; | 1231 | break; |
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | iu = (struct srp_iu *) wc.wr_id; | 1234 | iu = (struct srp_iu *) (uintptr_t) wc.wr_id; |
1235 | list_add(&iu->list, &target->free_tx); | 1235 | list_add(&iu->list, &target->free_tx); |
1236 | } | 1236 | } |
1237 | } | 1237 | } |
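
wr_id is a u64, so on 32-bit kernels a direct cast to a pointer triggers a size-mismatch warning; bouncing through uintptr_t keeps both directions clean. Schematically, with wr standing for the ib_send_wr used at post time:

/* post side: the pointer widens losslessly into the u64 wr_id */
wr.wr_id = (uintptr_t) iu;

/* poll side: narrow through uintptr_t before the pointer cast, so
 * 32-bit builds don't warn about integer/pointer size mismatch */
iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
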
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 4c4ac3f3ce5a..a9dd89552f9c 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
@@ -24,6 +24,7 @@ | |||
24 | /* leave room for NETLINK_DM (DM Events) */ | 24 | /* leave room for NETLINK_DM (DM Events) */ |
25 | #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ | 25 | #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ |
26 | #define NETLINK_ECRYPTFS 19 | 26 | #define NETLINK_ECRYPTFS 19 |
27 | #define NETLINK_RDMA 20 | ||
27 | 28 | ||
28 | #define MAX_LINKS 32 | 29 | #define MAX_LINKS 32 |
29 | 30 | ||
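
Reserving NETLINK_RDMA as protocol 20 lets userspace reach the new ibnl socket directly. A hedged userspace sketch of opening it (open_rdma_netlink is an illustrative helper):

#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

static int open_rdma_netlink(void)
{
	struct sockaddr_nl addr = { .nl_family = AF_NETLINK };
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);

	if (fd < 0)
		return -1;
	if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	/* requests carry nlmsg_type = RDMA_NL_GET_TYPE(client, op)
	 * with NLM_F_REQUEST | NLM_F_DUMP set */
	return fd;
}
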
diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild index e7c043216558..ea56f76c0c22 100644 --- a/include/rdma/Kbuild +++ b/include/rdma/Kbuild | |||
@@ -1 +1,6 @@ | |||
1 | header-y += ib_user_cm.h | ||
1 | header-y += ib_user_mad.h | 2 | header-y += ib_user_mad.h |
3 | header-y += ib_user_sa.h | ||
4 | header-y += ib_user_verbs.h | ||
5 | header-y += rdma_netlink.h | ||
6 | header-y += rdma_user_cm.h | ||
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h index bd3d380781e0..f79014aa28f9 100644 --- a/include/rdma/ib_user_cm.h +++ b/include/rdma/ib_user_cm.h | |||
@@ -34,6 +34,7 @@ | |||
34 | #ifndef IB_USER_CM_H | 34 | #ifndef IB_USER_CM_H |
35 | #define IB_USER_CM_H | 35 | #define IB_USER_CM_H |
36 | 36 | ||
37 | #include <linux/types.h> | ||
37 | #include <rdma/ib_user_sa.h> | 38 | #include <rdma/ib_user_sa.h> |
38 | 39 | ||
39 | #define IB_USER_CM_ABI_VERSION 5 | 40 | #define IB_USER_CM_ABI_VERSION 5 |
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 169f7a53fb0c..26977c149c41 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h | |||
@@ -111,6 +111,20 @@ struct rdma_cm_event { | |||
111 | } param; | 111 | } param; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | enum rdma_cm_state { | ||
115 | RDMA_CM_IDLE, | ||
116 | RDMA_CM_ADDR_QUERY, | ||
117 | RDMA_CM_ADDR_RESOLVED, | ||
118 | RDMA_CM_ROUTE_QUERY, | ||
119 | RDMA_CM_ROUTE_RESOLVED, | ||
120 | RDMA_CM_CONNECT, | ||
121 | RDMA_CM_DISCONNECT, | ||
122 | RDMA_CM_ADDR_BOUND, | ||
123 | RDMA_CM_LISTEN, | ||
124 | RDMA_CM_DEVICE_REMOVAL, | ||
125 | RDMA_CM_DESTROYING | ||
126 | }; | ||
127 | |||
114 | struct rdma_cm_id; | 128 | struct rdma_cm_id; |
115 | 129 | ||
116 | /** | 130 | /** |
@@ -130,6 +144,7 @@ struct rdma_cm_id { | |||
130 | rdma_cm_event_handler event_handler; | 144 | rdma_cm_event_handler event_handler; |
131 | struct rdma_route route; | 145 | struct rdma_route route; |
132 | enum rdma_port_space ps; | 146 | enum rdma_port_space ps; |
147 | enum ib_qp_type qp_type; | ||
133 | u8 port_num; | 148 | u8 port_num; |
134 | }; | 149 | }; |
135 | 150 | ||
@@ -140,9 +155,11 @@ struct rdma_cm_id { | |||
140 | * returned rdma_id. | 155 | * returned rdma_id. |
141 | * @context: User specified context associated with the id. | 156 | * @context: User specified context associated with the id. |
142 | * @ps: RDMA port space. | 157 | * @ps: RDMA port space. |
158 | * @qp_type: type of queue pair associated with the id. | ||
143 | */ | 159 | */ |
144 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, | 160 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, |
145 | void *context, enum rdma_port_space ps); | 161 | void *context, enum rdma_port_space ps, |
162 | enum ib_qp_type qp_type); | ||
146 | 163 | ||
147 | /** | 164 | /** |
148 | * rdma_destroy_id - Destroys an RDMA identifier. | 165 | * rdma_destroy_id - Destroys an RDMA identifier. |
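
rdma_create_id() now takes the QP type explicitly instead of the core inferring it from the port space, which is exactly the change every call site in the remainder of this patch applies. New-style usage for a reliable-connected consumer, with hypothetical handler and context names:

struct rdma_cm_id *id;

id = rdma_create_id(my_event_handler, my_context, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(id))
	return PTR_ERR(id);

/* datagram consumers pass RDMA_PS_UDP or RDMA_PS_IPOIB with IB_QPT_UD,
 * mirroring the ucma_get_qp_type() mapping added earlier */
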
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h new file mode 100644 index 000000000000..3c5363ab867b --- /dev/null +++ b/include/rdma/rdma_netlink.h | |||
@@ -0,0 +1,92 @@ | |||
1 | #ifndef _RDMA_NETLINK_H | ||
2 | #define _RDMA_NETLINK_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | enum { | ||
7 | RDMA_NL_RDMA_CM = 1 | ||
8 | }; | ||
9 | |||
10 | #define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10) | ||
11 | #define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1)) | ||
12 | #define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op) | ||
13 | |||
14 | enum { | ||
15 | RDMA_NL_RDMA_CM_ID_STATS = 0, | ||
16 | RDMA_NL_RDMA_CM_NUM_OPS | ||
17 | }; | ||
18 | |||
19 | enum { | ||
20 | RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1, | ||
21 | RDMA_NL_RDMA_CM_ATTR_DST_ADDR, | ||
22 | RDMA_NL_RDMA_CM_NUM_ATTR, | ||
23 | }; | ||
24 | |||
25 | struct rdma_cm_id_stats { | ||
26 | __u32 qp_num; | ||
27 | __u32 bound_dev_if; | ||
28 | __u32 port_space; | ||
29 | __s32 pid; | ||
30 | __u8 cm_state; | ||
31 | __u8 node_type; | ||
32 | __u8 port_num; | ||
33 | __u8 qp_type; | ||
34 | }; | ||
35 | |||
36 | #ifdef __KERNEL__ | ||
37 | |||
38 | #include <linux/netlink.h> | ||
39 | |||
40 | struct ibnl_client_cbs { | ||
41 | int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb); | ||
42 | }; | ||
43 | |||
44 | int ibnl_init(void); | ||
45 | void ibnl_cleanup(void); | ||
46 | |||
47 | /** | ||
48 | * Add a client to the list of IB netlink exporters. | ||
49 | * @index: Index of the added client. | ||
50 | * @nops: Number of ops supported by the added client. | ||
51 | * @cb_table: A table for op->callback | ||
52 | * | ||
53 | * Returns 0 on success or a negative error code. | ||
54 | */ | ||
55 | int ibnl_add_client(int index, int nops, | ||
56 | const struct ibnl_client_cbs cb_table[]); | ||
57 | |||
58 | /** | ||
59 | * Remove a client from IB netlink. | ||
60 | * @index: Index of the removed IB client. | ||
61 | * | ||
62 | * Returns 0 on success or a negative error code. | ||
63 | */ | ||
64 | int ibnl_remove_client(int index); | ||
65 | |||
66 | /** | ||
67 | * Put a new message in a supplied skb. | ||
68 | * @skb: The netlink skb. | ||
69 | * @nlh: Pointer to put the header of the new netlink message. | ||
70 | * @seq: The message sequence number. | ||
71 | * @len: The requested message length to allocate. | ||
72 | * @client: Calling IB netlink client. | ||
73 | * @op: The message content op. | ||
74 | * Returns the allocated buffer on success and NULL on failure. | ||
75 | */ | ||
76 | void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | ||
77 | int len, int client, int op); | ||
78 | /** | ||
79 | * Put a new attribute in a supplied skb. | ||
80 | * @skb: The netlink skb. | ||
81 | * @nlh: Header of the netlink message to append the attribute to. | ||
82 | * @len: The length of the attribute data. | ||
83 | * @data: The attribute data to put. | ||
84 | * @type: The attribute type. | ||
85 | * Returns 0 on success and a negative error code on failure. | ||
86 | */ | ||
87 | int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
88 | int len, void *data, int type); | ||
89 | |||
90 | #endif /* __KERNEL__ */ | ||
91 | |||
92 | #endif /* _RDMA_NETLINK_H */ | ||
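
The packing macros above put the client index in bits 15:10 of nlmsg_type and the op in bits 9:0, so six bits bound the client space and ten bits each client's op space. Worked through for the one client defined so far:

/* nlmsg_type layout: bits 15:10 = client, bits 9:0 = op */
int type = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_ID_STATS);

/* type == (1 << 10) + 0 == 0x400                            */
/* RDMA_NL_GET_CLIENT(type) == 1 == RDMA_NL_RDMA_CM          */
/* RDMA_NL_GET_OP(type)     == 0 == RDMA_NL_RDMA_CM_ID_STATS */
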
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 844a7a5607e3..159c50f1c6bf 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c | |||
@@ -589,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) | |||
589 | return -ENOMEM; | 589 | return -ENOMEM; |
590 | 590 | ||
591 | /* Create the RDMA CM ID */ | 591 | /* Create the RDMA CM ID */ |
592 | rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP); | 592 | rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP, |
593 | IB_QPT_RC); | ||
593 | if (IS_ERR(rdma->cm_id)) | 594 | if (IS_ERR(rdma->cm_id)) |
594 | goto error; | 595 | goto error; |
595 | 596 | ||
diff --git a/net/rds/ib.c b/net/rds/ib.c index cce19f95c624..3b83086bcc30 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr) | |||
325 | /* Create a CMA ID and try to bind it. This catches both | 325 | /* Create a CMA ID and try to bind it. This catches both |
326 | * IB and iWARP capable NICs. | 326 | * IB and iWARP capable NICs. |
327 | */ | 327 | */ |
328 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); | 328 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); |
329 | if (IS_ERR(cm_id)) | 329 | if (IS_ERR(cm_id)) |
330 | return PTR_ERR(cm_id); | 330 | return PTR_ERR(cm_id); |
331 | 331 | ||
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index ee369d201a65..fd453dd5124b 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn) | |||
587 | /* XXX I wonder what affect the port space has */ | 587 | /* XXX I wonder what affect the port space has */ |
588 | /* delegate cm event handler to rdma_transport */ | 588 | /* delegate cm event handler to rdma_transport */ |
589 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, | 589 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, |
590 | RDMA_PS_TCP); | 590 | RDMA_PS_TCP, IB_QPT_RC); |
591 | if (IS_ERR(ic->i_cm_id)) { | 591 | if (IS_ERR(ic->i_cm_id)) { |
592 | ret = PTR_ERR(ic->i_cm_id); | 592 | ret = PTR_ERR(ic->i_cm_id); |
593 | ic->i_cm_id = NULL; | 593 | ic->i_cm_id = NULL; |
diff --git a/net/rds/iw.c b/net/rds/iw.c index 5a9676fe594f..f7474844f096 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c | |||
@@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr) | |||
226 | /* Create a CMA ID and try to bind it. This catches both | 226 | /* Create a CMA ID and try to bind it. This catches both |
227 | * IB and iWARP capable NICs. | 227 | * IB and iWARP capable NICs. |
228 | */ | 228 | */ |
229 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); | 229 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); |
230 | if (IS_ERR(cm_id)) | 230 | if (IS_ERR(cm_id)) |
231 | return PTR_ERR(cm_id); | 231 | return PTR_ERR(cm_id); |
232 | 232 | ||
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 3a60a15d1b4a..c12db66f24c7 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn) | |||
522 | /* XXX I wonder what affect the port space has */ | 522 | /* XXX I wonder what affect the port space has */ |
523 | /* delegate cm event handler to rdma_transport */ | 523 | /* delegate cm event handler to rdma_transport */ |
524 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, | 524 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, |
525 | RDMA_PS_TCP); | 525 | RDMA_PS_TCP, IB_QPT_RC); |
526 | if (IS_ERR(ic->i_cm_id)) { | 526 | if (IS_ERR(ic->i_cm_id)) { |
527 | ret = PTR_ERR(ic->i_cm_id); | 527 | ret = PTR_ERR(ic->i_cm_id); |
528 | ic->i_cm_id = NULL; | 528 | ic->i_cm_id = NULL; |
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 4195a0539829..f8760e1b6688 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c | |||
@@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void) | |||
158 | struct rdma_cm_id *cm_id; | 158 | struct rdma_cm_id *cm_id; |
159 | int ret; | 159 | int ret; |
160 | 160 | ||
161 | cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); | 161 | cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP, |
162 | IB_QPT_RC); | ||
162 | if (IS_ERR(cm_id)) { | 163 | if (IS_ERR(cm_id)) { |
163 | ret = PTR_ERR(cm_id); | 164 | ret = PTR_ERR(cm_id); |
164 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " | 165 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 6c014dd3a20b..c3c232a88d94 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, | |||
695 | return ERR_PTR(-ENOMEM); | 695 | return ERR_PTR(-ENOMEM); |
696 | xprt = &cma_xprt->sc_xprt; | 696 | xprt = &cma_xprt->sc_xprt; |
697 | 697 | ||
698 | listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); | 698 | listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP, |
699 | IB_QPT_RC); | ||
699 | if (IS_ERR(listen_id)) { | 700 | if (IS_ERR(listen_id)) { |
700 | ret = PTR_ERR(listen_id); | 701 | ret = PTR_ERR(listen_id); |
701 | dprintk("svcrdma: rdma_create_id failed = %d\n", ret); | 702 | dprintk("svcrdma: rdma_create_id failed = %d\n", ret); |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index d4297dc43dc4..80f8da344df5 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, | |||
387 | 387 | ||
388 | init_completion(&ia->ri_done); | 388 | init_completion(&ia->ri_done); |
389 | 389 | ||
390 | id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP); | 390 | id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC); |
391 | if (IS_ERR(id)) { | 391 | if (IS_ERR(id)) { |
392 | rc = PTR_ERR(id); | 392 | rc = PTR_ERR(id); |
393 | dprintk("RPC: %s: rdma_create_id() failed %i\n", | 393 | dprintk("RPC: %s: rdma_create_id() failed %i\n", |