author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit		8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree		a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /drivers/infiniband/core
parent		406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'drivers/infiniband/core')
24 files changed, 389 insertions, 1713 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index eaec8d7a3b7..f2a84c6f854 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -37,7 +37,6 @@
 #include <linux/inetdevice.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <linux/module.h>
 #include <net/arp.h>
 #include <net/neighbour.h>
 #include <net/route.h>
@@ -129,7 +128,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 		dev_put(dev);
 		break;
 
-#if IS_ENABLED(CONFIG_IPV6)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	case AF_INET6:
 		rcu_read_lock();
 		for_each_netdev_rcu(&init_net, dev) {
@@ -152,11 +151,13 @@ static void set_timeout(unsigned long time)
 {
 	unsigned long delay;
 
+	cancel_delayed_work(&work);
+
 	delay = time - jiffies;
 	if ((long)delay <= 0)
 		delay = 1;
 
-	mod_delayed_work(addr_wq, &work, delay);
+	queue_delayed_work(addr_wq, &work, delay);
 }
 
 static void queue_req(struct addr_req *req)
@@ -176,29 +177,6 @@ static void queue_req(struct addr_req *req)
 	mutex_unlock(&lock);
 }
 
-static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, void *daddr)
-{
-	struct neighbour *n;
-	int ret;
-
-	n = dst_neigh_lookup(dst, daddr);
-
-	rcu_read_lock();
-	if (!n || !(n->nud_state & NUD_VALID)) {
-		if (n)
-			neigh_event_send(n, NULL);
-		ret = -ENODATA;
-	} else {
-		ret = rdma_copy_addr(dev_addr, dst->dev, n->ha);
-	}
-	rcu_read_unlock();
-
-	if (n)
-		neigh_release(n);
-
-	return ret;
-}
-
 static int addr4_resolve(struct sockaddr_in *src_in,
 			 struct sockaddr_in *dst_in,
 			 struct rdma_dev_addr *addr)
@@ -206,6 +184,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 	__be32 src_ip = src_in->sin_addr.s_addr;
 	__be32 dst_ip = dst_in->sin_addr.s_addr;
 	struct rtable *rt;
+	struct neighbour *neigh;
 	struct flowi4 fl4;
 	int ret;
 
@@ -234,25 +213,39 @@
 		goto put;
 	}
 
-	ret = dst_fetch_ha(&rt->dst, addr, &fl4.daddr);
+	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
+	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+		rcu_read_lock();
+		neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
+		rcu_read_unlock();
+		ret = -ENODATA;
+		if (neigh)
+			goto release;
+		goto put;
+	}
+
+	ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
+release:
+	neigh_release(neigh);
 put:
 	ip_rt_put(rt);
 out:
 	return ret;
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static int addr6_resolve(struct sockaddr_in6 *src_in,
 			 struct sockaddr_in6 *dst_in,
 			 struct rdma_dev_addr *addr)
 {
 	struct flowi6 fl6;
+	struct neighbour *neigh;
 	struct dst_entry *dst;
 	int ret;
 
 	memset(&fl6, 0, sizeof fl6);
-	fl6.daddr = dst_in->sin6_addr;
-	fl6.saddr = src_in->sin6_addr;
+	ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
+	ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
 	fl6.flowi6_oif = addr->bound_dev_if;
 
 	dst = ip6_route_output(&init_net, NULL, &fl6);
@@ -266,7 +259,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 			goto put;
 
 		src_in->sin6_family = AF_INET6;
-		src_in->sin6_addr = fl6.saddr;
+		ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
 	}
 
 	if (dst->dev->flags & IFF_LOOPBACK) {
@@ -282,7 +275,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 		goto put;
 	}
 
-	ret = dst_fetch_ha(dst, addr, &fl6.daddr);
+	rcu_read_lock();
+	neigh = dst_get_neighbour(dst);
+	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
+		if (neigh)
+			neigh_event_send(neigh, NULL);
+		ret = -ENODATA;
+	} else {
+		ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
+	}
+	rcu_read_unlock();
 put:
 	dst_release(dst);
 	return ret;
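A note on the set_timeout() hunk above: both sides rely on the same wraparound-safe jiffies arithmetic, computing the delay as an unsigned subtraction, testing the sign of the result as a signed value, and clamping to one tick. The only functional change is that mod_delayed_work(), which atomically re-arms a pending work item, is replaced by the older non-atomic cancel_delayed_work() + queue_delayed_work() pair. A minimal userspace sketch of the arithmetic, with a hypothetical ticks_until() helper standing in for the kernel code:

#include <stdio.h>

/* Wraparound-safe "ticks until deadline", as in set_timeout():
 * unsigned subtraction wraps modulo 2^N, and reinterpreting the
 * result as signed tells us whether the deadline already passed.
 * The delay is clamped to 1 so the work item is always queued. */
static unsigned long ticks_until(unsigned long now, unsigned long deadline)
{
	unsigned long delay = deadline - now;

	if ((long)delay <= 0)
		delay = 1;
	return delay;
}

int main(void)
{
	printf("%lu\n", ticks_until(100, 150));     /* 50 ticks ahead */
	printf("%lu\n", ticks_until(150, 100));     /* already past: 1 */
	printf("%lu\n", ticks_until(~0UL - 5, 10)); /* across the wrap: 16 */
	return 0;
}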
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 80f6cf2449f..9353992f9ee 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -167,7 +167,6 @@ int ib_find_cached_pkey(struct ib_device *device,
 	unsigned long flags;
 	int i;
 	int ret = -ENOENT;
-	int partial_ix = -1;
 
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
@@ -180,46 +179,6 @@ int ib_find_cached_pkey(struct ib_device *device,
 
 	for (i = 0; i < cache->table_len; ++i)
 		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
-			if (cache->table[i] & 0x8000) {
-				*index = i;
-				ret = 0;
-				break;
-			} else
-				partial_ix = i;
-		}
-
-	if (ret && partial_ix >= 0) {
-		*index = partial_ix;
-		ret = 0;
-	}
-
-	read_unlock_irqrestore(&device->cache.lock, flags);
-
-	return ret;
-}
-EXPORT_SYMBOL(ib_find_cached_pkey);
-
-int ib_find_exact_cached_pkey(struct ib_device *device,
-			      u8 port_num,
-			      u16 pkey,
-			      u16 *index)
-{
-	struct ib_pkey_cache *cache;
-	unsigned long flags;
-	int i;
-	int ret = -ENOENT;
-
-	if (port_num < start_port(device) || port_num > end_port(device))
-		return -EINVAL;
-
-	read_lock_irqsave(&device->cache.lock, flags);
-
-	cache = device->cache.pkey_cache[port_num - start_port(device)];
-
-	*index = -1;
-
-	for (i = 0; i < cache->table_len; ++i)
-		if (cache->table[i] == pkey) {
 			*index = i;
 			ret = 0;
 			break;
@@ -229,7 +188,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 
 	return ret;
 }
-EXPORT_SYMBOL(ib_find_exact_cached_pkey);
+EXPORT_SYMBOL(ib_find_cached_pkey);
 
 int ib_get_cached_lmc(struct ib_device *device,
 		      u8 port_num,
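The logic removed from ib_find_cached_pkey() above implements the P_Key membership rule: bit 15 of a P_Key is the full-membership bit and the low 15 bits are the key itself, so a full-member entry wins immediately while a limited-member entry is only remembered as a fallback. A self-contained sketch of that lookup over an illustrative userspace table (names are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Prefer a full-membership match (bit 15 set); otherwise fall back
 * to the last limited-membership match, as the removed code did.
 * Returns the table index, or -1 if the key is absent. */
static int find_pkey(const uint16_t *table, int len, uint16_t pkey)
{
	int partial_ix = -1;

	for (int i = 0; i < len; ++i) {
		if ((table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (table[i] & 0x8000)
				return i;	/* full member: done */
			partial_ix = i;		/* limited member: fallback */
		}
	}
	return partial_ix;
}

int main(void)
{
	const uint16_t table[] = { 0x7fff, 0xffff, 0x0001 };

	printf("%d\n", find_pkey(table, 3, 0xffff)); /* 1: full match wins */
	printf("%d\n", find_pkey(table, 3, 0x0001)); /* 2: limited fallback */
	return 0;
}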
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 394fea2ba1b..fc0f2bd9ca8 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -36,7 +36,6 @@
 #include <linux/completion.h>
 #include <linux/dma-mapping.h>
 #include <linux/device.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/interrupt.h>
@@ -390,7 +389,7 @@ static int cm_alloc_id(struct cm_id_private *cm_id_priv)
 		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
 					next_id, &id);
 		if (!ret)
-			next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
+			next_id = ((unsigned) id + 1) & MAX_ID_MASK;
 		spin_unlock_irqrestore(&cm.lock, flags);
 	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
 
@@ -890,8 +889,6 @@ retest:
 		break;
 	case IB_CM_ESTABLISHED:
 		spin_unlock_irq(&cm_id_priv->lock);
-		if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
-			break;
 		ib_send_cm_dreq(cm_id, NULL, 0);
 		goto retest;
 	case IB_CM_DREQ_SENT:
@@ -1011,6 +1008,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	req_msg->service_id = param->service_id;
 	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
+	cm_req_set_resp_res(req_msg, param->responder_resources);
 	cm_req_set_init_depth(req_msg, param->initiator_depth);
 	cm_req_set_remote_resp_timeout(req_msg,
 				       param->remote_cm_response_timeout);
@@ -1019,16 +1017,12 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
 	cm_req_set_local_resp_timeout(req_msg,
 				      param->local_cm_response_timeout);
+	cm_req_set_retry_count(req_msg, param->retry_count);
 	req_msg->pkey = param->primary_path->pkey;
 	cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
+	cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
 	cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
-
-	if (param->qp_type != IB_QPT_XRC_INI) {
-		cm_req_set_resp_res(req_msg, param->responder_resources);
-		cm_req_set_retry_count(req_msg, param->retry_count);
-		cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
-		cm_req_set_srq(req_msg, param->srq);
-	}
+	cm_req_set_srq(req_msg, param->srq);
 
 	if (pri_path->hop_limit <= 1) {
 		req_msg->primary_local_lid = pri_path->slid;
@@ -1086,8 +1080,7 @@ static int cm_validate_req_param(struct ib_cm_req_param *param)
 	if (!param->primary_path)
 		return -EINVAL;
 
-	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
-	    param->qp_type != IB_QPT_XRC_INI)
+	if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
 		return -EINVAL;
 
 	if (param->private_data &&
@@ -1608,24 +1601,18 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
 	cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
 	rep_msg->local_comm_id = cm_id_priv->id.local_id;
 	rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
+	cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
 	cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
 	rep_msg->resp_resources = param->responder_resources;
+	rep_msg->initiator_depth = param->initiator_depth;
 	cm_rep_set_target_ack_delay(rep_msg,
 				    cm_id_priv->av.port->cm_dev->ack_delay);
 	cm_rep_set_failover(rep_msg, param->failover_accepted);
+	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
 	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
+	cm_rep_set_srq(rep_msg, param->srq);
 	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 
-	if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
-		rep_msg->initiator_depth = param->initiator_depth;
-		cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
-		cm_rep_set_srq(rep_msg, param->srq);
-		cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
-	} else {
-		cm_rep_set_srq(rep_msg, 1);
-		cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
-	}
-
 	if (param->private_data && param->private_data_len)
 		memcpy(rep_msg->private_data, param->private_data,
 		       param->private_data_len);
@@ -1673,7 +1660,7 @@ int ib_send_cm_rep(struct ib_cm_id *cm_id,
 	cm_id_priv->initiator_depth = param->initiator_depth;
 	cm_id_priv->responder_resources = param->responder_resources;
 	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
-	cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
+	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
 
 out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 	return ret;
@@ -1744,7 +1731,7 @@ error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 }
 EXPORT_SYMBOL(ib_send_cm_rtu);
 
-static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
+static void cm_format_rep_event(struct cm_work *work)
 {
 	struct cm_rep_msg *rep_msg;
 	struct ib_cm_rep_event_param *param;
@@ -1753,7 +1740,7 @@ static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
 	param = &work->cm_event.param.rep_rcvd;
 	param->remote_ca_guid = rep_msg->local_ca_guid;
 	param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
-	param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
+	param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
 	param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
 	param->responder_resources = rep_msg->initiator_depth;
 	param->initiator_depth = rep_msg->resp_resources;
@@ -1821,7 +1808,7 @@ static int cm_rep_handler(struct cm_work *work)
 		return -EINVAL;
 	}
 
-	cm_format_rep_event(work, cm_id_priv->qp_type);
+	cm_format_rep_event(work);
 
 	spin_lock_irq(&cm_id_priv->lock);
 	switch (cm_id_priv->id.state) {
@@ -1836,7 +1823,7 @@
 
 	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
 	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
-	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
+	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
 
 	spin_lock(&cm.lock);
 	/* Check for duplicate REP. */
@@ -1863,7 +1850,7 @@
 
 	cm_id_priv->id.state = IB_CM_REP_RCVD;
 	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
-	cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
+	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
 	cm_id_priv->initiator_depth = rep_msg->resp_resources;
 	cm_id_priv->responder_resources = rep_msg->initiator_depth;
 	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
@@ -3505,8 +3492,7 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		qp_attr->path_mtu = cm_id_priv->path_mtu;
 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
-		if (cm_id_priv->qp_type == IB_QPT_RC ||
-		    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
+		if (cm_id_priv->qp_type == IB_QPT_RC) {
 			*qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
 					 IB_QP_MIN_RNR_TIMER;
 			qp_attr->max_dest_rd_atomic =
@@ -3551,21 +3537,15 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 	if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
 		*qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
 		qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
-		switch (cm_id_priv->qp_type) {
-		case IB_QPT_RC:
-		case IB_QPT_XRC_INI:
-			*qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
+		if (cm_id_priv->qp_type == IB_QPT_RC) {
+			*qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+					 IB_QP_RNR_RETRY |
 					 IB_QP_MAX_QP_RD_ATOMIC;
+			qp_attr->timeout = cm_id_priv->av.timeout;
 			qp_attr->retry_cnt = cm_id_priv->retry_count;
 			qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
-			qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
-			/* fall through */
-		case IB_QPT_XRC_TGT:
-			*qp_attr_mask |= IB_QP_TIMEOUT;
-			qp_attr->timeout = cm_id_priv->av.timeout;
-			break;
-		default:
-			break;
+			qp_attr->max_rd_atomic =
+				cm_id_priv->initiator_depth;
 		}
 		if (cm_id_priv->alt_av.ah_attr.dlid) {
 			*qp_attr_mask |= IB_QP_PATH_MIG_STATE;
@@ -3659,7 +3639,7 @@ static struct kobj_type cm_port_obj_type = {
 	.release = cm_release_port_obj
 };
 
-static char *cm_devnode(struct device *dev, umode_t *mode)
+static char *cm_devnode(struct device *dev, mode_t *mode)
 {
 	if (mode)
 		*mode = 0666;
@@ -3848,28 +3828,24 @@ static int __init ib_cm_init(void)
 	INIT_LIST_HEAD(&cm.timewait_list);
 
 	ret = class_register(&cm_class);
-	if (ret) {
-		ret = -ENOMEM;
-		goto error1;
-	}
+	if (ret)
+		return -ENOMEM;
 
 	cm.wq = create_workqueue("ib_cm");
 	if (!cm.wq) {
 		ret = -ENOMEM;
-		goto error2;
+		goto error1;
 	}
 
 	ret = ib_register_client(&cm_client);
 	if (ret)
-		goto error3;
+		goto error2;
 
 	return 0;
-error3:
-	destroy_workqueue(cm.wq);
 error2:
-	class_unregister(&cm_class);
+	destroy_workqueue(cm.wq);
 error1:
-	idr_destroy(&cm.local_id_table);
+	class_unregister(&cm_class);
 	return ret;
 }
 
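The ib_cm_init() hunk above is the standard kernel unwind idiom: each error label undoes exactly the initializations that succeeded before the failing step, in reverse order, and the labels are renumbered when a cleanup step is dropped (here, idr_destroy() leaves the error path). A compilable sketch of the pattern, with hypothetical init/teardown steps standing in for class_register(), create_workqueue() and ib_register_client():

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* force a failure */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = step_b();
	if (ret)
		goto err_a;		/* only step_a needs undoing */

	ret = step_c();
	if (ret)
		goto err_b;

	return 0;

err_b:					/* unwind in reverse order */
	undo_b();
err_a:
	undo_a();
	return ret;
}

int main(void)
{
	return init() ? 1 : 0;
}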
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index be068f47e47..7e63c08f697 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2011 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2004 Intel Corporation.  All rights reserved.
  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
  * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
  *
@@ -44,6 +44,18 @@
 
 #define IB_CM_CLASS_VERSION	2 /* IB specification 1.2 */
 
+#define CM_REQ_ATTR_ID		cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID		cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID		cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID		cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID		cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID		cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID		cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID	cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID	cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID		cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID		cpu_to_be16(0x001A)
+
 enum cm_msg_sequence {
 	CM_MSG_SEQUENCE_REQ,
 	CM_MSG_SEQUENCE_LAP,
@@ -74,7 +86,7 @@ struct cm_req_msg {
 	__be16 pkey;
 	/* path MTU:4, RDC exists:1, RNR retry count:3. */
 	u8 offset50;
-	/* max CM Retries:4, SRQ:1, extended transport type:3 */
+	/* max CM Retries:4, SRQ:1, rsvd:3 */
 	u8 offset51;
 
 	__be16 primary_local_lid;
@@ -163,11 +175,6 @@ static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
 	switch(transport_type) {
 	case 0: return IB_QPT_RC;
 	case 1: return IB_QPT_UC;
-	case 3:
-		switch (req_msg->offset51 & 0x7) {
-		case 1: return IB_QPT_XRC_TGT;
-		default: return 0;
-		}
 	default: return 0;
 	}
 }
@@ -181,12 +188,6 @@ static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
 						 req_msg->offset40) &
 						  0xFFFFFFF9) | 0x2);
 		break;
-	case IB_QPT_XRC_INI:
-		req_msg->offset40 = cpu_to_be32((be32_to_cpu(
-						 req_msg->offset40) &
-						  0xFFFFFFF9) | 0x6);
-		req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
-		break;
 	default:
 		req_msg->offset40 = cpu_to_be32(be32_to_cpu(
 						 req_msg->offset40) &
@@ -526,23 +527,6 @@ static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
 					 (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
 }
 
-static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
-{
-	return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
-}
-
-static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
-{
-	rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
-					(be32_to_cpu(rep_msg->offset16) & 0x000000FF));
-}
-
-static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
-{
-	return (qp_type == IB_QPT_XRC_INI) ?
-		cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
-}
-
 static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
 {
 	return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
@@ -787,7 +771,6 @@ struct cm_apr_msg {
 
 	u8 info_length;
 	u8 ap_status;
-	__be16 rsvd;
 	u8 info[IB_CM_APR_INFO_LENGTH];
 
 	u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d789eea3216..ca4c5dcd713 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -41,8 +41,6 @@
 #include <linux/idr.h>
 #include <linux/inetdevice.h>
 #include <linux/slab.h>
-#include <linux/module.h>
-#include <net/route.h>
 
 #include <net/tcp.h>
 #include <net/ipv6.h>
@@ -83,7 +81,6 @@ static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
 static DEFINE_IDR(udp_ps);
 static DEFINE_IDR(ipoib_ps);
-static DEFINE_IDR(ib_ps);
 
 struct cma_device {
 	struct list_head list;
@@ -99,10 +96,6 @@ struct rdma_bind_list {
 	unsigned short port;
 };
 
-enum {
-	CMA_OPTION_AFONLY,
-};
-
 /*
  * Device removal can occur at anytime, so we need extra handling to
  * serialize notifying the user of device removal with other callbacks.
@@ -141,11 +134,9 @@ struct rdma_id_private {
 	u32 qkey;
 	u32 qp_num;
 	pid_t owner;
-	u32 options;
 	u8 srq;
 	u8 tos;
 	u8 reuseaddr;
-	u8 afonly;
 };
 
 struct cma_multicast {
@@ -345,17 +336,17 @@ static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_nu
 
 	err = ib_query_port(device, port_num, &props);
 	if (err)
-		return err;
+		return 1;
 
 	for (i = 0; i < props.gid_tbl_len; ++i) {
 		err = ib_query_gid(device, port_num, i, &tmp);
 		if (err)
-			return err;
+			return 1;
 		if (!memcmp(&tmp, gid, sizeof tmp))
 			return 0;
 	}
 
-	return -EADDRNOTAVAIL;
+	return -EAGAIN;
 }
 
 static int cma_acquire_dev(struct rdma_id_private *id_priv)
@@ -388,7 +379,8 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 				if (!ret) {
 					id_priv->id.port_num = port;
 					goto out;
-				}
+				} else if (ret == 1)
+					break;
 			}
 		}
 	}
@@ -1116,7 +1108,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
 		rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
 		rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
-		ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
+		ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
 	} else {
 		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
 					&rt->addr.dev_addr);
@@ -1187,15 +1179,6 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
 	event->param.conn.qp_num = req_data->remote_qpn;
 }
 
-static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
-{
-	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
-		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
-		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
-		 (id->qp_type == IB_QPT_UD)) ||
-		(!id->qp_type));
-}
-
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
 	struct rdma_id_private *listen_id, *conn_id;
@@ -1203,16 +1186,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
-		return -EINVAL;
-
 	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
+	if (listen_id->id.qp_type == IB_QPT_UD) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
@@ -1224,13 +1204,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	}
 	if (!conn_id) {
 		ret = -ENOMEM;
-		goto err1;
+		goto out;
 	}
 
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	ret = cma_acquire_dev(conn_id);
 	if (ret)
-		goto err2;
+		goto release_conn_id;
 
 	conn_id->cm_id.ib = cm_id;
 	cm_id->context = conn_id;
@@ -1242,33 +1222,31 @@
 	 */
 	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
-	if (ret)
-		goto err3;
-
-	/*
-	 * Acquire mutex to prevent user executing rdma_destroy_id()
-	 * while we're accessing the cm_id.
-	 */
-	mutex_lock(&lock);
-	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
-		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
-	mutex_unlock(&lock);
-	mutex_unlock(&conn_id->handler_mutex);
-	mutex_unlock(&listen_id->handler_mutex);
+	if (!ret) {
+		/*
+		 * Acquire mutex to prevent user executing rdma_destroy_id()
+		 * while we're accessing the cm_id.
+		 */
+		mutex_lock(&lock);
+		if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+		mutex_unlock(&lock);
+		mutex_unlock(&conn_id->handler_mutex);
+		cma_deref_id(conn_id);
+		goto out;
+	}
 	cma_deref_id(conn_id);
-	return 0;
 
-err3:
-	cma_deref_id(conn_id);
 	/* Destroy the CM ID by returning a non-zero value. */
 	conn_id->cm_id.ib = NULL;
-err2:
+
+release_conn_id:
 	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
-err1:
+	rdma_destroy_id(&conn_id->id);
+
+out:
 	mutex_unlock(&listen_id->handler_mutex);
-	if (conn_id)
-		rdma_destroy_id(&conn_id->id);
 	return ret;
 }
1274 | 1252 | ||
@@ -1302,10 +1280,8 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, | |||
1302 | } else { | 1280 | } else { |
1303 | cma_set_ip_ver(cma_data, 4); | 1281 | cma_set_ip_ver(cma_data, 4); |
1304 | cma_set_ip_ver(cma_mask, 0xF); | 1282 | cma_set_ip_ver(cma_mask, 0xF); |
1305 | if (!cma_any_addr(addr)) { | 1283 | cma_data->dst_addr.ip4.addr = ip4_addr; |
1306 | cma_data->dst_addr.ip4.addr = ip4_addr; | 1284 | cma_mask->dst_addr.ip4.addr = htonl(~0); |
1307 | cma_mask->dst_addr.ip4.addr = htonl(~0); | ||
1308 | } | ||
1309 | } | 1285 | } |
1310 | break; | 1286 | break; |
1311 | case AF_INET6: | 1287 | case AF_INET6: |
@@ -1319,11 +1295,9 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
 		} else {
 			cma_set_ip_ver(cma_data, 6);
 			cma_set_ip_ver(cma_mask, 0xF);
-			if (!cma_any_addr(addr)) {
-				cma_data->dst_addr.ip6 = ip6_addr;
-				memset(&cma_mask->dst_addr.ip6, 0xFF,
-				       sizeof cma_mask->dst_addr.ip6);
-			}
+			cma_data->dst_addr.ip6 = ip6_addr;
+			memset(&cma_mask->dst_addr.ip6, 0xFF,
+			       sizeof cma_mask->dst_addr.ip6);
 		}
 		break;
 	default:
@@ -1354,8 +1328,6 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		switch (iw_event->status) {
 		case 0:
 			event.event = RDMA_CM_EVENT_ESTABLISHED;
-			event.param.conn.initiator_depth = iw_event->ird;
-			event.param.conn.responder_resources = iw_event->ord;
 			break;
 		case -ECONNRESET:
 		case -ECONNREFUSED:
@@ -1371,8 +1343,6 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		break;
 	case IW_CM_EVENT_ESTABLISHED:
 		event.event = RDMA_CM_EVENT_ESTABLISHED;
-		event.param.conn.initiator_depth = iw_event->ird;
-		event.param.conn.responder_resources = iw_event->ord;
 		break;
 	default:
 		BUG_ON(1);
@@ -1463,8 +1433,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
 	event.param.conn.private_data = iw_event->private_data;
 	event.param.conn.private_data_len = iw_event->private_data_len;
-	event.param.conn.initiator_depth = iw_event->ird;
-	event.param.conn.responder_resources = iw_event->ord;
+	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
+	event.param.conn.responder_resources = attr.max_qp_rd_atom;
 
 	/*
 	 * Protect against the user destroying conn_id from another thread
@@ -1508,7 +1478,7 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 
 	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
-	if (cma_any_addr(addr) && !id_priv->afonly)
+	if (cma_any_addr(addr))
 		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
 	else {
 		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
@@ -1582,7 +1552,6 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
 	atomic_inc(&id_priv->refcount);
 	dev_id_priv->internal_id = 1;
-	dev_id_priv->afonly = id_priv->afonly;
 
 	ret = rdma_listen(id, id_priv->backlog);
 	if (ret)
@@ -1839,10 +1808,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	route->path_rec->reversible = 1;
 	route->path_rec->pkey = cpu_to_be16(0xffff);
 	route->path_rec->mtu_selector = IB_SA_EQ;
-	route->path_rec->sl = netdev_get_prio_tc_map(
-			ndev->priv_flags & IFF_802_1Q_VLAN ?
-				vlan_dev_real_dev(ndev) : ndev,
-			rt_tos2priority(id_priv->tos));
+	route->path_rec->sl = id_priv->tos >> 5;
 
 	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
 	route->path_rec->rate_selector = IB_SA_EQ;
@@ -2021,11 +1987,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 	if (cma_zero_addr(src)) {
 		dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
 		if ((src->sa_family = dst->sa_family) == AF_INET) {
-			((struct sockaddr_in *)src)->sin_addr =
-				((struct sockaddr_in *)dst)->sin_addr;
+			((struct sockaddr_in *) src)->sin_addr.s_addr =
+				((struct sockaddr_in *) dst)->sin_addr.s_addr;
 		} else {
-			((struct sockaddr_in6 *)src)->sin6_addr =
-				((struct sockaddr_in6 *)dst)->sin6_addr;
+			ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
+				       &((struct sockaddr_in6 *) dst)->sin6_addr);
 		}
 	}
 
@@ -2108,26 +2074,6 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 }
 EXPORT_SYMBOL(rdma_set_reuseaddr);
 
-int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
-{
-	struct rdma_id_private *id_priv;
-	unsigned long flags;
-	int ret;
-
-	id_priv = container_of(id, struct rdma_id_private, id);
-	spin_lock_irqsave(&id_priv->lock, flags);
-	if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
-		id_priv->options |= (1 << CMA_OPTION_AFONLY);
-		id_priv->afonly = afonly;
-		ret = 0;
-	} else {
-		ret = -EINVAL;
-	}
-	spin_unlock_irqrestore(&id_priv->lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(rdma_set_afonly);
-
 static void cma_bind_port(struct rdma_bind_list *bind_list,
 			  struct rdma_id_private *id_priv)
 {
@@ -2217,24 +2163,22 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
 	struct hlist_node *node;
 
 	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+	if (cma_any_addr(addr) && !reuseaddr)
+		return -EADDRNOTAVAIL;
+
 	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
 		if (id_priv == cur_id)
 			continue;
 
-		if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
-		    cur_id->reuseaddr)
-			continue;
-
-		cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
-		if (id_priv->afonly && cur_id->afonly &&
-		    (addr->sa_family != cur_addr->sa_family))
-			continue;
-
-		if (cma_any_addr(addr) || cma_any_addr(cur_addr))
-			return -EADDRNOTAVAIL;
+		if ((cur_id->state == RDMA_CM_LISTEN) ||
+		    !reuseaddr || !cur_id->reuseaddr) {
+			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
+			if (cma_any_addr(cur_addr))
+				return -EADDRNOTAVAIL;
 
 			if (!cma_addr_cmp(addr, cur_addr))
 				return -EADDRINUSE;
+		}
 	}
 	return 0;
 }
@@ -2290,9 +2234,6 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	case RDMA_PS_IPOIB:
 		ps = &ipoib_ps;
 		break;
-	case RDMA_PS_IB:
-		ps = &ib_ps;
-		break;
 	default:
 		return -EPROTONOSUPPORT;
 	}
@@ -2310,7 +2251,7 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
 			       struct sockaddr *addr)
 {
-#if IS_ENABLED(CONFIG_IPV6)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	struct sockaddr_in6 *sin6;
 
 	if (addr->sa_family != AF_INET6)
@@ -2403,14 +2344,6 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 	}
 
 	memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
-	if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
-		if (addr->sa_family == AF_INET)
-			id_priv->afonly = 1;
-#if IS_ENABLED(CONFIG_IPV6)
-		else if (addr->sa_family == AF_INET6)
-			id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
-#endif
-	}
 	ret = cma_get_port(id_priv);
 	if (ret)
 		goto err2;
@@ -2559,9 +2492,6 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 
 	req.private_data_len = sizeof(struct cma_hdr) +
 			       conn_param->private_data_len;
-	if (req.private_data_len < conn_param->private_data_len)
-		return -EINVAL;
-
 	req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
 	if (!req.private_data)
 		return -ENOMEM;
@@ -2611,9 +2541,6 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv->id.ps);
 	req.private_data_len = offset + conn_param->private_data_len;
-	if (req.private_data_len < conn_param->private_data_len)
-		return -EINVAL;
-
 	private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
 	if (!private_data)
 		return -ENOMEM;
@@ -2642,13 +2569,13 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	req.service_id = cma_get_service_id(id_priv->id.ps,
 					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
-	req.qp_type = id_priv->id.qp_type;
+	req.qp_type = IB_QPT_RC;
 	req.starting_psn = id_priv->seq_num;
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
 	req.flow_control = conn_param->flow_control;
-	req.retry_count = min_t(u8, 7, conn_param->retry_count);
-	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
+	req.retry_count = conn_param->retry_count;
+	req.rnr_retry_count = conn_param->rnr_retry_count;
 	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2689,16 +2616,14 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	if (conn_param) {
-		iw_param.ord = conn_param->initiator_depth;
-		iw_param.ird = conn_param->responder_resources;
-		iw_param.private_data = conn_param->private_data;
-		iw_param.private_data_len = conn_param->private_data_len;
-		iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
-	} else {
-		memset(&iw_param, 0, sizeof iw_param);
+	iw_param.ord = conn_param->initiator_depth;
+	iw_param.ird = conn_param->responder_resources;
+	iw_param.private_data = conn_param->private_data;
+	iw_param.private_data_len = conn_param->private_data_len;
+	if (id_priv->id.qp)
 		iw_param.qpn = id_priv->qp_num;
-	}
+	else
+		iw_param.qpn = conn_param->qp_num;
 	ret = iw_cm_connect(cm_id, &iw_param);
 out:
 	if (ret) {
@@ -2769,7 +2694,7 @@ static int cma_accept_ib(struct rdma_id_private *id_priv,
 	rep.initiator_depth = conn_param->initiator_depth;
 	rep.failover_accepted = 0;
 	rep.flow_control = conn_param->flow_control;
-	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
+	rep.rnr_retry_count = conn_param->rnr_retry_count;
 	rep.srq = id_priv->srq ? 1 : 0;
 
 	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
@@ -2840,20 +2765,14 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (id->qp_type == IB_QPT_UD) {
-			if (conn_param)
-				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
-							conn_param->private_data,
-							conn_param->private_data_len);
-			else
-				ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
-							NULL, 0);
-		} else {
-			if (conn_param)
-				ret = cma_accept_ib(id_priv, conn_param);
-			else
-				ret = cma_rep_recv(id_priv);
-		}
+		if (id->qp_type == IB_QPT_UD)
+			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
+						conn_param->private_data,
+						conn_param->private_data_len);
+		else if (conn_param)
+			ret = cma_accept_ib(id_priv, conn_param);
+		else
+			ret = cma_rep_recv(id_priv);
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = cma_accept_iw(id_priv, conn_param);
@@ -2972,7 +2891,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
2972 | mutex_lock(&id_priv->qp_mutex); | 2891 | mutex_lock(&id_priv->qp_mutex); |
2973 | if (!status && id_priv->id.qp) | 2892 | if (!status && id_priv->id.qp) |
2974 | status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, | 2893 | status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, |
2975 | be16_to_cpu(multicast->rec.mlid)); | 2894 | multicast->rec.mlid); |
2976 | mutex_unlock(&id_priv->qp_mutex); | 2895 | mutex_unlock(&id_priv->qp_mutex); |
2977 | 2896 | ||
2978 | memset(&event, 0, sizeof event); | 2897 | memset(&event, 0, sizeof event); |
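Note: the mlid change is an endianness fix. The multicast LID in ib_sa_mcmember_rec is a big-endian __be16 as it arrives from the SA, while ib_attach_mcast() expects a host-order u16, so the value has to be converted at the call site (sparse flags the unconverted form):

	/* rec.mlid is __be16 on the wire; ib_attach_mcast() wants host order */
	status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
				 be16_to_cpu(multicast->rec.mlid));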
@@ -3057,16 +2976,16 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv, | |||
3057 | 2976 | ||
3058 | if (id_priv->id.ps == RDMA_PS_IPOIB) | 2977 | if (id_priv->id.ps == RDMA_PS_IPOIB) |
3059 | comp_mask |= IB_SA_MCMEMBER_REC_RATE | | 2978 | comp_mask |= IB_SA_MCMEMBER_REC_RATE | |
3060 | IB_SA_MCMEMBER_REC_RATE_SELECTOR | | 2979 | IB_SA_MCMEMBER_REC_RATE_SELECTOR; |
3061 | IB_SA_MCMEMBER_REC_MTU_SELECTOR | | ||
3062 | IB_SA_MCMEMBER_REC_MTU | | ||
3063 | IB_SA_MCMEMBER_REC_HOP_LIMIT; | ||
3064 | 2980 | ||
3065 | mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, | 2981 | mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, |
3066 | id_priv->id.port_num, &rec, | 2982 | id_priv->id.port_num, &rec, |
3067 | comp_mask, GFP_KERNEL, | 2983 | comp_mask, GFP_KERNEL, |
3068 | cma_ib_mc_handler, mc); | 2984 | cma_ib_mc_handler, mc); |
3069 | return PTR_RET(mc->multicast.ib); | 2985 | if (IS_ERR(mc->multicast.ib)) |
2986 | return PTR_ERR(mc->multicast.ib); | ||
2987 | |||
2988 | return 0; | ||
3070 | } | 2989 | } |
3071 | 2990 | ||
3072 | static void iboe_mcast_work_handler(struct work_struct *work) | 2991 | static void iboe_mcast_work_handler(struct work_struct *work) |
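Note: ib_sa_join_multicast() returns either a valid pointer or an ERR_PTR-encoded errno, and the two columns above are equivalent ways of folding that into an int. PTR_RET() (later renamed PTR_ERR_OR_ZERO()) is simply the one-line helper for the open-coded IS_ERR()/PTR_ERR() dance:

	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
						id_priv->id.port_num, &rec,
						comp_mask, GFP_KERNEL,
						cma_ib_mc_handler, mc);
	return PTR_RET(mc->multicast.ib);	/* 0 on success, -errno on failure */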
@@ -3233,7 +3152,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) | |||
3233 | if (id->qp) | 3152 | if (id->qp) |
3234 | ib_detach_mcast(id->qp, | 3153 | ib_detach_mcast(id->qp, |
3235 | &mc->multicast.ib->rec.mgid, | 3154 | &mc->multicast.ib->rec.mgid, |
3236 | be16_to_cpu(mc->multicast.ib->rec.mlid)); | 3155 | mc->multicast.ib->rec.mlid); |
3237 | if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { | 3156 | if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { |
3238 | switch (rdma_port_get_link_layer(id->device, id->port_num)) { | 3157 | switch (rdma_port_get_link_layer(id->device, id->port_num)) { |
3239 | case IB_LINK_LAYER_INFINIBAND: | 3158 | case IB_LINK_LAYER_INFINIBAND: |
@@ -3497,8 +3416,7 @@ out: | |||
3497 | } | 3416 | } |
3498 | 3417 | ||
3499 | static const struct ibnl_client_cbs cma_cb_table[] = { | 3418 | static const struct ibnl_client_cbs cma_cb_table[] = { |
3500 | [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats, | 3419 | [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats }, |
3501 | .module = THIS_MODULE }, | ||
3502 | }; | 3420 | }; |
3503 | 3421 | ||
3504 | static int __init cma_init(void) | 3422 | static int __init cma_init(void) |
@@ -3542,7 +3460,6 @@ static void __exit cma_cleanup(void) | |||
3542 | idr_destroy(&tcp_ps); | 3460 | idr_destroy(&tcp_ps); |
3543 | idr_destroy(&udp_ps); | 3461 | idr_destroy(&udp_ps); |
3544 | idr_destroy(&ipoib_ps); | 3462 | idr_destroy(&ipoib_ps); |
3545 | idr_destroy(&ib_ps); | ||
3546 | } | 3463 | } |
3547 | 3464 | ||
3548 | module_init(cma_init); | 3465 | module_init(cma_init); |
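Note: two independent details disappear at the end of cma.c. The RDMA_NL_RDMA_CM_ID_STATS callback loses its .module owner, which is what pins the module while a netlink dump is in flight, and cma_cleanup() stops destroying the ib_ps IDR backing the RDMA_PS_IB port space. The owned callback table as the left-hand column declares it:

	static const struct ibnl_client_cbs cma_cb_table[] = {
		[RDMA_NL_RDMA_CM_ID_STATS] = {
			.dump	= cma_get_id_stats,
			.module	= THIS_MODULE,	/* hold a reference across the dump */
		},
	};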
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 18c1ece765f..e711de400a0 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
@@ -707,28 +707,18 @@ int ib_find_pkey(struct ib_device *device, | |||
707 | { | 707 | { |
708 | int ret, i; | 708 | int ret, i; |
709 | u16 tmp_pkey; | 709 | u16 tmp_pkey; |
710 | int partial_ix = -1; | ||
711 | 710 | ||
712 | for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) { | 711 | for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) { |
713 | ret = ib_query_pkey(device, port_num, i, &tmp_pkey); | 712 | ret = ib_query_pkey(device, port_num, i, &tmp_pkey); |
714 | if (ret) | 713 | if (ret) |
715 | return ret; | 714 | return ret; |
715 | |||
716 | if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { | 716 | if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) { |
717 | /* if there is a full-member pkey, take it. */ | 717 | *index = i;
718 | if (tmp_pkey & 0x8000) { | 718 | return 0; |
719 | *index = i; | ||
720 | return 0; | ||
721 | } | ||
722 | if (partial_ix < 0) | ||
723 | partial_ix = i; | ||
724 | } | 719 | } |
725 | } | 720 | } |
726 | 721 | ||
727 | /* no full-member pkey; if a limited one exists, take it */ | ||
728 | if (partial_ix >= 0) { | ||
729 | *index = partial_ix; | ||
730 | return 0; | ||
731 | } | ||
732 | return -ENOENT; | 722 | return -ENOENT; |
733 | } | 723 | } |
734 | EXPORT_SYMBOL(ib_find_pkey); | 724 | EXPORT_SYMBOL(ib_find_pkey); |
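Note: the removed lines implement the P_Key membership rule. Bit 15 of a P_Key distinguishes full members (set) from limited members (clear), and the lookup should prefer a full-member entry but fall back to a limited one rather than failing outright. A condensed sketch of that search (tbl_len stands in for the per-port table length used above):

	int partial_ix = -1;

	for (i = 0; i < tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			if (tmp_pkey & 0x8000) {	/* full member: best match */
				*index = i;
				return 0;
			}
			if (partial_ix < 0)		/* remember first limited match */
				partial_ix = i;
		}
	}
	if (partial_ix >= 0) {		/* no full member found: take the limited one */
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;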
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c index 176c8f90f2b..4507043d24c 100644 --- a/drivers/infiniband/core/fmr_pool.c +++ b/drivers/infiniband/core/fmr_pool.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
35 | #include <linux/spinlock.h> | 35 | #include <linux/spinlock.h> |
36 | #include <linux/export.h> | ||
37 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
38 | #include <linux/jhash.h> | 37 | #include <linux/jhash.h> |
39 | #include <linux/kthread.h> | 38 | #include <linux/kthread.h> |
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 0bb99bb3880..a9c042345c6 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <linux/workqueue.h> | 45 | #include <linux/workqueue.h> |
46 | #include <linux/completion.h> | 46 | #include <linux/completion.h> |
47 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
48 | #include <linux/module.h> | ||
49 | 48 | ||
50 | #include <rdma/iw_cm.h> | 49 | #include <rdma/iw_cm.h> |
51 | #include <rdma/ib_addr.h> | 50 | #include <rdma/ib_addr.h> |
@@ -624,6 +623,17 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, | |||
624 | */ | 623 | */ |
625 | BUG_ON(iw_event->status); | 624 | BUG_ON(iw_event->status); |
626 | 625 | ||
626 | /* | ||
627 | * We could be destroying the listening id. If so, ignore this | ||
628 | * upcall. | ||
629 | */ | ||
630 | spin_lock_irqsave(&listen_id_priv->lock, flags); | ||
631 | if (listen_id_priv->state != IW_CM_STATE_LISTEN) { | ||
632 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
633 | goto out; | ||
634 | } | ||
635 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
636 | |||
627 | cm_id = iw_create_cm_id(listen_id_priv->id.device, | 637 | cm_id = iw_create_cm_id(listen_id_priv->id.device, |
628 | listen_id_priv->id.cm_handler, | 638 | listen_id_priv->id.cm_handler, |
629 | listen_id_priv->id.context); | 639 | listen_id_priv->id.context); |
@@ -638,19 +648,6 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv, | |||
638 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); | 648 | cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); |
639 | cm_id_priv->state = IW_CM_STATE_CONN_RECV; | 649 | cm_id_priv->state = IW_CM_STATE_CONN_RECV; |
640 | 650 | ||
641 | /* | ||
642 | * We could be destroying the listening id. If so, ignore this | ||
643 | * upcall. | ||
644 | */ | ||
645 | spin_lock_irqsave(&listen_id_priv->lock, flags); | ||
646 | if (listen_id_priv->state != IW_CM_STATE_LISTEN) { | ||
647 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
648 | iw_cm_reject(cm_id, NULL, 0); | ||
649 | iw_destroy_cm_id(cm_id); | ||
650 | goto out; | ||
651 | } | ||
652 | spin_unlock_irqrestore(&listen_id_priv->lock, flags); | ||
653 | |||
654 | ret = alloc_work_entries(cm_id_priv, 3); | 651 | ret = alloc_work_entries(cm_id_priv, 3); |
655 | if (ret) { | 652 | if (ret) { |
656 | iw_cm_reject(cm_id, NULL, 0); | 653 | iw_cm_reject(cm_id, NULL, 0); |
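Note: both sides guard against a connect request racing with destruction of the listening id, but they differ in where the IW_CM_STATE_LISTEN check sits. Checking before iw_create_cm_id() (right-hand column) avoids allocating an id at all; checking after (left-hand column) narrows the race window but must then explicitly reject and destroy the half-built id, since the peer is already waiting on an answer:

	spin_lock_irqsave(&listen_id_priv->lock, flags);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		spin_unlock_irqrestore(&listen_id_priv->lock, flags);
		iw_cm_reject(cm_id, NULL, 0);	/* tell the peer, then tear down */
		iw_destroy_cm_id(cm_id);
		goto out;
	}
	spin_unlock_irqrestore(&listen_id_priv->lock, flags);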
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index dc3fd1e8af0..b4d8672a3e4 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -35,7 +35,6 @@ | |||
35 | */ | 35 | */ |
36 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/module.h> | ||
39 | #include <rdma/ib_cache.h> | 38 | #include <rdma/ib_cache.h> |
40 | 39 | ||
41 | #include "mad_priv.h" | 40 | #include "mad_priv.h" |
@@ -1597,9 +1596,6 @@ find_mad_agent(struct ib_mad_port_private *port_priv, | |||
1597 | mad->mad_hdr.class_version].class; | 1596 | mad->mad_hdr.class_version].class; |
1598 | if (!class) | 1597 | if (!class) |
1599 | goto out; | 1598 | goto out; |
1600 | if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >= | ||
1601 | IB_MGMT_MAX_METHODS) | ||
1602 | goto out; | ||
1603 | method = class->method_table[convert_mgmt_class( | 1599 | method = class->method_table[convert_mgmt_class( |
1604 | mad->mad_hdr.mgmt_class)]; | 1600 | mad->mad_hdr.mgmt_class)]; |
1605 | if (method) | 1601 | if (method) |
@@ -1842,26 +1838,6 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, | |||
1842 | } | 1838 | } |
1843 | } | 1839 | } |
1844 | 1840 | ||
1845 | static bool generate_unmatched_resp(struct ib_mad_private *recv, | ||
1846 | struct ib_mad_private *response) | ||
1847 | { | ||
1848 | if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET || | ||
1849 | recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) { | ||
1850 | memcpy(response, recv, sizeof *response); | ||
1851 | response->header.recv_wc.wc = &response->header.wc; | ||
1852 | response->header.recv_wc.recv_buf.mad = &response->mad.mad; | ||
1853 | response->header.recv_wc.recv_buf.grh = &response->grh; | ||
1854 | response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; | ||
1855 | response->mad.mad.mad_hdr.status = | ||
1856 | cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); | ||
1857 | if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) | ||
1858 | response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION; | ||
1859 | |||
1860 | return true; | ||
1861 | } else { | ||
1862 | return false; | ||
1863 | } | ||
1864 | } | ||
1865 | static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | 1841 | static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, |
1866 | struct ib_wc *wc) | 1842 | struct ib_wc *wc) |
1867 | { | 1843 | { |
@@ -1871,7 +1847,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | |||
1871 | struct ib_mad_list_head *mad_list; | 1847 | struct ib_mad_list_head *mad_list; |
1872 | struct ib_mad_agent_private *mad_agent; | 1848 | struct ib_mad_agent_private *mad_agent; |
1873 | int port_num; | 1849 | int port_num; |
1874 | int ret = IB_MAD_RESULT_SUCCESS; | ||
1875 | 1850 | ||
1876 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; | 1851 | mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; |
1877 | qp_info = mad_list->mad_queue->qp_info; | 1852 | qp_info = mad_list->mad_queue->qp_info; |
@@ -1955,6 +1930,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, | |||
1955 | local: | 1930 | local: |
1956 | /* Give driver "right of first refusal" on incoming MAD */ | 1931 | /* Give driver "right of first refusal" on incoming MAD */ |
1957 | if (port_priv->device->process_mad) { | 1932 | if (port_priv->device->process_mad) { |
1933 | int ret; | ||
1934 | |||
1958 | ret = port_priv->device->process_mad(port_priv->device, 0, | 1935 | ret = port_priv->device->process_mad(port_priv->device, 0, |
1959 | port_priv->port_num, | 1936 | port_priv->port_num, |
1960 | wc, &recv->grh, | 1937 | wc, &recv->grh, |
@@ -1982,10 +1959,6 @@ local: | |||
1982 | * or via recv_handler in ib_mad_complete_recv() | 1959 | * or via recv_handler in ib_mad_complete_recv() |
1983 | */ | 1960 | */ |
1984 | recv = NULL; | 1961 | recv = NULL; |
1985 | } else if ((ret & IB_MAD_RESULT_SUCCESS) && | ||
1986 | generate_unmatched_resp(recv, response)) { | ||
1987 | agent_send_response(&response->mad.mad, &recv->grh, wc, | ||
1988 | port_priv->device, port_num, qp_info->qp->qp_num); | ||
1989 | } | 1962 | } |
1990 | 1963 | ||
1991 | out: | 1964 | out: |
@@ -2004,7 +1977,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | |||
2004 | unsigned long delay; | 1977 | unsigned long delay; |
2005 | 1978 | ||
2006 | if (list_empty(&mad_agent_priv->wait_list)) { | 1979 | if (list_empty(&mad_agent_priv->wait_list)) { |
2007 | cancel_delayed_work(&mad_agent_priv->timed_work); | 1980 | __cancel_delayed_work(&mad_agent_priv->timed_work); |
2008 | } else { | 1981 | } else { |
2009 | mad_send_wr = list_entry(mad_agent_priv->wait_list.next, | 1982 | mad_send_wr = list_entry(mad_agent_priv->wait_list.next, |
2010 | struct ib_mad_send_wr_private, | 1983 | struct ib_mad_send_wr_private, |
@@ -2013,11 +1986,13 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv) | |||
2013 | if (time_after(mad_agent_priv->timeout, | 1986 | if (time_after(mad_agent_priv->timeout, |
2014 | mad_send_wr->timeout)) { | 1987 | mad_send_wr->timeout)) { |
2015 | mad_agent_priv->timeout = mad_send_wr->timeout; | 1988 | mad_agent_priv->timeout = mad_send_wr->timeout; |
1989 | __cancel_delayed_work(&mad_agent_priv->timed_work); | ||
2016 | delay = mad_send_wr->timeout - jiffies; | 1990 | delay = mad_send_wr->timeout - jiffies; |
2017 | if ((long)delay <= 0) | 1991 | if ((long)delay <= 0) |
2018 | delay = 1; | 1992 | delay = 1; |
2019 | mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, | 1993 | queue_delayed_work(mad_agent_priv->qp_info-> |
2020 | &mad_agent_priv->timed_work, delay); | 1994 | port_priv->wq, |
1995 | &mad_agent_priv->timed_work, delay); | ||
2021 | } | 1996 | } |
2022 | } | 1997 | } |
2023 | } | 1998 | } |
@@ -2050,9 +2025,11 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr) | |||
2050 | list_add(&mad_send_wr->agent_list, list_item); | 2025 | list_add(&mad_send_wr->agent_list, list_item); |
2051 | 2026 | ||
2052 | /* Reschedule a work item if we have a shorter timeout */ | 2027 | /* Reschedule a work item if we have a shorter timeout */ |
2053 | if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) | 2028 | if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) { |
2054 | mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq, | 2029 | __cancel_delayed_work(&mad_agent_priv->timed_work); |
2055 | &mad_agent_priv->timed_work, delay); | 2030 | queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq, |
2031 | &mad_agent_priv->timed_work, delay); | ||
2032 | } | ||
2056 | } | 2033 | } |
2057 | 2034 | ||
2058 | void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, | 2035 | void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr, |
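Note: the timer hunks in mad.c are all one substitution. mod_delayed_work() atomically re-arms a delayed work item, replacing the racy two-step __cancel_delayed_work() plus queue_delayed_work() sequence (between those calls the timer could fire and the work could run or be queued twice). The unmatched-response hunk is separate: generate_unmatched_resp() turns an unclaimed GET/SET MAD into a GET_RESP carrying an "unsupported method/attribute" status instead of dropping it silently. The atomic re-arm, in sketch form:

	unsigned long delay = mad_send_wr->timeout - jiffies;

	if ((long)delay <= 0)
		delay = 1;
	/* cancel-if-pending and queue with the new expiry, as one operation */
	mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
			 &mad_agent_priv->timed_work, delay);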
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index d2360a8ef0b..68b4162fd9d 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
35 | #include <linux/err.h> | 35 | #include <linux/err.h> |
36 | #include <linux/interrupt.h> | 36 | #include <linux/interrupt.h> |
37 | #include <linux/export.h> | ||
38 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
39 | #include <linux/bitops.h> | 38 | #include <linux/bitops.h> |
40 | #include <linux/random.h> | 39 | #include <linux/random.h> |
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index da06abde9e0..9227f4acd79 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c | |||
@@ -32,7 +32,6 @@ | |||
32 | 32 | ||
33 | #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ | 33 | #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ |
34 | 34 | ||
35 | #include <linux/export.h> | ||
36 | #include <net/netlink.h> | 35 | #include <net/netlink.h> |
37 | #include <net/net_namespace.h> | 36 | #include <net/net_namespace.h> |
38 | #include <net/sock.h> | 37 | #include <net/sock.h> |
@@ -108,14 +107,12 @@ void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | |||
108 | unsigned char *prev_tail; | 107 | unsigned char *prev_tail; |
109 | 108 | ||
110 | prev_tail = skb_tail_pointer(skb); | 109 | prev_tail = skb_tail_pointer(skb); |
111 | *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), | 110 | *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), |
112 | len, NLM_F_MULTI); | 111 | len, NLM_F_MULTI); |
113 | if (!*nlh) | ||
114 | goto out_nlmsg_trim; | ||
115 | (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; | 112 | (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; |
116 | return nlmsg_data(*nlh); | 113 | return NLMSG_DATA(*nlh); |
117 | 114 | ||
118 | out_nlmsg_trim: | 115 | nlmsg_failure: |
119 | nlmsg_trim(skb, prev_tail); | 116 | nlmsg_trim(skb, prev_tail); |
120 | return NULL; | 117 | return NULL; |
121 | } | 118 | } |
@@ -127,8 +124,7 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
127 | unsigned char *prev_tail; | 124 | unsigned char *prev_tail; |
128 | 125 | ||
129 | prev_tail = skb_tail_pointer(skb); | 126 | prev_tail = skb_tail_pointer(skb); |
130 | if (nla_put(skb, type, len, data)) | 127 | NLA_PUT(skb, type, len, data); |
131 | goto nla_put_failure; | ||
132 | nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; | 128 | nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; |
133 | return 0; | 129 | return 0; |
134 | 130 | ||
@@ -150,14 +146,9 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
150 | if (op < 0 || op >= client->nops || | 146 | if (op < 0 || op >= client->nops || |
151 | !client->cb_table[RDMA_NL_GET_OP(op)].dump) | 147 | !client->cb_table[RDMA_NL_GET_OP(op)].dump) |
152 | return -EINVAL; | 148 | return -EINVAL; |
153 | 149 | return netlink_dump_start(nls, skb, nlh, | |
154 | { | 150 | client->cb_table[op].dump, |
155 | struct netlink_dump_control c = { | 151 | NULL, 0); |
156 | .dump = client->cb_table[op].dump, | ||
157 | .module = client->cb_table[op].module, | ||
158 | }; | ||
159 | return netlink_dump_start(nls, skb, nlh, &c); | ||
160 | } | ||
161 | } | 152 | } |
162 | } | 153 | } |
163 | 154 | ||
@@ -174,11 +165,8 @@ static void ibnl_rcv(struct sk_buff *skb) | |||
174 | 165 | ||
175 | int __init ibnl_init(void) | 166 | int __init ibnl_init(void) |
176 | { | 167 | { |
177 | struct netlink_kernel_cfg cfg = { | 168 | nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv, |
178 | .input = ibnl_rcv, | 169 | NULL, THIS_MODULE); |
179 | }; | ||
180 | |||
181 | nls = netlink_kernel_create(&init_net, NETLINK_RDMA, &cfg); | ||
182 | if (!nls) { | 170 | if (!nls) { |
183 | pr_warn("Failed to create netlink socket\n"); | 171 | pr_warn("Failed to create netlink socket\n"); |
184 | return -ENOMEM; | 172 | return -ENOMEM; |
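Note: the netlink.c hunks all track one API generation. NLMSG_NEW and NLA_PUT were goto-label macros that jump to nlmsg_failure / nla_put_failure on overflow (which is why those labels appear in the right-hand column), whereas nlmsg_put() and nla_put() report failure through return values; likewise netlink_kernel_create() and netlink_dump_start() grew configuration structs in place of long argument lists. The dump-start form on the left, which also names the owning module so it cannot be unloaded mid-dump:

	struct netlink_dump_control c = {
		.dump	= client->cb_table[op].dump,
		.module	= client->cb_table[op].module,	/* pin the client while dumping */
	};

	return netlink_dump_start(nls, skb, nlh, &c);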
diff --git a/drivers/infiniband/core/packer.c b/drivers/infiniband/core/packer.c index 1b65986c0be..019bd4b0863 100644 --- a/drivers/infiniband/core/packer.c +++ b/drivers/infiniband/core/packer.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/export.h> | ||
35 | #include <linux/string.h> | 34 | #include <linux/string.h> |
36 | 35 | ||
37 | #include <rdma/ib_pack.h> | 36 | #include <rdma/ib_pack.h> |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index a8905abc56e..fbbfa24cf57 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -94,12 +94,6 @@ struct ib_sa_path_query { | |||
94 | struct ib_sa_query sa_query; | 94 | struct ib_sa_query sa_query; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | struct ib_sa_guidinfo_query { | ||
98 | void (*callback)(int, struct ib_sa_guidinfo_rec *, void *); | ||
99 | void *context; | ||
100 | struct ib_sa_query sa_query; | ||
101 | }; | ||
102 | |||
103 | struct ib_sa_mcmember_query { | 97 | struct ib_sa_mcmember_query { |
104 | void (*callback)(int, struct ib_sa_mcmember_rec *, void *); | 98 | void (*callback)(int, struct ib_sa_mcmember_rec *, void *); |
105 | void *context; | 99 | void *context; |
@@ -353,34 +347,6 @@ static const struct ib_field service_rec_table[] = { | |||
353 | .size_bits = 2*64 }, | 347 | .size_bits = 2*64 }, |
354 | }; | 348 | }; |
355 | 349 | ||
356 | #define GUIDINFO_REC_FIELD(field) \ | ||
357 | .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \ | ||
358 | .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \ | ||
359 | .field_name = "sa_guidinfo_rec:" #field | ||
360 | |||
361 | static const struct ib_field guidinfo_rec_table[] = { | ||
362 | { GUIDINFO_REC_FIELD(lid), | ||
363 | .offset_words = 0, | ||
364 | .offset_bits = 0, | ||
365 | .size_bits = 16 }, | ||
366 | { GUIDINFO_REC_FIELD(block_num), | ||
367 | .offset_words = 0, | ||
368 | .offset_bits = 16, | ||
369 | .size_bits = 8 }, | ||
370 | { GUIDINFO_REC_FIELD(res1), | ||
371 | .offset_words = 0, | ||
372 | .offset_bits = 24, | ||
373 | .size_bits = 8 }, | ||
374 | { GUIDINFO_REC_FIELD(res2), | ||
375 | .offset_words = 1, | ||
376 | .offset_bits = 0, | ||
377 | .size_bits = 32 }, | ||
378 | { GUIDINFO_REC_FIELD(guid_info_list), | ||
379 | .offset_words = 2, | ||
380 | .offset_bits = 0, | ||
381 | .size_bits = 512 }, | ||
382 | }; | ||
383 | |||
384 | static void free_sm_ah(struct kref *kref) | 350 | static void free_sm_ah(struct kref *kref) |
385 | { | 351 | { |
386 | struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); | 352 | struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref); |
@@ -979,105 +945,6 @@ err1: | |||
979 | return ret; | 945 | return ret; |
980 | } | 946 | } |
981 | 947 | ||
982 | /* Support GuidInfoRecord */ | ||
983 | static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query, | ||
984 | int status, | ||
985 | struct ib_sa_mad *mad) | ||
986 | { | ||
987 | struct ib_sa_guidinfo_query *query = | ||
988 | container_of(sa_query, struct ib_sa_guidinfo_query, sa_query); | ||
989 | |||
990 | if (mad) { | ||
991 | struct ib_sa_guidinfo_rec rec; | ||
992 | |||
993 | ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), | ||
994 | mad->data, &rec); | ||
995 | query->callback(status, &rec, query->context); | ||
996 | } else | ||
997 | query->callback(status, NULL, query->context); | ||
998 | } | ||
999 | |||
1000 | static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query) | ||
1001 | { | ||
1002 | kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query)); | ||
1003 | } | ||
1004 | |||
1005 | int ib_sa_guid_info_rec_query(struct ib_sa_client *client, | ||
1006 | struct ib_device *device, u8 port_num, | ||
1007 | struct ib_sa_guidinfo_rec *rec, | ||
1008 | ib_sa_comp_mask comp_mask, u8 method, | ||
1009 | int timeout_ms, gfp_t gfp_mask, | ||
1010 | void (*callback)(int status, | ||
1011 | struct ib_sa_guidinfo_rec *resp, | ||
1012 | void *context), | ||
1013 | void *context, | ||
1014 | struct ib_sa_query **sa_query) | ||
1015 | { | ||
1016 | struct ib_sa_guidinfo_query *query; | ||
1017 | struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); | ||
1018 | struct ib_sa_port *port; | ||
1019 | struct ib_mad_agent *agent; | ||
1020 | struct ib_sa_mad *mad; | ||
1021 | int ret; | ||
1022 | |||
1023 | if (!sa_dev) | ||
1024 | return -ENODEV; | ||
1025 | |||
1026 | if (method != IB_MGMT_METHOD_GET && | ||
1027 | method != IB_MGMT_METHOD_SET && | ||
1028 | method != IB_SA_METHOD_DELETE) { | ||
1029 | return -EINVAL; | ||
1030 | } | ||
1031 | |||
1032 | port = &sa_dev->port[port_num - sa_dev->start_port]; | ||
1033 | agent = port->agent; | ||
1034 | |||
1035 | query = kmalloc(sizeof *query, gfp_mask); | ||
1036 | if (!query) | ||
1037 | return -ENOMEM; | ||
1038 | |||
1039 | query->sa_query.port = port; | ||
1040 | ret = alloc_mad(&query->sa_query, gfp_mask); | ||
1041 | if (ret) | ||
1042 | goto err1; | ||
1043 | |||
1044 | ib_sa_client_get(client); | ||
1045 | query->sa_query.client = client; | ||
1046 | query->callback = callback; | ||
1047 | query->context = context; | ||
1048 | |||
1049 | mad = query->sa_query.mad_buf->mad; | ||
1050 | init_mad(mad, agent); | ||
1051 | |||
1052 | query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL; | ||
1053 | query->sa_query.release = ib_sa_guidinfo_rec_release; | ||
1054 | |||
1055 | mad->mad_hdr.method = method; | ||
1056 | mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC); | ||
1057 | mad->sa_hdr.comp_mask = comp_mask; | ||
1058 | |||
1059 | ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec, | ||
1060 | mad->data); | ||
1061 | |||
1062 | *sa_query = &query->sa_query; | ||
1063 | |||
1064 | ret = send_mad(&query->sa_query, timeout_ms, gfp_mask); | ||
1065 | if (ret < 0) | ||
1066 | goto err2; | ||
1067 | |||
1068 | return ret; | ||
1069 | |||
1070 | err2: | ||
1071 | *sa_query = NULL; | ||
1072 | ib_sa_client_put(query->sa_query.client); | ||
1073 | free_mad(&query->sa_query); | ||
1074 | |||
1075 | err1: | ||
1076 | kfree(query); | ||
1077 | return ret; | ||
1078 | } | ||
1079 | EXPORT_SYMBOL(ib_sa_guid_info_rec_query); | ||
1080 | |||
1081 | static void send_handler(struct ib_mad_agent *agent, | 948 | static void send_handler(struct ib_mad_agent *agent, |
1082 | struct ib_mad_send_wc *mad_send_wc) | 949 | struct ib_mad_send_wc *mad_send_wc) |
1083 | { | 950 | { |
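Note: the large sa_query.c removal strips GuidInfoRecord support end to end: the packed wire layout (guidinfo_rec_table), the completion callback, and the ib_sa_guid_info_rec_query() entry point drivers use to GET/SET/DELETE GUID table blocks through the subnet administrator. Judging only from the signature above, a caller might look like the following; my_sa_client, my_guidinfo_cb and the comp_mask constant are placeholders, not taken from the source:

	static void my_guidinfo_cb(int status, struct ib_sa_guidinfo_rec *resp,
				   void *context)
	{
		if (!status && resp)
			pr_info("GUID block %d for LID 0x%x received\n",
				resp->block_num, be16_to_cpu(resp->lid));
	}

	/* hypothetical caller: fetch one GUID table block via the SA */
	struct ib_sa_guidinfo_rec rec = { .block_num = 0 };
	struct ib_sa_query *query;
	int ret;

	ret = ib_sa_guid_info_rec_query(&my_sa_client, device, port_num, &rec,
					IB_SA_GUIDINFO_REC_BLOCK_NUM, /* assumed mask */
					IB_MGMT_METHOD_GET, 1000 /* ms */,
					GFP_KERNEL, my_guidinfo_cb, NULL, &query);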
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 246fdc15165..9ab5df72df7 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include "core_priv.h" | 35 | #include "core_priv.h" |
36 | 36 | ||
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/stat.h> | ||
39 | #include <linux/string.h> | 38 | #include <linux/string.h> |
40 | 39 | ||
41 | #include <rdma/ib_mad.h> | 40 | #include <rdma/ib_mad.h> |
@@ -179,7 +178,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, | |||
179 | { | 178 | { |
180 | struct ib_port_attr attr; | 179 | struct ib_port_attr attr; |
181 | char *speed = ""; | 180 | char *speed = ""; |
182 | int rate; /* in deci-Gb/sec */ | 181 | int rate; |
183 | ssize_t ret; | 182 | ssize_t ret; |
184 | 183 | ||
185 | ret = ib_query_port(p->ibdev, p->port_num, &attr); | 184 | ret = ib_query_port(p->ibdev, p->port_num, &attr); |
@@ -187,33 +186,11 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, | |||
187 | return ret; | 186 | return ret; |
188 | 187 | ||
189 | switch (attr.active_speed) { | 188 | switch (attr.active_speed) { |
190 | case IB_SPEED_DDR: | 189 | case 2: speed = " DDR"; break; |
191 | speed = " DDR"; | 190 | case 4: speed = " QDR"; break; |
192 | rate = 50; | ||
193 | break; | ||
194 | case IB_SPEED_QDR: | ||
195 | speed = " QDR"; | ||
196 | rate = 100; | ||
197 | break; | ||
198 | case IB_SPEED_FDR10: | ||
199 | speed = " FDR10"; | ||
200 | rate = 100; | ||
201 | break; | ||
202 | case IB_SPEED_FDR: | ||
203 | speed = " FDR"; | ||
204 | rate = 140; | ||
205 | break; | ||
206 | case IB_SPEED_EDR: | ||
207 | speed = " EDR"; | ||
208 | rate = 250; | ||
209 | break; | ||
210 | case IB_SPEED_SDR: | ||
211 | default: /* default to SDR for invalid rates */ | ||
212 | rate = 25; | ||
213 | break; | ||
214 | } | 191 | } |
215 | 192 | ||
216 | rate *= ib_width_enum_to_int(attr.active_width); | 193 | rate = 25 * ib_width_enum_to_int(attr.active_width) * attr.active_speed; |
217 | if (rate < 0) | 194 | if (rate < 0) |
218 | return -EINVAL; | 195 | return -EINVAL; |
219 | 196 | ||
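Note: the rate_show() rewrite exists because 25 * width * active_speed only works while the speed enumeration is a pure multiplier (SDR=1, DDR=2, QDR=4). With FDR10, FDR and EDR the encoding stops being linear (FDR is 14.0625 Gb/s per lane, not 16x SDR), so each speed needs an explicit per-lane base rate in deci-Gb/s, scaled afterwards by the lane width:

	int rate;	/* deci-Gb/s per lane */

	switch (attr.active_speed) {
	case IB_SPEED_DDR:   speed = " DDR";   rate = 50;  break;
	case IB_SPEED_QDR:   speed = " QDR";   rate = 100; break;
	case IB_SPEED_FDR10: speed = " FDR10"; rate = 100; break;
	case IB_SPEED_FDR:   speed = " FDR";   rate = 140; break;
	case IB_SPEED_EDR:   speed = " EDR";   rate = 250; break;
	case IB_SPEED_SDR:
	default:	/* treat unknown encodings as SDR */
		rate = 25;
		break;
	}
	rate *= ib_width_enum_to_int(attr.active_width);	/* 1x/4x/8x/12x */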
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 49b15ac1987..08f948df8fa 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
@@ -106,6 +106,9 @@ enum { | |||
106 | IB_UCM_MAX_DEVICES = 32 | 106 | IB_UCM_MAX_DEVICES = 32 |
107 | }; | 107 | }; |
108 | 108 | ||
109 | /* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */ | ||
110 | extern struct class cm_class; | ||
111 | |||
109 | #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR) | 112 | #define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR) |
110 | 113 | ||
111 | static void ib_ucm_add_one(struct ib_device *device); | 114 | static void ib_ucm_add_one(struct ib_device *device); |
@@ -397,6 +400,7 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file, | |||
397 | struct ib_ucm_event_get cmd; | 400 | struct ib_ucm_event_get cmd; |
398 | struct ib_ucm_event *uevent; | 401 | struct ib_ucm_event *uevent; |
399 | int result = 0; | 402 | int result = 0; |
403 | DEFINE_WAIT(wait); | ||
400 | 404 | ||
401 | if (out_len < sizeof(struct ib_ucm_event_resp)) | 405 | if (out_len < sizeof(struct ib_ucm_event_resp)) |
402 | return -ENOSPC; | 406 | return -ENOSPC; |
@@ -1118,7 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, | |||
1118 | if (copy_from_user(&hdr, buf, sizeof(hdr))) | 1122 | if (copy_from_user(&hdr, buf, sizeof(hdr))) |
1119 | return -EFAULT; | 1123 | return -EFAULT; |
1120 | 1124 | ||
1121 | if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table)) | 1125 | if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucm_cmd_table)) |
1122 | return -EINVAL; | 1126 | return -EINVAL; |
1123 | 1127 | ||
1124 | if (hdr.in + sizeof(hdr) > len) | 1128 | if (hdr.in + sizeof(hdr) > len) |
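Note: the "hdr.cmd < 0" halves of these checks are dead code. The command field in the user ABI header is an unsigned __u32, so it can never be negative, and newer compilers warn about the always-false comparison; the single upper-bound test is sufficient. The same always-false test is dropped from ucma_write() and from the agent-id checks in user_mad.c further down.

	if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))	/* cmd is __u32: no < 0 case */
		return -EINVAL;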
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 2709ff58139..71be5eebd68 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <linux/miscdevice.h> | 41 | #include <linux/miscdevice.h> |
42 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
43 | #include <linux/sysctl.h> | 43 | #include <linux/sysctl.h> |
44 | #include <linux/module.h> | ||
45 | 44 | ||
46 | #include <rdma/rdma_user_cm.h> | 45 | #include <rdma/rdma_user_cm.h> |
47 | #include <rdma/ib_marshall.h> | 46 | #include <rdma/ib_marshall.h> |
@@ -66,6 +65,12 @@ static ctl_table ucma_ctl_table[] = { | |||
66 | { } | 65 | { } |
67 | }; | 66 | }; |
68 | 67 | ||
68 | static struct ctl_path ucma_ctl_path[] = { | ||
69 | { .procname = "net" }, | ||
70 | { .procname = "rdma_ucm" }, | ||
71 | { } | ||
72 | }; | ||
73 | |||
69 | struct ucma_file { | 74 | struct ucma_file { |
70 | struct mutex mut; | 75 | struct mutex mut; |
71 | struct file *filp; | 76 | struct file *filp; |
@@ -267,17 +272,17 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, | |||
267 | if (!uevent) | 272 | if (!uevent) |
268 | return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; | 273 | return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; |
269 | 274 | ||
270 | mutex_lock(&ctx->file->mut); | ||
271 | uevent->cm_id = cm_id; | 275 | uevent->cm_id = cm_id; |
272 | ucma_set_event_context(ctx, event, uevent); | 276 | ucma_set_event_context(ctx, event, uevent); |
273 | uevent->resp.event = event->event; | 277 | uevent->resp.event = event->event; |
274 | uevent->resp.status = event->status; | 278 | uevent->resp.status = event->status; |
275 | if (cm_id->qp_type == IB_QPT_UD) | 279 | if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB) |
276 | ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud); | 280 | ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud); |
277 | else | 281 | else |
278 | ucma_copy_conn_event(&uevent->resp.param.conn, | 282 | ucma_copy_conn_event(&uevent->resp.param.conn, |
279 | &event->param.conn); | 283 | &event->param.conn); |
280 | 284 | ||
285 | mutex_lock(&ctx->file->mut); | ||
281 | if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { | 286 | if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { |
282 | if (!ctx->backlog) { | 287 | if (!ctx->backlog) { |
283 | ret = -ENOMEM; | 288 | ret = -ENOMEM; |
@@ -310,6 +315,7 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, | |||
310 | struct rdma_ucm_get_event cmd; | 315 | struct rdma_ucm_get_event cmd; |
311 | struct ucma_event *uevent; | 316 | struct ucma_event *uevent; |
312 | int ret = 0; | 317 | int ret = 0; |
318 | DEFINE_WAIT(wait); | ||
313 | 319 | ||
314 | if (out_len < sizeof uevent->resp) | 320 | if (out_len < sizeof uevent->resp) |
315 | return -ENOSPC; | 321 | return -ENOSPC; |
@@ -371,9 +377,6 @@ static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_ | |||
371 | case RDMA_PS_IPOIB: | 377 | case RDMA_PS_IPOIB: |
372 | *qp_type = IB_QPT_UD; | 378 | *qp_type = IB_QPT_UD; |
373 | return 0; | 379 | return 0; |
374 | case RDMA_PS_IB: | ||
375 | *qp_type = cmd->qp_type; | ||
376 | return 0; | ||
377 | default: | 380 | default: |
378 | return -EINVAL; | 381 | return -EINVAL; |
379 | } | 382 | } |
@@ -442,6 +445,24 @@ static void ucma_cleanup_multicast(struct ucma_context *ctx) | |||
442 | mutex_unlock(&mut); | 445 | mutex_unlock(&mut); |
443 | } | 446 | } |
444 | 447 | ||
448 | static void ucma_cleanup_events(struct ucma_context *ctx) | ||
449 | { | ||
450 | struct ucma_event *uevent, *tmp; | ||
451 | |||
452 | list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { | ||
453 | if (uevent->ctx != ctx) | ||
454 | continue; | ||
455 | |||
456 | list_del(&uevent->list); | ||
457 | |||
458 | /* clear incoming connections. */ | ||
459 | if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) | ||
460 | rdma_destroy_id(uevent->cm_id); | ||
461 | |||
462 | kfree(uevent); | ||
463 | } | ||
464 | } | ||
465 | |||
445 | static void ucma_cleanup_mc_events(struct ucma_multicast *mc) | 466 | static void ucma_cleanup_mc_events(struct ucma_multicast *mc) |
446 | { | 467 | { |
447 | struct ucma_event *uevent, *tmp; | 468 | struct ucma_event *uevent, *tmp; |
@@ -455,16 +476,9 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc) | |||
455 | } | 476 | } |
456 | } | 477 | } |
457 | 478 | ||
458 | /* | ||
459 | * We cannot hold file->mut when calling rdma_destroy_id() or we can | ||
460 | * deadlock. We also acquire file->mut in ucma_event_handler(), and | ||
461 | * rdma_destroy_id() will wait until all callbacks have completed. | ||
462 | */ | ||
463 | static int ucma_free_ctx(struct ucma_context *ctx) | 479 | static int ucma_free_ctx(struct ucma_context *ctx) |
464 | { | 480 | { |
465 | int events_reported; | 481 | int events_reported; |
466 | struct ucma_event *uevent, *tmp; | ||
467 | LIST_HEAD(list); | ||
468 | 482 | ||
469 | /* No new events will be generated after destroying the id. */ | 483 | /* No new events will be generated after destroying the id. */ |
470 | rdma_destroy_id(ctx->cm_id); | 484 | rdma_destroy_id(ctx->cm_id); |
@@ -473,20 +487,10 @@ static int ucma_free_ctx(struct ucma_context *ctx) | |||
473 | 487 | ||
474 | /* Cleanup events not yet reported to the user. */ | 488 | /* Cleanup events not yet reported to the user. */ |
475 | mutex_lock(&ctx->file->mut); | 489 | mutex_lock(&ctx->file->mut); |
476 | list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { | 490 | ucma_cleanup_events(ctx); |
477 | if (uevent->ctx == ctx) | ||
478 | list_move_tail(&uevent->list, &list); | ||
479 | } | ||
480 | list_del(&ctx->list); | 491 | list_del(&ctx->list); |
481 | mutex_unlock(&ctx->file->mut); | 492 | mutex_unlock(&ctx->file->mut); |
482 | 493 | ||
483 | list_for_each_entry_safe(uevent, tmp, &list, list) { | ||
484 | list_del(&uevent->list); | ||
485 | if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) | ||
486 | rdma_destroy_id(uevent->cm_id); | ||
487 | kfree(uevent); | ||
488 | } | ||
489 | |||
490 | events_reported = ctx->events_reported; | 494 | events_reported = ctx->events_reported; |
491 | kfree(ctx); | 495 | kfree(ctx); |
492 | return events_reported; | 496 | return events_reported; |
@@ -800,12 +804,9 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, | |||
800 | return PTR_ERR(ctx); | 804 | return PTR_ERR(ctx); |
801 | 805 | ||
802 | if (cmd.conn_param.valid) { | 806 | if (cmd.conn_param.valid) { |
807 | ctx->uid = cmd.uid; | ||
803 | ucma_copy_conn_param(&conn_param, &cmd.conn_param); | 808 | ucma_copy_conn_param(&conn_param, &cmd.conn_param); |
804 | mutex_lock(&file->mut); | ||
805 | ret = rdma_accept(ctx->cm_id, &conn_param); | 809 | ret = rdma_accept(ctx->cm_id, &conn_param); |
806 | if (!ret) | ||
807 | ctx->uid = cmd.uid; | ||
808 | mutex_unlock(&file->mut); | ||
809 | } else | 810 | } else |
810 | ret = rdma_accept(ctx->cm_id, NULL); | 811 | ret = rdma_accept(ctx->cm_id, NULL); |
811 | 812 | ||
@@ -908,13 +909,6 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname, | |||
908 | } | 909 | } |
909 | ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); | 910 | ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); |
910 | break; | 911 | break; |
911 | case RDMA_OPTION_ID_AFONLY: | ||
912 | if (optlen != sizeof(int)) { | ||
913 | ret = -EINVAL; | ||
914 | break; | ||
915 | } | ||
916 | ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0); | ||
917 | break; | ||
918 | default: | 912 | default: |
919 | ret = -ENOSYS; | 913 | ret = -ENOSYS; |
920 | } | 914 | } |
@@ -1001,18 +995,23 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, | |||
1001 | if (IS_ERR(ctx)) | 995 | if (IS_ERR(ctx)) |
1002 | return PTR_ERR(ctx); | 996 | return PTR_ERR(ctx); |
1003 | 997 | ||
1004 | optval = memdup_user((void __user *) (unsigned long) cmd.optval, | 998 | optval = kmalloc(cmd.optlen, GFP_KERNEL); |
1005 | cmd.optlen); | 999 | if (!optval) { |
1006 | if (IS_ERR(optval)) { | 1000 | ret = -ENOMEM; |
1007 | ret = PTR_ERR(optval); | 1001 | goto out1; |
1008 | goto out; | 1002 | } |
1003 | |||
1004 | if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval, | ||
1005 | cmd.optlen)) { | ||
1006 | ret = -EFAULT; | ||
1007 | goto out2; | ||
1009 | } | 1008 | } |
1010 | 1009 | ||
1011 | ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, | 1010 | ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, |
1012 | cmd.optlen); | 1011 | cmd.optlen); |
1012 | out2: | ||
1013 | kfree(optval); | 1013 | kfree(optval); |
1014 | 1014 | out1: | |
1015 | out: | ||
1016 | ucma_put_ctx(ctx); | 1015 | ucma_put_ctx(ctx); |
1017 | return ret; | 1016 | return ret; |
1018 | } | 1017 | } |
@@ -1183,7 +1182,7 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, | |||
1183 | struct rdma_ucm_migrate_id cmd; | 1182 | struct rdma_ucm_migrate_id cmd; |
1184 | struct rdma_ucm_migrate_resp resp; | 1183 | struct rdma_ucm_migrate_resp resp; |
1185 | struct ucma_context *ctx; | 1184 | struct ucma_context *ctx; |
1186 | struct fd f; | 1185 | struct file *filp; |
1187 | struct ucma_file *cur_file; | 1186 | struct ucma_file *cur_file; |
1188 | int ret = 0; | 1187 | int ret = 0; |
1189 | 1188 | ||
@@ -1191,12 +1190,12 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file, | |||
1191 | return -EFAULT; | 1190 | return -EFAULT; |
1192 | 1191 | ||
1193 | /* Get current fd to protect against it being closed */ | 1192 | /* Get current fd to protect against it being closed */ |
1194 | f = fdget(cmd.fd); | 1193 | filp = fget(cmd.fd); |
1195 | if (!f.file) | 1194 | if (!filp) |
1196 | return -ENOENT; | 1195 | return -ENOENT; |
1197 | 1196 | ||
1198 | /* Validate current fd and prevent destruction of id. */ | 1197 | /* Validate current fd and prevent destruction of id. */ |
1199 | ctx = ucma_get_ctx(f.file->private_data, cmd.id); | 1198 | ctx = ucma_get_ctx(filp->private_data, cmd.id); |
1200 | if (IS_ERR(ctx)) { | 1199 | if (IS_ERR(ctx)) { |
1201 | ret = PTR_ERR(ctx); | 1200 | ret = PTR_ERR(ctx); |
1202 | goto file_put; | 1201 | goto file_put; |
@@ -1230,7 +1229,7 @@ response: | |||
1230 | 1229 | ||
1231 | ucma_put_ctx(ctx); | 1230 | ucma_put_ctx(ctx); |
1232 | file_put: | 1231 | file_put: |
1233 | fdput(f); | 1232 | fput(filp); |
1234 | return ret; | 1233 | return ret; |
1235 | } | 1234 | } |
1236 | 1235 | ||
@@ -1271,7 +1270,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf, | |||
1271 | if (copy_from_user(&hdr, buf, sizeof(hdr))) | 1270 | if (copy_from_user(&hdr, buf, sizeof(hdr))) |
1272 | return -EFAULT; | 1271 | return -EFAULT; |
1273 | 1272 | ||
1274 | if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) | 1273 | if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) |
1275 | return -EINVAL; | 1274 | return -EINVAL; |
1276 | 1275 | ||
1277 | if (hdr.in + sizeof(hdr) > len) | 1276 | if (hdr.in + sizeof(hdr) > len) |
@@ -1387,7 +1386,7 @@ static int __init ucma_init(void) | |||
1387 | goto err1; | 1386 | goto err1; |
1388 | } | 1387 | } |
1389 | 1388 | ||
1390 | ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table); | 1389 | ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table); |
1391 | if (!ucma_ctl_table_hdr) { | 1390 | if (!ucma_ctl_table_hdr) { |
1392 | printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n"); | 1391 | printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n"); |
1393 | ret = -ENOMEM; | 1392 | ret = -ENOMEM; |
@@ -1403,7 +1402,7 @@ err1: | |||
1403 | 1402 | ||
1404 | static void __exit ucma_cleanup(void) | 1403 | static void __exit ucma_cleanup(void) |
1405 | { | 1404 | { |
1406 | unregister_net_sysctl_table(ucma_ctl_table_hdr); | 1405 | unregister_sysctl_table(ucma_ctl_table_hdr); |
1407 | device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); | 1406 | device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); |
1408 | misc_deregister(&ucma_misc); | 1407 | misc_deregister(&ucma_misc); |
1409 | idr_destroy(&ctx_idr); | 1408 | idr_destroy(&ctx_idr); |
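Note: several ucma.c hunks are the same pattern of replacing open-coded helpers. memdup_user() collapses the kmalloc() plus copy_from_user() pair and returns an ERR_PTR (-ENOMEM or -EFAULT) on failure, fdget()/fdput() replace fget()/fput() with the lighter struct fd handle, and register_net_sysctl() takes the "net/rdma_ucm" path string directly instead of a ctl_path array. The memdup_user() form, as the left-hand column uses it:

	optval = memdup_user((void __user *)(unsigned long)cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);	/* -ENOMEM or -EFAULT, as appropriate */
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);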
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c index 72feee620eb..9b737ff133e 100644 --- a/drivers/infiniband/core/ud_header.c +++ b/drivers/infiniband/core/ud_header.c | |||
@@ -33,7 +33,6 @@ | |||
33 | 33 | ||
34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
35 | #include <linux/string.h> | 35 | #include <linux/string.h> |
36 | #include <linux/export.h> | ||
37 | #include <linux/if_ether.h> | 36 | #include <linux/if_ether.h> |
38 | 37 | ||
39 | #include <rdma/ib_pack.h> | 38 | #include <rdma/ib_pack.h> |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index a8411232207..b645e558876 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/mm.h> | 35 | #include <linux/mm.h> |
36 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/export.h> | ||
39 | #include <linux/hugetlb.h> | 38 | #include <linux/hugetlb.h> |
40 | #include <linux/dma-attrs.h> | 39 | #include <linux/dma-attrs.h> |
41 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
@@ -137,7 +136,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
137 | 136 | ||
138 | down_write(¤t->mm->mmap_sem); | 137 | down_write(¤t->mm->mmap_sem); |
139 | 138 | ||
140 | locked = npages + current->mm->pinned_vm; | 139 | locked = npages + current->mm->locked_vm; |
141 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; | 140 | lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; |
142 | 141 | ||
143 | if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { | 142 | if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { |
@@ -207,7 +206,7 @@ out: | |||
207 | __ib_umem_release(context->device, umem, 0); | 206 | __ib_umem_release(context->device, umem, 0); |
208 | kfree(umem); | 207 | kfree(umem); |
209 | } else | 208 | } else |
210 | current->mm->pinned_vm = locked; | 209 | current->mm->locked_vm = locked; |
211 | 210 | ||
212 | up_write(¤t->mm->mmap_sem); | 211 | up_write(¤t->mm->mmap_sem); |
213 | if (vma_list) | 212 | if (vma_list) |
@@ -223,7 +222,7 @@ static void ib_umem_account(struct work_struct *work) | |||
223 | struct ib_umem *umem = container_of(work, struct ib_umem, work); | 222 | struct ib_umem *umem = container_of(work, struct ib_umem, work); |
224 | 223 | ||
225 | down_write(&umem->mm->mmap_sem); | 224 | down_write(&umem->mm->mmap_sem); |
226 | umem->mm->pinned_vm -= umem->diff; | 225 | umem->mm->locked_vm -= umem->diff; |
227 | up_write(&umem->mm->mmap_sem); | 226 | up_write(&umem->mm->mmap_sem); |
228 | mmput(umem->mm); | 227 | mmput(umem->mm); |
229 | kfree(umem); | 228 | kfree(umem); |
@@ -269,7 +268,7 @@ void ib_umem_release(struct ib_umem *umem) | |||
269 | } else | 268 | } else |
270 | down_write(&mm->mmap_sem); | 269 | down_write(&mm->mmap_sem); |
271 | 270 | ||
272 | current->mm->pinned_vm -= diff; | 271 | current->mm->locked_vm -= diff; |
273 | up_write(&mm->mmap_sem); | 272 | up_write(&mm->mmap_sem); |
274 | mmput(mm); | 273 | mmput(mm); |
275 | kfree(umem); | 274 | kfree(umem); |
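Note: the umem hunks track the pinned_vm/locked_vm split. Pages pinned with get_user_pages() for RDMA are counted in mm->pinned_vm, separately from mlock()ed pages in mm->locked_vm, so the same memory is not double-charged while RLIMIT_MEMLOCK still bounds each kind. The shape of the check itself is unchanged:

	down_write(&current->mm->mmap_sem);

	locked     = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;		/* over the memlock budget */
		goto out;
	}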
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index f0d588f8859..8d261b6ea5f 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
@@ -458,7 +458,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
458 | goto err; | 458 | goto err; |
459 | } | 459 | } |
460 | 460 | ||
461 | if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) { | 461 | if (packet->mad.hdr.id < 0 || |
462 | packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) { | ||
462 | ret = -EINVAL; | 463 | ret = -EINVAL; |
463 | goto err; | 464 | goto err; |
464 | } | 465 | } |
@@ -702,7 +703,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg) | |||
702 | mutex_lock(&file->port->file_mutex); | 703 | mutex_lock(&file->port->file_mutex); |
703 | mutex_lock(&file->mutex); | 704 | mutex_lock(&file->mutex); |
704 | 705 | ||
705 | if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { | 706 | if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) { |
706 | ret = -EINVAL; | 707 | ret = -EINVAL; |
707 | goto out; | 708 | goto out; |
708 | } | 709 | } |
@@ -1175,7 +1176,7 @@ static void ib_umad_remove_one(struct ib_device *device) | |||
1175 | kref_put(&umad_dev->ref, ib_umad_release_dev); | 1176 | kref_put(&umad_dev->ref, ib_umad_release_dev); |
1176 | } | 1177 | } |
1177 | 1178 | ||
1178 | static char *umad_devnode(struct device *dev, umode_t *mode) | 1179 | static char *umad_devnode(struct device *dev, mode_t *mode) |
1179 | { | 1180 | { |
1180 | return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | 1181 | return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); |
1181 | } | 1182 | } |
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 5bcb2afd3dc..a078e5624d2 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h | |||
@@ -76,8 +76,6 @@ struct ib_uverbs_device { | |||
76 | struct ib_device *ib_dev; | 76 | struct ib_device *ib_dev; |
77 | int devnum; | 77 | int devnum; |
78 | struct cdev cdev; | 78 | struct cdev cdev; |
79 | struct rb_root xrcd_tree; | ||
80 | struct mutex xrcd_tree_mutex; | ||
81 | }; | 79 | }; |
82 | 80 | ||
83 | struct ib_uverbs_event_file { | 81 | struct ib_uverbs_event_file { |
@@ -122,16 +120,6 @@ struct ib_uevent_object { | |||
122 | u32 events_reported; | 120 | u32 events_reported; |
123 | }; | 121 | }; |
124 | 122 | ||
125 | struct ib_uxrcd_object { | ||
126 | struct ib_uobject uobject; | ||
127 | atomic_t refcnt; | ||
128 | }; | ||
129 | |||
130 | struct ib_usrq_object { | ||
131 | struct ib_uevent_object uevent; | ||
132 | struct ib_uxrcd_object *uxrcd; | ||
133 | }; | ||
134 | |||
135 | struct ib_uqp_object { | 123 | struct ib_uqp_object { |
136 | struct ib_uevent_object uevent; | 124 | struct ib_uevent_object uevent; |
137 | struct list_head mcast_list; | 125 | struct list_head mcast_list; |
@@ -154,7 +142,6 @@ extern struct idr ib_uverbs_ah_idr; | |||
154 | extern struct idr ib_uverbs_cq_idr; | 142 | extern struct idr ib_uverbs_cq_idr; |
155 | extern struct idr ib_uverbs_qp_idr; | 143 | extern struct idr ib_uverbs_qp_idr; |
156 | extern struct idr ib_uverbs_srq_idr; | 144 | extern struct idr ib_uverbs_srq_idr; |
157 | extern struct idr ib_uverbs_xrcd_idr; | ||
158 | 145 | ||
159 | void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj); | 146 | void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj); |
160 | 147 | ||
@@ -174,7 +161,6 @@ void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr); | |||
174 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); | 161 | void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr); |
175 | void ib_uverbs_event_handler(struct ib_event_handler *handler, | 162 | void ib_uverbs_event_handler(struct ib_event_handler *handler, |
176 | struct ib_event *event); | 163 | struct ib_event *event); |
177 | void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, struct ib_xrcd *xrcd); | ||
178 | 164 | ||
179 | #define IB_UVERBS_DECLARE_CMD(name) \ | 165 | #define IB_UVERBS_DECLARE_CMD(name) \ |
180 | ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ | 166 | ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \ |
@@ -195,7 +181,6 @@ IB_UVERBS_DECLARE_CMD(poll_cq); | |||
195 | IB_UVERBS_DECLARE_CMD(req_notify_cq); | 181 | IB_UVERBS_DECLARE_CMD(req_notify_cq); |
196 | IB_UVERBS_DECLARE_CMD(destroy_cq); | 182 | IB_UVERBS_DECLARE_CMD(destroy_cq); |
197 | IB_UVERBS_DECLARE_CMD(create_qp); | 183 | IB_UVERBS_DECLARE_CMD(create_qp); |
198 | IB_UVERBS_DECLARE_CMD(open_qp); | ||
199 | IB_UVERBS_DECLARE_CMD(query_qp); | 184 | IB_UVERBS_DECLARE_CMD(query_qp); |
200 | IB_UVERBS_DECLARE_CMD(modify_qp); | 185 | IB_UVERBS_DECLARE_CMD(modify_qp); |
201 | IB_UVERBS_DECLARE_CMD(destroy_qp); | 186 | IB_UVERBS_DECLARE_CMD(destroy_qp); |
@@ -210,8 +195,5 @@ IB_UVERBS_DECLARE_CMD(create_srq); | |||
210 | IB_UVERBS_DECLARE_CMD(modify_srq); | 195 | IB_UVERBS_DECLARE_CMD(modify_srq); |
211 | IB_UVERBS_DECLARE_CMD(query_srq); | 196 | IB_UVERBS_DECLARE_CMD(query_srq); |
212 | IB_UVERBS_DECLARE_CMD(destroy_srq); | 197 | IB_UVERBS_DECLARE_CMD(destroy_srq); |
213 | IB_UVERBS_DECLARE_CMD(create_xsrq); | ||
214 | IB_UVERBS_DECLARE_CMD(open_xrcd); | ||
215 | IB_UVERBS_DECLARE_CMD(close_xrcd); | ||
216 | 198 | ||
217 | #endif /* UVERBS_H */ | 199 | #endif /* UVERBS_H */ |
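Note: the uverbs.h hunks remove the user-visible half of XRC (eXtended Reliable Connected) support: the ib_uxrcd_object/ib_usrq_object wrappers, the xrcd IDR, and the per-device rb-tree (xrcd_tree) that maps an inode to a shared XRC domain so that processes opening the same file get the same XRCD. The matching open_xrcd/close_xrcd/create_xsrq handlers go away in uverbs_cmd.c below. The sharing rule, sketched with names from the handler quoted in the next file:

	/* one XRCD per inode: look up an existing domain before allocating */
	inode = f.file->f_path.dentry->d_inode;
	xrcd = find_xrcd(file->device, inode);
	if (!xrcd && !(cmd.oflags & O_CREAT))
		return -EAGAIN;		/* nothing to open and no O_CREAT */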
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 0cb0007724a..c42699285f8 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
@@ -41,18 +41,12 @@ | |||
41 | 41 | ||
42 | #include "uverbs.h" | 42 | #include "uverbs.h" |
43 | 43 | ||
44 | struct uverbs_lock_class { | 44 | static struct lock_class_key pd_lock_key; |
45 | struct lock_class_key key; | 45 | static struct lock_class_key mr_lock_key; |
46 | char name[16]; | 46 | static struct lock_class_key cq_lock_key; |
47 | }; | 47 | static struct lock_class_key qp_lock_key; |
48 | 48 | static struct lock_class_key ah_lock_key; | |
49 | static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" }; | 49 | static struct lock_class_key srq_lock_key; |
50 | static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" }; | ||
51 | static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" }; | ||
52 | static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" }; | ||
53 | static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" }; | ||
54 | static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" }; | ||
55 | static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; | ||
56 | 50 | ||
57 | #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ | 51 | #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \ |
58 | do { \ | 52 | do { \ |
@@ -88,13 +82,13 @@ static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" }; | |||
88 | */ | 82 | */ |
89 | 83 | ||
90 | static void init_uobj(struct ib_uobject *uobj, u64 user_handle, | 84 | static void init_uobj(struct ib_uobject *uobj, u64 user_handle, |
91 | struct ib_ucontext *context, struct uverbs_lock_class *c) | 85 | struct ib_ucontext *context, struct lock_class_key *key) |
92 | { | 86 | { |
93 | uobj->user_handle = user_handle; | 87 | uobj->user_handle = user_handle; |
94 | uobj->context = context; | 88 | uobj->context = context; |
95 | kref_init(&uobj->ref); | 89 | kref_init(&uobj->ref); |
96 | init_rwsem(&uobj->mutex); | 90 | init_rwsem(&uobj->mutex); |
97 | lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name); | 91 | lockdep_set_class(&uobj->mutex, key); |
98 | uobj->live = 0; | 92 | uobj->live = 0; |
99 | } | 93 | } |
100 | 94 | ||
@@ -246,24 +240,11 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context) | |||
246 | return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); | 240 | return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0); |
247 | } | 241 | } |
248 | 242 | ||
249 | static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context) | ||
250 | { | ||
251 | struct ib_uobject *uobj; | ||
252 | |||
253 | uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context); | ||
254 | return uobj ? uobj->object : NULL; | ||
255 | } | ||
256 | |||
257 | static void put_qp_read(struct ib_qp *qp) | 243 | static void put_qp_read(struct ib_qp *qp) |
258 | { | 244 | { |
259 | put_uobj_read(qp->uobject); | 245 | put_uobj_read(qp->uobject); |
260 | } | 246 | } |
261 | 247 | ||
262 | static void put_qp_write(struct ib_qp *qp) | ||
263 | { | ||
264 | put_uobj_write(qp->uobject); | ||
265 | } | ||
266 | |||
267 | static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) | 248 | static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context) |
268 | { | 249 | { |
269 | return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0); | 250 | return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0); |
@@ -274,18 +255,6 @@ static void put_srq_read(struct ib_srq *srq) | |||
274 | put_uobj_read(srq->uobject); | 255 | put_uobj_read(srq->uobject); |
275 | } | 256 | } |
276 | 257 | ||
277 | static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context, | ||
278 | struct ib_uobject **uobj) | ||
279 | { | ||
280 | *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0); | ||
281 | return *uobj ? (*uobj)->object : NULL; | ||
282 | } | ||
283 | |||
284 | static void put_xrcd_read(struct ib_uobject *uobj) | ||
285 | { | ||
286 | put_uobj_read(uobj); | ||
287 | } | ||
288 | |||
289 | ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, | 258 | ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, |
290 | const char __user *buf, | 259 | const char __user *buf, |
291 | int in_len, int out_len) | 260 | int in_len, int out_len) |
@@ -329,7 +298,6 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, | |||
329 | INIT_LIST_HEAD(&ucontext->qp_list); | 298 | INIT_LIST_HEAD(&ucontext->qp_list); |
330 | INIT_LIST_HEAD(&ucontext->srq_list); | 299 | INIT_LIST_HEAD(&ucontext->srq_list); |
331 | INIT_LIST_HEAD(&ucontext->ah_list); | 300 | INIT_LIST_HEAD(&ucontext->ah_list); |
332 | INIT_LIST_HEAD(&ucontext->xrcd_list); | ||
333 | ucontext->closing = 0; | 301 | ucontext->closing = 0; |
334 | 302 | ||
335 | resp.num_comp_vectors = file->device->num_comp_vectors; | 303 | resp.num_comp_vectors = file->device->num_comp_vectors; |
@@ -527,7 +495,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file, | |||
527 | if (!uobj) | 495 | if (!uobj) |
528 | return -ENOMEM; | 496 | return -ENOMEM; |
529 | 497 | ||
530 | init_uobj(uobj, 0, file->ucontext, &pd_lock_class); | 498 | init_uobj(uobj, 0, file->ucontext, &pd_lock_key); |
531 | down_write(&uobj->mutex); | 499 | down_write(&uobj->mutex); |
532 | 500 | ||
533 | pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, | 501 | pd = file->device->ib_dev->alloc_pd(file->device->ib_dev, |
@@ -611,305 +579,6 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file, | |||
611 | return in_len; | 579 | return in_len; |
612 | } | 580 | } |
613 | 581 | ||
614 | struct xrcd_table_entry { | ||
615 | struct rb_node node; | ||
616 | struct ib_xrcd *xrcd; | ||
617 | struct inode *inode; | ||
618 | }; | ||
619 | |||
620 | static int xrcd_table_insert(struct ib_uverbs_device *dev, | ||
621 | struct inode *inode, | ||
622 | struct ib_xrcd *xrcd) | ||
623 | { | ||
624 | struct xrcd_table_entry *entry, *scan; | ||
625 | struct rb_node **p = &dev->xrcd_tree.rb_node; | ||
626 | struct rb_node *parent = NULL; | ||
627 | |||
628 | entry = kmalloc(sizeof *entry, GFP_KERNEL); | ||
629 | if (!entry) | ||
630 | return -ENOMEM; | ||
631 | |||
632 | entry->xrcd = xrcd; | ||
633 | entry->inode = inode; | ||
634 | |||
635 | while (*p) { | ||
636 | parent = *p; | ||
637 | scan = rb_entry(parent, struct xrcd_table_entry, node); | ||
638 | |||
639 | if (inode < scan->inode) { | ||
640 | p = &(*p)->rb_left; | ||
641 | } else if (inode > scan->inode) { | ||
642 | p = &(*p)->rb_right; | ||
643 | } else { | ||
644 | kfree(entry); | ||
645 | return -EEXIST; | ||
646 | } | ||
647 | } | ||
648 | |||
649 | rb_link_node(&entry->node, parent, p); | ||
650 | rb_insert_color(&entry->node, &dev->xrcd_tree); | ||
651 | igrab(inode); | ||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev, | ||
656 | struct inode *inode) | ||
657 | { | ||
658 | struct xrcd_table_entry *entry; | ||
659 | struct rb_node *p = dev->xrcd_tree.rb_node; | ||
660 | |||
661 | while (p) { | ||
662 | entry = rb_entry(p, struct xrcd_table_entry, node); | ||
663 | |||
664 | if (inode < entry->inode) | ||
665 | p = p->rb_left; | ||
666 | else if (inode > entry->inode) | ||
667 | p = p->rb_right; | ||
668 | else | ||
669 | return entry; | ||
670 | } | ||
671 | |||
672 | return NULL; | ||
673 | } | ||
674 | |||
675 | static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode) | ||
676 | { | ||
677 | struct xrcd_table_entry *entry; | ||
678 | |||
679 | entry = xrcd_table_search(dev, inode); | ||
680 | if (!entry) | ||
681 | return NULL; | ||
682 | |||
683 | return entry->xrcd; | ||
684 | } | ||
685 | |||
686 | static void xrcd_table_delete(struct ib_uverbs_device *dev, | ||
687 | struct inode *inode) | ||
688 | { | ||
689 | struct xrcd_table_entry *entry; | ||
690 | |||
691 | entry = xrcd_table_search(dev, inode); | ||
692 | if (entry) { | ||
693 | iput(inode); | ||
694 | rb_erase(&entry->node, &dev->xrcd_tree); | ||
695 | kfree(entry); | ||
696 | } | ||
697 | } | ||
698 | |||
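The removed xrcd table is an rb-tree keyed by the bare inode pointer, so every process opening the same file shares one struct ib_xrcd. A compilable model of the same walk, with an unbalanced binary search tree standing in for the kernel rb-tree (rebalancing omitted; the ordering and the -EEXIST duplicate rule match the code above):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
    struct entry *left, *right;
    uintptr_t inode;   /* key: the inode's address, as in the kernel tree */
    void *xrcd;        /* value: the shared object */
};

static int table_insert(struct entry **root, uintptr_t inode, void *xrcd)
{
    struct entry **p = root, *e;

    while (*p) {
        if (inode < (*p)->inode)
            p = &(*p)->left;
        else if (inode > (*p)->inode)
            p = &(*p)->right;
        else
            return -EEXIST;   /* same duplicate rule as xrcd_table_insert() */
    }
    e = calloc(1, sizeof(*e));
    if (!e)
        return -ENOMEM;
    e->inode = inode;
    e->xrcd = xrcd;
    *p = e;
    return 0;
}

static void *table_search(struct entry *root, uintptr_t inode)
{
    while (root) {
        if (inode < root->inode)
            root = root->left;
        else if (inode > root->inode)
            root = root->right;
        else
            return root->xrcd;
    }
    return NULL;   /* find_xrcd() likewise returns NULL on a miss */
}

int main(void)
{
    struct entry *root = NULL;
    int a, b;   /* stand-ins for two distinct inodes */

    table_insert(&root, (uintptr_t)&a, "xrcd-A");
    printf("reinsert: %d\n", table_insert(&root, (uintptr_t)&a, "dup"));
    printf("hit: %s  miss: %p\n",
           (char *)table_search(root, (uintptr_t)&a),
           table_search(root, (uintptr_t)&b));
    return 0;
}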
699 | ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file, | ||
700 | const char __user *buf, int in_len, | ||
701 | int out_len) | ||
702 | { | ||
703 | struct ib_uverbs_open_xrcd cmd; | ||
704 | struct ib_uverbs_open_xrcd_resp resp; | ||
705 | struct ib_udata udata; | ||
706 | struct ib_uxrcd_object *obj; | ||
707 | struct ib_xrcd *xrcd = NULL; | ||
708 | struct fd f = {NULL, 0}; | ||
709 | struct inode *inode = NULL; | ||
710 | int ret = 0; | ||
711 | int new_xrcd = 0; | ||
712 | |||
713 | if (out_len < sizeof resp) | ||
714 | return -ENOSPC; | ||
715 | |||
716 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
717 | return -EFAULT; | ||
718 | |||
719 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
720 | (unsigned long) cmd.response + sizeof resp, | ||
721 | in_len - sizeof cmd, out_len - sizeof resp); | ||
722 | |||
723 | mutex_lock(&file->device->xrcd_tree_mutex); | ||
724 | |||
725 | if (cmd.fd != -1) { | ||
726 | /* search for file descriptor */ | ||
727 | f = fdget(cmd.fd); | ||
728 | if (!f.file) { | ||
729 | ret = -EBADF; | ||
730 | goto err_tree_mutex_unlock; | ||
731 | } | ||
732 | |||
733 | inode = f.file->f_path.dentry->d_inode; | ||
734 | xrcd = find_xrcd(file->device, inode); | ||
735 | if (!xrcd && !(cmd.oflags & O_CREAT)) { | ||
736 | /* no file descriptor. Need CREATE flag */ | ||
737 | ret = -EAGAIN; | ||
738 | goto err_tree_mutex_unlock; | ||
739 | } | ||
740 | |||
741 | if (xrcd && cmd.oflags & O_EXCL) { | ||
742 | ret = -EINVAL; | ||
743 | goto err_tree_mutex_unlock; | ||
744 | } | ||
745 | } | ||
746 | |||
747 | obj = kmalloc(sizeof *obj, GFP_KERNEL); | ||
748 | if (!obj) { | ||
749 | ret = -ENOMEM; | ||
750 | goto err_tree_mutex_unlock; | ||
751 | } | ||
752 | |||
753 | init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class); | ||
754 | |||
755 | down_write(&obj->uobject.mutex); | ||
756 | |||
757 | if (!xrcd) { | ||
758 | xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev, | ||
759 | file->ucontext, &udata); | ||
760 | if (IS_ERR(xrcd)) { | ||
761 | ret = PTR_ERR(xrcd); | ||
762 | goto err; | ||
763 | } | ||
764 | |||
765 | xrcd->inode = inode; | ||
766 | xrcd->device = file->device->ib_dev; | ||
767 | atomic_set(&xrcd->usecnt, 0); | ||
768 | mutex_init(&xrcd->tgt_qp_mutex); | ||
769 | INIT_LIST_HEAD(&xrcd->tgt_qp_list); | ||
770 | new_xrcd = 1; | ||
771 | } | ||
772 | |||
773 | atomic_set(&obj->refcnt, 0); | ||
774 | obj->uobject.object = xrcd; | ||
775 | ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject); | ||
776 | if (ret) | ||
777 | goto err_idr; | ||
778 | |||
779 | memset(&resp, 0, sizeof resp); | ||
780 | resp.xrcd_handle = obj->uobject.id; | ||
781 | |||
782 | if (inode) { | ||
783 | if (new_xrcd) { | ||
784 | /* create new inode/xrcd table entry */ | ||
785 | ret = xrcd_table_insert(file->device, inode, xrcd); | ||
786 | if (ret) | ||
787 | goto err_insert_xrcd; | ||
788 | } | ||
789 | atomic_inc(&xrcd->usecnt); | ||
790 | } | ||
791 | |||
792 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | ||
793 | &resp, sizeof resp)) { | ||
794 | ret = -EFAULT; | ||
795 | goto err_copy; | ||
796 | } | ||
797 | |||
798 | if (f.file) | ||
799 | fdput(f); | ||
800 | |||
801 | mutex_lock(&file->mutex); | ||
802 | list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list); | ||
803 | mutex_unlock(&file->mutex); | ||
804 | |||
805 | obj->uobject.live = 1; | ||
806 | up_write(&obj->uobject.mutex); | ||
807 | |||
808 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
809 | return in_len; | ||
810 | |||
811 | err_copy: | ||
812 | if (inode) { | ||
813 | if (new_xrcd) | ||
814 | xrcd_table_delete(file->device, inode); | ||
815 | atomic_dec(&xrcd->usecnt); | ||
816 | } | ||
817 | |||
818 | err_insert_xrcd: | ||
819 | idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject); | ||
820 | |||
821 | err_idr: | ||
822 | ib_dealloc_xrcd(xrcd); | ||
823 | |||
824 | err: | ||
825 | put_uobj_write(&obj->uobject); | ||
826 | |||
827 | err_tree_mutex_unlock: | ||
828 | if (f.file) | ||
829 | fdput(f); | ||
830 | |||
831 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
832 | |||
833 | return ret; | ||
834 | } | ||
835 | |||
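ib_uverbs_open_xrcd() borrows open(2) semantics: the fd's inode is the sharing key, O_CREAT permits allocating a fresh XRC domain when none is registered for that inode, and O_EXCL rejects the call when one already is. A standalone sketch of just that decision table (hypothetical helper name; the return values follow the handler above):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

/* existing is the xrcd already registered for the fd's inode, or NULL */
static int open_xrcd_decision(void *existing, int oflags)
{
    if (!existing && !(oflags & O_CREAT))
        return -EAGAIN;   /* nothing to share, and creation not requested */
    if (existing && (oflags & O_EXCL))
        return -EINVAL;   /* exclusive create, but the inode has one */
    return 0;             /* reuse existing, or allocate a new domain */
}

int main(void)
{
    printf("%d %d %d\n",
           open_xrcd_decision(NULL, 0),                     /* -EAGAIN */
           open_xrcd_decision((void *)1, O_CREAT | O_EXCL), /* -EINVAL */
           open_xrcd_decision(NULL, O_CREAT));              /* 0 */
    return 0;
}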
836 | ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file, | ||
837 | const char __user *buf, int in_len, | ||
838 | int out_len) | ||
839 | { | ||
840 | struct ib_uverbs_close_xrcd cmd; | ||
841 | struct ib_uobject *uobj; | ||
842 | struct ib_xrcd *xrcd = NULL; | ||
843 | struct inode *inode = NULL; | ||
844 | struct ib_uxrcd_object *obj; | ||
845 | int live; | ||
846 | int ret = 0; | ||
847 | |||
848 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
849 | return -EFAULT; | ||
850 | |||
851 | mutex_lock(&file->device->xrcd_tree_mutex); | ||
852 | uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext); | ||
853 | if (!uobj) { | ||
854 | ret = -EINVAL; | ||
855 | goto out; | ||
856 | } | ||
857 | |||
858 | xrcd = uobj->object; | ||
859 | inode = xrcd->inode; | ||
860 | obj = container_of(uobj, struct ib_uxrcd_object, uobject); | ||
861 | if (atomic_read(&obj->refcnt)) { | ||
862 | put_uobj_write(uobj); | ||
863 | ret = -EBUSY; | ||
864 | goto out; | ||
865 | } | ||
866 | |||
867 | if (!inode || atomic_dec_and_test(&xrcd->usecnt)) { | ||
868 | ret = ib_dealloc_xrcd(uobj->object); | ||
869 | if (!ret) | ||
870 | uobj->live = 0; | ||
871 | } | ||
872 | |||
873 | live = uobj->live; | ||
874 | if (inode && ret) | ||
875 | atomic_inc(&xrcd->usecnt); | ||
876 | |||
877 | put_uobj_write(uobj); | ||
878 | |||
879 | if (ret) | ||
880 | goto out; | ||
881 | |||
882 | if (inode && !live) | ||
883 | xrcd_table_delete(file->device, inode); | ||
884 | |||
885 | idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj); | ||
886 | mutex_lock(&file->mutex); | ||
887 | list_del(&uobj->list); | ||
888 | mutex_unlock(&file->mutex); | ||
889 | |||
890 | put_uobj(uobj); | ||
891 | ret = in_len; | ||
892 | |||
893 | out: | ||
894 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
895 | return ret; | ||
896 | } | ||
897 | |||
898 | void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev, | ||
899 | struct ib_xrcd *xrcd) | ||
900 | { | ||
901 | struct inode *inode; | ||
902 | |||
903 | inode = xrcd->inode; | ||
904 | if (inode && !atomic_dec_and_test(&xrcd->usecnt)) | ||
905 | return; | ||
906 | |||
907 | ib_dealloc_xrcd(xrcd); | ||
908 | |||
909 | if (inode) | ||
910 | xrcd_table_delete(dev, inode); | ||
911 | } | ||
912 | |||
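Taken together, the three removed functions implement a plain reference-counted lifetime: each open of an inode-backed XRC domain bumps xrcd->usecnt, each close drops it, and only the final drop deallocates the domain and unlinks the inode from the table. A single-threaded model with plain ints in place of the kernel atomics:

#include <stdio.h>

struct xrcd { int usecnt; int alive; };

static void xrcd_open(struct xrcd *x)   /* atomic_inc(&xrcd->usecnt) */
{
    x->usecnt++;
}

static void xrcd_close(struct xrcd *x)  /* dealloc path, single-threaded */
{
    if (--x->usecnt > 0)
        return;          /* other opens of the same inode still share it */
    x->alive = 0;        /* ib_dealloc_xrcd() + xrcd_table_delete() point */
}

int main(void)
{
    struct xrcd x = { 0, 1 };

    xrcd_open(&x); xrcd_open(&x);   /* two opens of one inode */
    xrcd_close(&x);
    printf("after first close: alive=%d usecnt=%d\n", x.alive, x.usecnt);
    xrcd_close(&x);
    printf("after last close:  alive=%d usecnt=%d\n", x.alive, x.usecnt);
    return 0;
}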
913 | ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, | 582 | ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, |
914 | const char __user *buf, int in_len, | 583 | const char __user *buf, int in_len, |
915 | int out_len) | 584 | int out_len) |
@@ -947,7 +616,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file, | |||
947 | if (!uobj) | 616 | if (!uobj) |
948 | return -ENOMEM; | 617 | return -ENOMEM; |
949 | 618 | ||
950 | init_uobj(uobj, 0, file->ucontext, &mr_lock_class); | 619 | init_uobj(uobj, 0, file->ucontext, &mr_lock_key); |
951 | down_write(&uobj->mutex); | 620 | down_write(&uobj->mutex); |
952 | 621 | ||
953 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | 622 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); |
@@ -1115,7 +784,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file, | |||
1115 | if (!obj) | 784 | if (!obj) |
1116 | return -ENOMEM; | 785 | return -ENOMEM; |
1117 | 786 | ||
1118 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class); | 787 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key); |
1119 | down_write(&obj->uobject.mutex); | 788 | down_write(&obj->uobject.mutex); |
1120 | 789 | ||
1121 | if (cmd.comp_channel >= 0) { | 790 | if (cmd.comp_channel >= 0) { |
@@ -1383,12 +1052,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1383 | struct ib_uverbs_create_qp_resp resp; | 1052 | struct ib_uverbs_create_qp_resp resp; |
1384 | struct ib_udata udata; | 1053 | struct ib_udata udata; |
1385 | struct ib_uqp_object *obj; | 1054 | struct ib_uqp_object *obj; |
1386 | struct ib_device *device; | 1055 | struct ib_pd *pd; |
1387 | struct ib_pd *pd = NULL; | 1056 | struct ib_cq *scq, *rcq; |
1388 | struct ib_xrcd *xrcd = NULL; | 1057 | struct ib_srq *srq; |
1389 | struct ib_uobject *uninitialized_var(xrcd_uobj); | ||
1390 | struct ib_cq *scq = NULL, *rcq = NULL; | ||
1391 | struct ib_srq *srq = NULL; | ||
1392 | struct ib_qp *qp; | 1058 | struct ib_qp *qp; |
1393 | struct ib_qp_init_attr attr; | 1059 | struct ib_qp_init_attr attr; |
1394 | int ret; | 1060 | int ret; |
@@ -1399,9 +1065,6 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1399 | if (copy_from_user(&cmd, buf, sizeof cmd)) | 1065 | if (copy_from_user(&cmd, buf, sizeof cmd)) |
1400 | return -EFAULT; | 1066 | return -EFAULT; |
1401 | 1067 | ||
1402 | if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW)) | ||
1403 | return -EPERM; | ||
1404 | |||
1405 | INIT_UDATA(&udata, buf + sizeof cmd, | 1068 | INIT_UDATA(&udata, buf + sizeof cmd, |
1406 | (unsigned long) cmd.response + sizeof resp, | 1069 | (unsigned long) cmd.response + sizeof resp, |
1407 | in_len - sizeof cmd, out_len - sizeof resp); | 1070 | in_len - sizeof cmd, out_len - sizeof resp); |
@@ -1410,46 +1073,18 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1410 | if (!obj) | 1073 | if (!obj) |
1411 | return -ENOMEM; | 1074 | return -ENOMEM; |
1412 | 1075 | ||
1413 | init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class); | 1076 | init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key); |
1414 | down_write(&obj->uevent.uobject.mutex); | 1077 | down_write(&obj->uevent.uobject.mutex); |
1415 | 1078 | ||
1416 | if (cmd.qp_type == IB_QPT_XRC_TGT) { | 1079 | srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL; |
1417 | xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj); | 1080 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); |
1418 | if (!xrcd) { | 1081 | scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0); |
1419 | ret = -EINVAL; | 1082 | rcq = cmd.recv_cq_handle == cmd.send_cq_handle ? |
1420 | goto err_put; | 1083 | scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1); |
1421 | } | ||
1422 | device = xrcd->device; | ||
1423 | } else { | ||
1424 | if (cmd.qp_type == IB_QPT_XRC_INI) { | ||
1425 | cmd.max_recv_wr = cmd.max_recv_sge = 0; | ||
1426 | } else { | ||
1427 | if (cmd.is_srq) { | ||
1428 | srq = idr_read_srq(cmd.srq_handle, file->ucontext); | ||
1429 | if (!srq || srq->srq_type != IB_SRQT_BASIC) { | ||
1430 | ret = -EINVAL; | ||
1431 | goto err_put; | ||
1432 | } | ||
1433 | } | ||
1434 | |||
1435 | if (cmd.recv_cq_handle != cmd.send_cq_handle) { | ||
1436 | rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0); | ||
1437 | if (!rcq) { | ||
1438 | ret = -EINVAL; | ||
1439 | goto err_put; | ||
1440 | } | ||
1441 | } | ||
1442 | } | ||
1443 | |||
1444 | scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq); | ||
1445 | rcq = rcq ?: scq; | ||
1446 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | ||
1447 | if (!pd || !scq) { | ||
1448 | ret = -EINVAL; | ||
1449 | goto err_put; | ||
1450 | } | ||
1451 | 1084 | ||
1452 | device = pd->device; | 1085 | if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) { |
1086 | ret = -EINVAL; | ||
1087 | goto err_put; | ||
1453 | } | 1088 | } |
1454 | 1089 | ||
1455 | attr.event_handler = ib_uverbs_qp_event_handler; | 1090 | attr.event_handler = ib_uverbs_qp_event_handler; |
@@ -1457,7 +1092,6 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1457 | attr.send_cq = scq; | 1092 | attr.send_cq = scq; |
1458 | attr.recv_cq = rcq; | 1093 | attr.recv_cq = rcq; |
1459 | attr.srq = srq; | 1094 | attr.srq = srq; |
1460 | attr.xrcd = xrcd; | ||
1461 | attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; | 1095 | attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; |
1462 | attr.qp_type = cmd.qp_type; | 1096 | attr.qp_type = cmd.qp_type; |
1463 | attr.create_flags = 0; | 1097 | attr.create_flags = 0; |
@@ -1472,35 +1106,26 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1472 | INIT_LIST_HEAD(&obj->uevent.event_list); | 1106 | INIT_LIST_HEAD(&obj->uevent.event_list); |
1473 | INIT_LIST_HEAD(&obj->mcast_list); | 1107 | INIT_LIST_HEAD(&obj->mcast_list); |
1474 | 1108 | ||
1475 | if (cmd.qp_type == IB_QPT_XRC_TGT) | 1109 | qp = pd->device->create_qp(pd, &attr, &udata); |
1476 | qp = ib_create_qp(pd, &attr); | ||
1477 | else | ||
1478 | qp = device->create_qp(pd, &attr, &udata); | ||
1479 | |||
1480 | if (IS_ERR(qp)) { | 1110 | if (IS_ERR(qp)) { |
1481 | ret = PTR_ERR(qp); | 1111 | ret = PTR_ERR(qp); |
1482 | goto err_put; | 1112 | goto err_put; |
1483 | } | 1113 | } |
1484 | 1114 | ||
1485 | if (cmd.qp_type != IB_QPT_XRC_TGT) { | 1115 | qp->device = pd->device; |
1486 | qp->real_qp = qp; | 1116 | qp->pd = pd; |
1487 | qp->device = device; | 1117 | qp->send_cq = attr.send_cq; |
1488 | qp->pd = pd; | 1118 | qp->recv_cq = attr.recv_cq; |
1489 | qp->send_cq = attr.send_cq; | 1119 | qp->srq = attr.srq; |
1490 | qp->recv_cq = attr.recv_cq; | 1120 | qp->uobject = &obj->uevent.uobject; |
1491 | qp->srq = attr.srq; | 1121 | qp->event_handler = attr.event_handler; |
1492 | qp->event_handler = attr.event_handler; | 1122 | qp->qp_context = attr.qp_context; |
1493 | qp->qp_context = attr.qp_context; | 1123 | qp->qp_type = attr.qp_type; |
1494 | qp->qp_type = attr.qp_type; | 1124 | atomic_inc(&pd->usecnt); |
1495 | atomic_set(&qp->usecnt, 0); | 1125 | atomic_inc(&attr.send_cq->usecnt); |
1496 | atomic_inc(&pd->usecnt); | 1126 | atomic_inc(&attr.recv_cq->usecnt); |
1497 | atomic_inc(&attr.send_cq->usecnt); | 1127 | if (attr.srq) |
1498 | if (attr.recv_cq) | 1128 | atomic_inc(&attr.srq->usecnt); |
1499 | atomic_inc(&attr.recv_cq->usecnt); | ||
1500 | if (attr.srq) | ||
1501 | atomic_inc(&attr.srq->usecnt); | ||
1502 | } | ||
1503 | qp->uobject = &obj->uevent.uobject; | ||
1504 | 1129 | ||
1505 | obj->uevent.uobject.object = qp; | 1130 | obj->uevent.uobject.object = qp; |
1506 | ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); | 1131 | ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); |
@@ -1522,13 +1147,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file, | |||
1522 | goto err_copy; | 1147 | goto err_copy; |
1523 | } | 1148 | } |
1524 | 1149 | ||
1525 | if (xrcd) | 1150 | put_pd_read(pd); |
1526 | put_xrcd_read(xrcd_uobj); | 1151 | put_cq_read(scq); |
1527 | if (pd) | 1152 | if (rcq != scq) |
1528 | put_pd_read(pd); | ||
1529 | if (scq) | ||
1530 | put_cq_read(scq); | ||
1531 | if (rcq && rcq != scq) | ||
1532 | put_cq_read(rcq); | 1153 | put_cq_read(rcq); |
1533 | if (srq) | 1154 | if (srq) |
1534 | put_srq_read(srq); | 1155 | put_srq_read(srq); |
@@ -1550,8 +1171,6 @@ err_destroy: | |||
1550 | ib_destroy_qp(qp); | 1171 | ib_destroy_qp(qp); |
1551 | 1172 | ||
1552 | err_put: | 1173 | err_put: |
1553 | if (xrcd) | ||
1554 | put_xrcd_read(xrcd_uobj); | ||
1555 | if (pd) | 1174 | if (pd) |
1556 | put_pd_read(pd); | 1175 | put_pd_read(pd); |
1557 | if (scq) | 1176 | if (scq) |
@@ -1565,98 +1184,6 @@ err_put: | |||
1565 | return ret; | 1184 | return ret; |
1566 | } | 1185 | } |
1567 | 1186 | ||
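On the reverted side of this hunk, QP creation pins every object the QP depends on: one reference on the PD, one per CQ role (send and receive are counted separately even when both roles name the same CQ), and one on the SRQ if used. A small model of that bookkeeping, with plain ints for the atomic usecnt fields:

#include <stdio.h>

struct obj { const char *name; int usecnt; };   /* usecnt: kernel atomic_t */

static void qp_pin(struct obj *pd, struct obj *scq, struct obj *rcq,
                   struct obj *srq)
{
    pd->usecnt++;
    scq->usecnt++;
    rcq->usecnt++;       /* counted per role: a shared CQ ends up at 2 */
    if (srq)
        srq->usecnt++;
}

int main(void)
{
    struct obj pd = { "pd", 0 }, cq = { "cq", 0 };

    qp_pin(&pd, &cq, &cq, NULL);   /* send and receive share one CQ */
    printf("%s=%d %s=%d\n", pd.name, pd.usecnt, cq.name, cq.usecnt);
    return 0;
}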
1568 | ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file, | ||
1569 | const char __user *buf, int in_len, int out_len) | ||
1570 | { | ||
1571 | struct ib_uverbs_open_qp cmd; | ||
1572 | struct ib_uverbs_create_qp_resp resp; | ||
1573 | struct ib_udata udata; | ||
1574 | struct ib_uqp_object *obj; | ||
1575 | struct ib_xrcd *xrcd; | ||
1576 | struct ib_uobject *uninitialized_var(xrcd_uobj); | ||
1577 | struct ib_qp *qp; | ||
1578 | struct ib_qp_open_attr attr; | ||
1579 | int ret; | ||
1580 | |||
1581 | if (out_len < sizeof resp) | ||
1582 | return -ENOSPC; | ||
1583 | |||
1584 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
1585 | return -EFAULT; | ||
1586 | |||
1587 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
1588 | (unsigned long) cmd.response + sizeof resp, | ||
1589 | in_len - sizeof cmd, out_len - sizeof resp); | ||
1590 | |||
1591 | obj = kmalloc(sizeof *obj, GFP_KERNEL); | ||
1592 | if (!obj) | ||
1593 | return -ENOMEM; | ||
1594 | |||
1595 | init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class); | ||
1596 | down_write(&obj->uevent.uobject.mutex); | ||
1597 | |||
1598 | xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj); | ||
1599 | if (!xrcd) { | ||
1600 | ret = -EINVAL; | ||
1601 | goto err_put; | ||
1602 | } | ||
1603 | |||
1604 | attr.event_handler = ib_uverbs_qp_event_handler; | ||
1605 | attr.qp_context = file; | ||
1606 | attr.qp_num = cmd.qpn; | ||
1607 | attr.qp_type = cmd.qp_type; | ||
1608 | |||
1609 | obj->uevent.events_reported = 0; | ||
1610 | INIT_LIST_HEAD(&obj->uevent.event_list); | ||
1611 | INIT_LIST_HEAD(&obj->mcast_list); | ||
1612 | |||
1613 | qp = ib_open_qp(xrcd, &attr); | ||
1614 | if (IS_ERR(qp)) { | ||
1615 | ret = PTR_ERR(qp); | ||
1616 | goto err_put; | ||
1617 | } | ||
1618 | |||
1619 | qp->uobject = &obj->uevent.uobject; | ||
1620 | |||
1621 | obj->uevent.uobject.object = qp; | ||
1622 | ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); | ||
1623 | if (ret) | ||
1624 | goto err_destroy; | ||
1625 | |||
1626 | memset(&resp, 0, sizeof resp); | ||
1627 | resp.qpn = qp->qp_num; | ||
1628 | resp.qp_handle = obj->uevent.uobject.id; | ||
1629 | |||
1630 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | ||
1631 | &resp, sizeof resp)) { | ||
1632 | ret = -EFAULT; | ||
1633 | goto err_remove; | ||
1634 | } | ||
1635 | |||
1636 | put_xrcd_read(xrcd_uobj); | ||
1637 | |||
1638 | mutex_lock(&file->mutex); | ||
1639 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list); | ||
1640 | mutex_unlock(&file->mutex); | ||
1641 | |||
1642 | obj->uevent.uobject.live = 1; | ||
1643 | |||
1644 | up_write(&obj->uevent.uobject.mutex); | ||
1645 | |||
1646 | return in_len; | ||
1647 | |||
1648 | err_remove: | ||
1649 | idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject); | ||
1650 | |||
1651 | err_destroy: | ||
1652 | ib_destroy_qp(qp); | ||
1653 | |||
1654 | err_put: | ||
1655 | put_xrcd_read(xrcd_uobj); | ||
1656 | put_uobj_write(&obj->uevent.uobject); | ||
1657 | return ret; | ||
1658 | } | ||
1659 | |||
1660 | ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, | 1187 | ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file, |
1661 | const char __user *buf, int in_len, | 1188 | const char __user *buf, int in_len, |
1662 | int out_len) | 1189 | int out_len) |
@@ -1757,20 +1284,6 @@ out: | |||
1757 | return ret ? ret : in_len; | 1284 | return ret ? ret : in_len; |
1758 | } | 1285 | } |
1759 | 1286 | ||
1760 | /* Remove ignored fields set in the attribute mask */ | ||
1761 | static int modify_qp_mask(enum ib_qp_type qp_type, int mask) | ||
1762 | { | ||
1763 | switch (qp_type) { | ||
1764 | case IB_QPT_XRC_INI: | ||
1765 | return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER); | ||
1766 | case IB_QPT_XRC_TGT: | ||
1767 | return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT | | ||
1768 | IB_QP_RNR_RETRY); | ||
1769 | default: | ||
1770 | return mask; | ||
1771 | } | ||
1772 | } | ||
1773 | |||
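The removed modify_qp_mask() exists because XRC splits a connection across two QPs: the initiator has no receive side and the target no send side, so attribute bits that only apply to the missing half are masked off before reaching the driver. A standalone copy of that logic; the enum values are illustrative stand-ins, not the kernel's real bit assignments:

#include <stdio.h>

enum { QPT_RC, QPT_XRC_INI, QPT_XRC_TGT };
enum {
    QP_MAX_QP_RD_ATOMIC   = 1 << 0,   /* requester-side depth */
    QP_MAX_DEST_RD_ATOMIC = 1 << 1,   /* responder-side depth */
    QP_MIN_RNR_TIMER      = 1 << 2,   /* responder-side */
    QP_RETRY_CNT          = 1 << 3,   /* requester-side */
    QP_RNR_RETRY          = 1 << 4,   /* requester-side */
};

static unsigned int modify_qp_mask(int qp_type, unsigned int mask)
{
    switch (qp_type) {
    case QPT_XRC_INI:   /* initiator: no receive side to configure */
        return mask & ~(QP_MAX_DEST_RD_ATOMIC | QP_MIN_RNR_TIMER);
    case QPT_XRC_TGT:   /* target: no send side to configure */
        return mask & ~(QP_MAX_QP_RD_ATOMIC | QP_RETRY_CNT | QP_RNR_RETRY);
    default:
        return mask;
    }
}

int main(void)
{
    unsigned int mask = QP_MAX_DEST_RD_ATOMIC | QP_RETRY_CNT;

    printf("rc=%#x ini=%#x tgt=%#x\n",
           modify_qp_mask(QPT_RC, mask),
           modify_qp_mask(QPT_XRC_INI, mask),
           modify_qp_mask(QPT_XRC_TGT, mask));
    return 0;
}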
1774 | ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | 1287 | ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, |
1775 | const char __user *buf, int in_len, | 1288 | const char __user *buf, int in_len, |
1776 | int out_len) | 1289 | int out_len) |
@@ -1843,12 +1356,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, | |||
1843 | attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; | 1356 | attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0; |
1844 | attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; | 1357 | attr->alt_ah_attr.port_num = cmd.alt_dest.port_num; |
1845 | 1358 | ||
1846 | if (qp->real_qp == qp) { | 1359 | ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata); |
1847 | ret = qp->device->modify_qp(qp, attr, | ||
1848 | modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); | ||
1849 | } else { | ||
1850 | ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); | ||
1851 | } | ||
1852 | 1360 | ||
1853 | put_qp_read(qp); | 1361 | put_qp_read(qp); |
1854 | 1362 | ||
@@ -2045,7 +1553,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, | |||
2045 | } | 1553 | } |
2046 | 1554 | ||
2047 | resp.bad_wr = 0; | 1555 | resp.bad_wr = 0; |
2048 | ret = qp->device->post_send(qp->real_qp, wr, &bad_wr); | 1556 | ret = qp->device->post_send(qp, wr, &bad_wr); |
2049 | if (ret) | 1557 | if (ret) |
2050 | for (next = wr; next; next = next->next) { | 1558 | for (next = wr; next; next = next->next) { |
2051 | ++resp.bad_wr; | 1559 | ++resp.bad_wr; |
@@ -2183,7 +1691,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file, | |||
2183 | goto out; | 1691 | goto out; |
2184 | 1692 | ||
2185 | resp.bad_wr = 0; | 1693 | resp.bad_wr = 0; |
2186 | ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr); | 1694 | ret = qp->device->post_recv(qp, wr, &bad_wr); |
2187 | 1695 | ||
2188 | put_qp_read(qp); | 1696 | put_qp_read(qp); |
2189 | 1697 | ||
@@ -2279,7 +1787,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file, | |||
2279 | if (!uobj) | 1787 | if (!uobj) |
2280 | return -ENOMEM; | 1788 | return -ENOMEM; |
2281 | 1789 | ||
2282 | init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class); | 1790 | init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key); |
2283 | down_write(&uobj->mutex); | 1791 | down_write(&uobj->mutex); |
2284 | 1792 | ||
2285 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); | 1793 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); |
@@ -2396,7 +1904,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, | |||
2396 | if (copy_from_user(&cmd, buf, sizeof cmd)) | 1904 | if (copy_from_user(&cmd, buf, sizeof cmd)) |
2397 | return -EFAULT; | 1905 | return -EFAULT; |
2398 | 1906 | ||
2399 | qp = idr_write_qp(cmd.qp_handle, file->ucontext); | 1907 | qp = idr_read_qp(cmd.qp_handle, file->ucontext); |
2400 | if (!qp) | 1908 | if (!qp) |
2401 | return -EINVAL; | 1909 | return -EINVAL; |
2402 | 1910 | ||
@@ -2425,7 +1933,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file, | |||
2425 | kfree(mcast); | 1933 | kfree(mcast); |
2426 | 1934 | ||
2427 | out_put: | 1935 | out_put: |
2428 | put_qp_write(qp); | 1936 | put_qp_read(qp); |
2429 | 1937 | ||
2430 | return ret ? ret : in_len; | 1938 | return ret ? ret : in_len; |
2431 | } | 1939 | } |
@@ -2443,7 +1951,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, | |||
2443 | if (copy_from_user(&cmd, buf, sizeof cmd)) | 1951 | if (copy_from_user(&cmd, buf, sizeof cmd)) |
2444 | return -EFAULT; | 1952 | return -EFAULT; |
2445 | 1953 | ||
2446 | qp = idr_write_qp(cmd.qp_handle, file->ucontext); | 1954 | qp = idr_read_qp(cmd.qp_handle, file->ucontext); |
2447 | if (!qp) | 1955 | if (!qp) |
2448 | return -EINVAL; | 1956 | return -EINVAL; |
2449 | 1957 | ||
@@ -2462,122 +1970,100 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file, | |||
2462 | } | 1970 | } |
2463 | 1971 | ||
2464 | out_put: | 1972 | out_put: |
2465 | put_qp_write(qp); | 1973 | put_qp_read(qp); |
2466 | 1974 | ||
2467 | return ret ? ret : in_len; | 1975 | return ret ? ret : in_len; |
2468 | } | 1976 | } |
2469 | 1977 | ||
2470 | static int __uverbs_create_xsrq(struct ib_uverbs_file *file, | 1978 | ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, |
2471 | struct ib_uverbs_create_xsrq *cmd, | 1979 | const char __user *buf, int in_len, |
2472 | struct ib_udata *udata) | 1980 | int out_len) |
2473 | { | 1981 | { |
1982 | struct ib_uverbs_create_srq cmd; | ||
2474 | struct ib_uverbs_create_srq_resp resp; | 1983 | struct ib_uverbs_create_srq_resp resp; |
2475 | struct ib_usrq_object *obj; | 1984 | struct ib_udata udata; |
1985 | struct ib_uevent_object *obj; | ||
2476 | struct ib_pd *pd; | 1986 | struct ib_pd *pd; |
2477 | struct ib_srq *srq; | 1987 | struct ib_srq *srq; |
2478 | struct ib_uobject *uninitialized_var(xrcd_uobj); | ||
2479 | struct ib_srq_init_attr attr; | 1988 | struct ib_srq_init_attr attr; |
2480 | int ret; | 1989 | int ret; |
2481 | 1990 | ||
2482 | obj = kmalloc(sizeof *obj, GFP_KERNEL); | 1991 | if (out_len < sizeof resp) |
2483 | if (!obj) | 1992 | return -ENOSPC; |
2484 | return -ENOMEM; | ||
2485 | 1993 | ||
2486 | init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class); | 1994 | if (copy_from_user(&cmd, buf, sizeof cmd)) |
2487 | down_write(&obj->uevent.uobject.mutex); | 1995 | return -EFAULT; |
2488 | 1996 | ||
2489 | if (cmd->srq_type == IB_SRQT_XRC) { | 1997 | INIT_UDATA(&udata, buf + sizeof cmd, |
2490 | attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj); | 1998 | (unsigned long) cmd.response + sizeof resp, |
2491 | if (!attr.ext.xrc.xrcd) { | 1999 | in_len - sizeof cmd, out_len - sizeof resp); |
2492 | ret = -EINVAL; | ||
2493 | goto err; | ||
2494 | } | ||
2495 | 2000 | ||
2496 | obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject); | 2001 | obj = kmalloc(sizeof *obj, GFP_KERNEL); |
2497 | atomic_inc(&obj->uxrcd->refcnt); | 2002 | if (!obj) |
2003 | return -ENOMEM; | ||
2498 | 2004 | ||
2499 | attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0); | 2005 | init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key); |
2500 | if (!attr.ext.xrc.cq) { | 2006 | down_write(&obj->uobject.mutex); |
2501 | ret = -EINVAL; | ||
2502 | goto err_put_xrcd; | ||
2503 | } | ||
2504 | } | ||
2505 | 2007 | ||
2506 | pd = idr_read_pd(cmd->pd_handle, file->ucontext); | 2008 | pd = idr_read_pd(cmd.pd_handle, file->ucontext); |
2507 | if (!pd) { | 2009 | if (!pd) { |
2508 | ret = -EINVAL; | 2010 | ret = -EINVAL; |
2509 | goto err_put_cq; | 2011 | goto err; |
2510 | } | 2012 | } |
2511 | 2013 | ||
2512 | attr.event_handler = ib_uverbs_srq_event_handler; | 2014 | attr.event_handler = ib_uverbs_srq_event_handler; |
2513 | attr.srq_context = file; | 2015 | attr.srq_context = file; |
2514 | attr.srq_type = cmd->srq_type; | 2016 | attr.attr.max_wr = cmd.max_wr; |
2515 | attr.attr.max_wr = cmd->max_wr; | 2017 | attr.attr.max_sge = cmd.max_sge; |
2516 | attr.attr.max_sge = cmd->max_sge; | 2018 | attr.attr.srq_limit = cmd.srq_limit; |
2517 | attr.attr.srq_limit = cmd->srq_limit; | ||
2518 | 2019 | ||
2519 | obj->uevent.events_reported = 0; | 2020 | obj->events_reported = 0; |
2520 | INIT_LIST_HEAD(&obj->uevent.event_list); | 2021 | INIT_LIST_HEAD(&obj->event_list); |
2521 | 2022 | ||
2522 | srq = pd->device->create_srq(pd, &attr, udata); | 2023 | srq = pd->device->create_srq(pd, &attr, &udata); |
2523 | if (IS_ERR(srq)) { | 2024 | if (IS_ERR(srq)) { |
2524 | ret = PTR_ERR(srq); | 2025 | ret = PTR_ERR(srq); |
2525 | goto err_put; | 2026 | goto err_put; |
2526 | } | 2027 | } |
2527 | 2028 | ||
2528 | srq->device = pd->device; | 2029 | srq->device = pd->device; |
2529 | srq->pd = pd; | 2030 | srq->pd = pd; |
2530 | srq->srq_type = cmd->srq_type; | 2031 | srq->uobject = &obj->uobject; |
2531 | srq->uobject = &obj->uevent.uobject; | ||
2532 | srq->event_handler = attr.event_handler; | 2032 | srq->event_handler = attr.event_handler; |
2533 | srq->srq_context = attr.srq_context; | 2033 | srq->srq_context = attr.srq_context; |
2534 | |||
2535 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2536 | srq->ext.xrc.cq = attr.ext.xrc.cq; | ||
2537 | srq->ext.xrc.xrcd = attr.ext.xrc.xrcd; | ||
2538 | atomic_inc(&attr.ext.xrc.cq->usecnt); | ||
2539 | atomic_inc(&attr.ext.xrc.xrcd->usecnt); | ||
2540 | } | ||
2541 | |||
2542 | atomic_inc(&pd->usecnt); | 2034 | atomic_inc(&pd->usecnt); |
2543 | atomic_set(&srq->usecnt, 0); | 2035 | atomic_set(&srq->usecnt, 0); |
2544 | 2036 | ||
2545 | obj->uevent.uobject.object = srq; | 2037 | obj->uobject.object = srq; |
2546 | ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); | 2038 | ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject); |
2547 | if (ret) | 2039 | if (ret) |
2548 | goto err_destroy; | 2040 | goto err_destroy; |
2549 | 2041 | ||
2550 | memset(&resp, 0, sizeof resp); | 2042 | memset(&resp, 0, sizeof resp); |
2551 | resp.srq_handle = obj->uevent.uobject.id; | 2043 | resp.srq_handle = obj->uobject.id; |
2552 | resp.max_wr = attr.attr.max_wr; | 2044 | resp.max_wr = attr.attr.max_wr; |
2553 | resp.max_sge = attr.attr.max_sge; | 2045 | resp.max_sge = attr.attr.max_sge; |
2554 | if (cmd->srq_type == IB_SRQT_XRC) | ||
2555 | resp.srqn = srq->ext.xrc.srq_num; | ||
2556 | 2046 | ||
2557 | if (copy_to_user((void __user *) (unsigned long) cmd->response, | 2047 | if (copy_to_user((void __user *) (unsigned long) cmd.response, |
2558 | &resp, sizeof resp)) { | 2048 | &resp, sizeof resp)) { |
2559 | ret = -EFAULT; | 2049 | ret = -EFAULT; |
2560 | goto err_copy; | 2050 | goto err_copy; |
2561 | } | 2051 | } |
2562 | 2052 | ||
2563 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2564 | put_uobj_read(xrcd_uobj); | ||
2565 | put_cq_read(attr.ext.xrc.cq); | ||
2566 | } | ||
2567 | put_pd_read(pd); | 2053 | put_pd_read(pd); |
2568 | 2054 | ||
2569 | mutex_lock(&file->mutex); | 2055 | mutex_lock(&file->mutex); |
2570 | list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list); | 2056 | list_add_tail(&obj->uobject.list, &file->ucontext->srq_list); |
2571 | mutex_unlock(&file->mutex); | 2057 | mutex_unlock(&file->mutex); |
2572 | 2058 | ||
2573 | obj->uevent.uobject.live = 1; | 2059 | obj->uobject.live = 1; |
2574 | 2060 | ||
2575 | up_write(&obj->uevent.uobject.mutex); | 2061 | up_write(&obj->uobject.mutex); |
2576 | 2062 | ||
2577 | return 0; | 2063 | return in_len; |
2578 | 2064 | ||
2579 | err_copy: | 2065 | err_copy: |
2580 | idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject); | 2066 | idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject); |
2581 | 2067 | ||
2582 | err_destroy: | 2068 | err_destroy: |
2583 | ib_destroy_srq(srq); | 2069 | ib_destroy_srq(srq); |
@@ -2585,81 +2071,11 @@ err_destroy: | |||
2585 | err_put: | 2071 | err_put: |
2586 | put_pd_read(pd); | 2072 | put_pd_read(pd); |
2587 | 2073 | ||
2588 | err_put_cq: | ||
2589 | if (cmd->srq_type == IB_SRQT_XRC) | ||
2590 | put_cq_read(attr.ext.xrc.cq); | ||
2591 | |||
2592 | err_put_xrcd: | ||
2593 | if (cmd->srq_type == IB_SRQT_XRC) { | ||
2594 | atomic_dec(&obj->uxrcd->refcnt); | ||
2595 | put_uobj_read(xrcd_uobj); | ||
2596 | } | ||
2597 | |||
2598 | err: | 2074 | err: |
2599 | put_uobj_write(&obj->uevent.uobject); | 2075 | put_uobj_write(&obj->uobject); |
2600 | return ret; | 2076 | return ret; |
2601 | } | 2077 | } |
2602 | 2078 | ||
2603 | ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file, | ||
2604 | const char __user *buf, int in_len, | ||
2605 | int out_len) | ||
2606 | { | ||
2607 | struct ib_uverbs_create_srq cmd; | ||
2608 | struct ib_uverbs_create_xsrq xcmd; | ||
2609 | struct ib_uverbs_create_srq_resp resp; | ||
2610 | struct ib_udata udata; | ||
2611 | int ret; | ||
2612 | |||
2613 | if (out_len < sizeof resp) | ||
2614 | return -ENOSPC; | ||
2615 | |||
2616 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
2617 | return -EFAULT; | ||
2618 | |||
2619 | xcmd.response = cmd.response; | ||
2620 | xcmd.user_handle = cmd.user_handle; | ||
2621 | xcmd.srq_type = IB_SRQT_BASIC; | ||
2622 | xcmd.pd_handle = cmd.pd_handle; | ||
2623 | xcmd.max_wr = cmd.max_wr; | ||
2624 | xcmd.max_sge = cmd.max_sge; | ||
2625 | xcmd.srq_limit = cmd.srq_limit; | ||
2626 | |||
2627 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
2628 | (unsigned long) cmd.response + sizeof resp, | ||
2629 | in_len - sizeof cmd, out_len - sizeof resp); | ||
2630 | |||
2631 | ret = __uverbs_create_xsrq(file, &xcmd, &udata); | ||
2632 | if (ret) | ||
2633 | return ret; | ||
2634 | |||
2635 | return in_len; | ||
2636 | } | ||
2637 | |||
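The removed wrapper keeps the old create-SRQ ABI alive by widening the legacy command into the extended one with srq_type forced to IB_SRQT_BASIC, so a single __uverbs_create_xsrq() path serves both entry points. A sketch of that widening; the struct layouts here are illustrative, not the real ABI:

#include <stdint.h>
#include <stdio.h>

enum srq_type { SRQT_BASIC, SRQT_XRC };

struct create_srq  { uint64_t user_handle; uint32_t pd_handle, max_wr; };
struct create_xsrq { uint64_t user_handle; uint32_t pd_handle, max_wr;
                     enum srq_type srq_type; };

static struct create_xsrq widen(const struct create_srq *cmd)
{
    struct create_xsrq xcmd = {
        .user_handle = cmd->user_handle,
        .pd_handle   = cmd->pd_handle,
        .max_wr      = cmd->max_wr,
        .srq_type    = SRQT_BASIC,  /* the legacy command never makes XRC SRQs */
    };
    return xcmd;
}

int main(void)
{
    struct create_srq cmd = { 0x1234, 7, 128 };
    struct create_xsrq xcmd = widen(&cmd);

    printf("type=%d pd=%u max_wr=%u\n",
           (int)xcmd.srq_type, xcmd.pd_handle, xcmd.max_wr);
    return 0;
}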
2638 | ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file, | ||
2639 | const char __user *buf, int in_len, int out_len) | ||
2640 | { | ||
2641 | struct ib_uverbs_create_xsrq cmd; | ||
2642 | struct ib_uverbs_create_srq_resp resp; | ||
2643 | struct ib_udata udata; | ||
2644 | int ret; | ||
2645 | |||
2646 | if (out_len < sizeof resp) | ||
2647 | return -ENOSPC; | ||
2648 | |||
2649 | if (copy_from_user(&cmd, buf, sizeof cmd)) | ||
2650 | return -EFAULT; | ||
2651 | |||
2652 | INIT_UDATA(&udata, buf + sizeof cmd, | ||
2653 | (unsigned long) cmd.response + sizeof resp, | ||
2654 | in_len - sizeof cmd, out_len - sizeof resp); | ||
2655 | |||
2656 | ret = __uverbs_create_xsrq(file, &cmd, &udata); | ||
2657 | if (ret) | ||
2658 | return ret; | ||
2659 | |||
2660 | return in_len; | ||
2661 | } | ||
2662 | |||
2663 | ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, | 2079 | ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file, |
2664 | const char __user *buf, int in_len, | 2080 | const char __user *buf, int in_len, |
2665 | int out_len) | 2081 | int out_len) |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 6f2ce6fa98f..56898b6578a 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -72,7 +72,6 @@ DEFINE_IDR(ib_uverbs_ah_idr); | |||
72 | DEFINE_IDR(ib_uverbs_cq_idr); | 72 | DEFINE_IDR(ib_uverbs_cq_idr); |
73 | DEFINE_IDR(ib_uverbs_qp_idr); | 73 | DEFINE_IDR(ib_uverbs_qp_idr); |
74 | DEFINE_IDR(ib_uverbs_srq_idr); | 74 | DEFINE_IDR(ib_uverbs_srq_idr); |
75 | DEFINE_IDR(ib_uverbs_xrcd_idr); | ||
76 | 75 | ||
77 | static DEFINE_SPINLOCK(map_lock); | 76 | static DEFINE_SPINLOCK(map_lock); |
78 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); | 77 | static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); |
@@ -108,10 +107,6 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, | |||
108 | [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, | 107 | [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, |
109 | [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, | 108 | [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, |
110 | [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, | 109 | [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, |
111 | [IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd, | ||
112 | [IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd, | ||
113 | [IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq, | ||
114 | [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp | ||
115 | }; | 110 | }; |
116 | 111 | ||
117 | static void ib_uverbs_add_one(struct ib_device *device); | 112 | static void ib_uverbs_add_one(struct ib_device *device); |
@@ -207,12 +202,8 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
207 | container_of(uobj, struct ib_uqp_object, uevent.uobject); | 202 | container_of(uobj, struct ib_uqp_object, uevent.uobject); |
208 | 203 | ||
209 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); | 204 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); |
210 | if (qp != qp->real_qp) { | 205 | ib_uverbs_detach_umcast(qp, uqp); |
211 | ib_close_qp(qp); | 206 | ib_destroy_qp(qp); |
212 | } else { | ||
213 | ib_uverbs_detach_umcast(qp, uqp); | ||
214 | ib_destroy_qp(qp); | ||
215 | } | ||
216 | ib_uverbs_release_uevent(file, &uqp->uevent); | 207 | ib_uverbs_release_uevent(file, &uqp->uevent); |
217 | kfree(uqp); | 208 | kfree(uqp); |
218 | } | 209 | } |
@@ -250,18 +241,6 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
250 | kfree(uobj); | 241 | kfree(uobj); |
251 | } | 242 | } |
252 | 243 | ||
253 | mutex_lock(&file->device->xrcd_tree_mutex); | ||
254 | list_for_each_entry_safe(uobj, tmp, &context->xrcd_list, list) { | ||
255 | struct ib_xrcd *xrcd = uobj->object; | ||
256 | struct ib_uxrcd_object *uxrcd = | ||
257 | container_of(uobj, struct ib_uxrcd_object, uobject); | ||
258 | |||
259 | idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj); | ||
260 | ib_uverbs_dealloc_xrcd(file->device, xrcd); | ||
261 | kfree(uxrcd); | ||
262 | } | ||
263 | mutex_unlock(&file->device->xrcd_tree_mutex); | ||
264 | |||
265 | list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { | 244 | list_for_each_entry_safe(uobj, tmp, &context->pd_list, list) { |
266 | struct ib_pd *pd = uobj->object; | 245 | struct ib_pd *pd = uobj->object; |
267 | 246 | ||
@@ -541,15 +520,16 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, | |||
541 | struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd) | 520 | struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd) |
542 | { | 521 | { |
543 | struct ib_uverbs_event_file *ev_file = NULL; | 522 | struct ib_uverbs_event_file *ev_file = NULL; |
544 | struct fd f = fdget(fd); | 523 | struct file *filp; |
545 | 524 | ||
546 | if (!f.file) | 525 | filp = fget(fd); |
526 | if (!filp) | ||
547 | return NULL; | 527 | return NULL; |
548 | 528 | ||
549 | if (f.file->f_op != &uverbs_event_fops) | 529 | if (filp->f_op != &uverbs_event_fops) |
550 | goto out; | 530 | goto out; |
551 | 531 | ||
552 | ev_file = f.file->private_data; | 532 | ev_file = filp->private_data; |
553 | if (ev_file->is_async) { | 533 | if (ev_file->is_async) { |
554 | ev_file = NULL; | 534 | ev_file = NULL; |
555 | goto out; | 535 | goto out; |
@@ -558,7 +538,7 @@ struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd) | |||
558 | kref_get(&ev_file->ref); | 538 | kref_get(&ev_file->ref); |
559 | 539 | ||
560 | out: | 540 | out: |
561 | fdput(f); | 541 | fput(filp); |
562 | return ev_file; | 542 | return ev_file; |
563 | } | 543 | } |
564 | 544 | ||
@@ -577,7 +557,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf, | |||
577 | if (hdr.in_words * 4 != count) | 557 | if (hdr.in_words * 4 != count) |
578 | return -EINVAL; | 558 | return -EINVAL; |
579 | 559 | ||
580 | if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || | 560 | if (hdr.command < 0 || |
561 | hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || | ||
581 | !uverbs_cmd_table[hdr.command]) | 562 | !uverbs_cmd_table[hdr.command]) |
582 | return -EINVAL; | 563 | return -EINVAL; |
583 | 564 | ||
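Both sides of this hunk implement the same dispatch pattern: a userspace-supplied index selects a handler only after a bounds check and a NULL check against the sparse table. The extra hdr.command < 0 test on the reverted side is likely dead code if the ABI declares the field unsigned (as the uverbs header of this era appears to), since an unsigned value can never be negative; the upstream side relies on the unsigned >= comparison alone. A model of the pattern:

#include <stddef.h>
#include <stdio.h>

typedef int (*cmd_fn)(void);

static int cmd_query(void) { return 42; }

static cmd_fn cmd_table[] = {
    [0] = cmd_query,
    [1] = NULL,   /* a known command number with no handler wired up */
};

static int dispatch(unsigned int command)   /* unsigned: < 0 can't happen */
{
    if (command >= sizeof(cmd_table) / sizeof(cmd_table[0]) ||
        !cmd_table[command])
        return -1;   /* -EINVAL in the kernel */
    return cmd_table[command]();
}

int main(void)
{
    printf("%d %d %d\n", dispatch(0), dispatch(1), dispatch(99));
    return 0;
}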
@@ -760,8 +741,6 @@ static void ib_uverbs_add_one(struct ib_device *device) | |||
760 | 741 | ||
761 | kref_init(&uverbs_dev->ref); | 742 | kref_init(&uverbs_dev->ref); |
762 | init_completion(&uverbs_dev->comp); | 743 | init_completion(&uverbs_dev->comp); |
763 | uverbs_dev->xrcd_tree = RB_ROOT; | ||
764 | mutex_init(&uverbs_dev->xrcd_tree_mutex); | ||
765 | 744 | ||
766 | spin_lock(&map_lock); | 745 | spin_lock(&map_lock); |
767 | devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); | 746 | devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); |
@@ -845,7 +824,7 @@ static void ib_uverbs_remove_one(struct ib_device *device) | |||
845 | kfree(uverbs_dev); | 824 | kfree(uverbs_dev); |
846 | } | 825 | } |
847 | 826 | ||
848 | static char *uverbs_devnode(struct device *dev, umode_t *mode) | 827 | static char *uverbs_devnode(struct device *dev, mode_t *mode) |
849 | { | 828 | { |
850 | if (mode) | 829 | if (mode) |
851 | *mode = 0666; | 830 | *mode = 0666; |
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index e7bee46868d..1b1146f8712 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c | |||
@@ -30,7 +30,6 @@ | |||
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/export.h> | ||
34 | #include <rdma/ib_marshall.h> | 33 | #include <rdma/ib_marshall.h> |
35 | 34 | ||
36 | void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, | 35 | void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 30f199e8579..af7a8b08b2e 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
@@ -38,9 +38,7 @@ | |||
38 | 38 | ||
39 | #include <linux/errno.h> | 39 | #include <linux/errno.h> |
40 | #include <linux/err.h> | 40 | #include <linux/err.h> |
41 | #include <linux/export.h> | ||
42 | #include <linux/string.h> | 41 | #include <linux/string.h> |
43 | #include <linux/slab.h> | ||
44 | 42 | ||
45 | #include <rdma/ib_verbs.h> | 43 | #include <rdma/ib_verbs.h> |
46 | #include <rdma/ib_cache.h> | 44 | #include <rdma/ib_cache.h> |
@@ -79,31 +77,6 @@ enum ib_rate mult_to_ib_rate(int mult) | |||
79 | } | 77 | } |
80 | EXPORT_SYMBOL(mult_to_ib_rate); | 78 | EXPORT_SYMBOL(mult_to_ib_rate); |
81 | 79 | ||
82 | int ib_rate_to_mbps(enum ib_rate rate) | ||
83 | { | ||
84 | switch (rate) { | ||
85 | case IB_RATE_2_5_GBPS: return 2500; | ||
86 | case IB_RATE_5_GBPS: return 5000; | ||
87 | case IB_RATE_10_GBPS: return 10000; | ||
88 | case IB_RATE_20_GBPS: return 20000; | ||
89 | case IB_RATE_30_GBPS: return 30000; | ||
90 | case IB_RATE_40_GBPS: return 40000; | ||
91 | case IB_RATE_60_GBPS: return 60000; | ||
92 | case IB_RATE_80_GBPS: return 80000; | ||
93 | case IB_RATE_120_GBPS: return 120000; | ||
94 | case IB_RATE_14_GBPS: return 14062; | ||
95 | case IB_RATE_56_GBPS: return 56250; | ||
96 | case IB_RATE_112_GBPS: return 112500; | ||
97 | case IB_RATE_168_GBPS: return 168750; | ||
98 | case IB_RATE_25_GBPS: return 25781; | ||
99 | case IB_RATE_100_GBPS: return 103125; | ||
100 | case IB_RATE_200_GBPS: return 206250; | ||
101 | case IB_RATE_300_GBPS: return 309375; | ||
102 | default: return -1; | ||
103 | } | ||
104 | } | ||
105 | EXPORT_SYMBOL(ib_rate_to_mbps); | ||
106 | |||
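The removed rate table is not all round numbers because 64/66b-encoded InfiniBand lanes run at fractional speeds: an FDR lane signals at 14.0625 Gb/s and an EDR lane at 25.78125 Gb/s, so the Mbps entries are truncated integers. A quick cross-check of the 4x aggregates against the table above, assuming those lane rates:

#include <stdio.h>

int main(void)
{
    double fdr_lane = 14.0625, edr_lane = 25.78125;   /* Gb/s per lane */

    printf("4x FDR: %.0f Mb/s (table entry: 56250)\n",  4 * fdr_lane * 1000);
    printf("4x EDR: %.0f Mb/s (table entry: 103125)\n", 4 * edr_lane * 1000);
    return 0;
}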
107 | enum rdma_transport_type | 80 | enum rdma_transport_type |
108 | rdma_node_get_transport(enum rdma_node_type node_type) | 81 | rdma_node_get_transport(enum rdma_node_type node_type) |
109 | { | 82 | { |
@@ -277,13 +250,6 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd, | |||
277 | srq->uobject = NULL; | 250 | srq->uobject = NULL; |
278 | srq->event_handler = srq_init_attr->event_handler; | 251 | srq->event_handler = srq_init_attr->event_handler; |
279 | srq->srq_context = srq_init_attr->srq_context; | 252 | srq->srq_context = srq_init_attr->srq_context; |
280 | srq->srq_type = srq_init_attr->srq_type; | ||
281 | if (srq->srq_type == IB_SRQT_XRC) { | ||
282 | srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; | ||
283 | srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq; | ||
284 | atomic_inc(&srq->ext.xrc.xrcd->usecnt); | ||
285 | atomic_inc(&srq->ext.xrc.cq->usecnt); | ||
286 | } | ||
287 | atomic_inc(&pd->usecnt); | 253 | atomic_inc(&pd->usecnt); |
288 | atomic_set(&srq->usecnt, 0); | 254 | atomic_set(&srq->usecnt, 0); |
289 | } | 255 | } |
@@ -313,29 +279,16 @@ EXPORT_SYMBOL(ib_query_srq); | |||
313 | int ib_destroy_srq(struct ib_srq *srq) | 279 | int ib_destroy_srq(struct ib_srq *srq) |
314 | { | 280 | { |
315 | struct ib_pd *pd; | 281 | struct ib_pd *pd; |
316 | enum ib_srq_type srq_type; | ||
317 | struct ib_xrcd *uninitialized_var(xrcd); | ||
318 | struct ib_cq *uninitialized_var(cq); | ||
319 | int ret; | 282 | int ret; |
320 | 283 | ||
321 | if (atomic_read(&srq->usecnt)) | 284 | if (atomic_read(&srq->usecnt)) |
322 | return -EBUSY; | 285 | return -EBUSY; |
323 | 286 | ||
324 | pd = srq->pd; | 287 | pd = srq->pd; |
325 | srq_type = srq->srq_type; | ||
326 | if (srq_type == IB_SRQT_XRC) { | ||
327 | xrcd = srq->ext.xrc.xrcd; | ||
328 | cq = srq->ext.xrc.cq; | ||
329 | } | ||
330 | 288 | ||
331 | ret = srq->device->destroy_srq(srq); | 289 | ret = srq->device->destroy_srq(srq); |
332 | if (!ret) { | 290 | if (!ret) |
333 | atomic_dec(&pd->usecnt); | 291 | atomic_dec(&pd->usecnt); |
334 | if (srq_type == IB_SRQT_XRC) { | ||
335 | atomic_dec(&xrcd->usecnt); | ||
336 | atomic_dec(&cq->usecnt); | ||
337 | } | ||
338 | } | ||
339 | 292 | ||
340 | return ret; | 293 | return ret; |
341 | } | 294 | } |
@@ -343,123 +296,28 @@ EXPORT_SYMBOL(ib_destroy_srq); | |||
343 | 296 | ||
344 | /* Queue pairs */ | 297 | /* Queue pairs */ |
345 | 298 | ||
346 | static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) | ||
347 | { | ||
348 | struct ib_qp *qp = context; | ||
349 | |||
350 | list_for_each_entry(event->element.qp, &qp->open_list, open_list) | ||
351 | event->element.qp->event_handler(event, event->element.qp->qp_context); | ||
352 | } | ||
353 | |||
354 | static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) | ||
355 | { | ||
356 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
357 | list_add(&qp->xrcd_list, &xrcd->tgt_qp_list); | ||
358 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
359 | } | ||
360 | |||
361 | static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, | ||
362 | void (*event_handler)(struct ib_event *, void *), | ||
363 | void *qp_context) | ||
364 | { | ||
365 | struct ib_qp *qp; | ||
366 | unsigned long flags; | ||
367 | |||
368 | qp = kzalloc(sizeof *qp, GFP_KERNEL); | ||
369 | if (!qp) | ||
370 | return ERR_PTR(-ENOMEM); | ||
371 | |||
372 | qp->real_qp = real_qp; | ||
373 | atomic_inc(&real_qp->usecnt); | ||
374 | qp->device = real_qp->device; | ||
375 | qp->event_handler = event_handler; | ||
376 | qp->qp_context = qp_context; | ||
377 | qp->qp_num = real_qp->qp_num; | ||
378 | qp->qp_type = real_qp->qp_type; | ||
379 | |||
380 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); | ||
381 | list_add(&qp->open_list, &real_qp->open_list); | ||
382 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); | ||
383 | |||
384 | return qp; | ||
385 | } | ||
386 | |||
387 | struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd, | ||
388 | struct ib_qp_open_attr *qp_open_attr) | ||
389 | { | ||
390 | struct ib_qp *qp, *real_qp; | ||
391 | |||
392 | if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) | ||
393 | return ERR_PTR(-EINVAL); | ||
394 | |||
395 | qp = ERR_PTR(-EINVAL); | ||
396 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
397 | list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) { | ||
398 | if (real_qp->qp_num == qp_open_attr->qp_num) { | ||
399 | qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, | ||
400 | qp_open_attr->qp_context); | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
405 | return qp; | ||
406 | } | ||
407 | EXPORT_SYMBOL(ib_open_qp); | ||
408 | |||
409 | struct ib_qp *ib_create_qp(struct ib_pd *pd, | 299 | struct ib_qp *ib_create_qp(struct ib_pd *pd, |
410 | struct ib_qp_init_attr *qp_init_attr) | 300 | struct ib_qp_init_attr *qp_init_attr) |
411 | { | 301 | { |
412 | struct ib_qp *qp, *real_qp; | 302 | struct ib_qp *qp; |
413 | struct ib_device *device; | ||
414 | 303 | ||
415 | device = pd ? pd->device : qp_init_attr->xrcd->device; | 304 | qp = pd->device->create_qp(pd, qp_init_attr, NULL); |
416 | qp = device->create_qp(pd, qp_init_attr, NULL); | ||
417 | 305 | ||
418 | if (!IS_ERR(qp)) { | 306 | if (!IS_ERR(qp)) { |
419 | qp->device = device; | 307 | qp->device = pd->device; |
420 | qp->real_qp = qp; | 308 | qp->pd = pd; |
421 | qp->uobject = NULL; | 309 | qp->send_cq = qp_init_attr->send_cq; |
422 | qp->qp_type = qp_init_attr->qp_type; | 310 | qp->recv_cq = qp_init_attr->recv_cq; |
423 | 311 | qp->srq = qp_init_attr->srq; | |
424 | atomic_set(&qp->usecnt, 0); | 312 | qp->uobject = NULL; |
425 | if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) { | 313 | qp->event_handler = qp_init_attr->event_handler; |
426 | qp->event_handler = __ib_shared_qp_event_handler; | 314 | qp->qp_context = qp_init_attr->qp_context; |
427 | qp->qp_context = qp; | 315 | qp->qp_type = qp_init_attr->qp_type; |
428 | qp->pd = NULL; | 316 | atomic_inc(&pd->usecnt); |
429 | qp->send_cq = qp->recv_cq = NULL; | 317 | atomic_inc(&qp_init_attr->send_cq->usecnt); |
430 | qp->srq = NULL; | 318 | atomic_inc(&qp_init_attr->recv_cq->usecnt); |
431 | qp->xrcd = qp_init_attr->xrcd; | 319 | if (qp_init_attr->srq) |
432 | atomic_inc(&qp_init_attr->xrcd->usecnt); | 320 | atomic_inc(&qp_init_attr->srq->usecnt); |
433 | INIT_LIST_HEAD(&qp->open_list); | ||
434 | |||
435 | real_qp = qp; | ||
436 | qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, | ||
437 | qp_init_attr->qp_context); | ||
438 | if (!IS_ERR(qp)) | ||
439 | __ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp); | ||
440 | else | ||
441 | real_qp->device->destroy_qp(real_qp); | ||
442 | } else { | ||
443 | qp->event_handler = qp_init_attr->event_handler; | ||
444 | qp->qp_context = qp_init_attr->qp_context; | ||
445 | if (qp_init_attr->qp_type == IB_QPT_XRC_INI) { | ||
446 | qp->recv_cq = NULL; | ||
447 | qp->srq = NULL; | ||
448 | } else { | ||
449 | qp->recv_cq = qp_init_attr->recv_cq; | ||
450 | atomic_inc(&qp_init_attr->recv_cq->usecnt); | ||
451 | qp->srq = qp_init_attr->srq; | ||
452 | if (qp->srq) | ||
453 | atomic_inc(&qp_init_attr->srq->usecnt); | ||
454 | } | ||
455 | |||
456 | qp->pd = pd; | ||
457 | qp->send_cq = qp_init_attr->send_cq; | ||
458 | qp->xrcd = NULL; | ||
459 | |||
460 | atomic_inc(&pd->usecnt); | ||
461 | atomic_inc(&qp_init_attr->send_cq->usecnt); | ||
462 | } | ||
463 | } | 321 | } |
464 | 322 | ||
465 | return qp; | 323 | return qp; |
@@ -468,8 +326,8 @@ EXPORT_SYMBOL(ib_create_qp); | |||
468 | 326 | ||
469 | static const struct { | 327 | static const struct { |
470 | int valid; | 328 | int valid; |
471 | enum ib_qp_attr_mask req_param[IB_QPT_MAX]; | 329 | enum ib_qp_attr_mask req_param[IB_QPT_RAW_ETHERTYPE + 1]; |
472 | enum ib_qp_attr_mask opt_param[IB_QPT_MAX]; | 330 | enum ib_qp_attr_mask opt_param[IB_QPT_RAW_ETHERTYPE + 1]; |
473 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { | 331 | } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { |
474 | [IB_QPS_RESET] = { | 332 | [IB_QPS_RESET] = { |
475 | [IB_QPS_RESET] = { .valid = 1 }, | 333 | [IB_QPS_RESET] = { .valid = 1 }, |
@@ -479,19 +337,12 @@ static const struct { | |||
479 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | | 337 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
480 | IB_QP_PORT | | 338 | IB_QP_PORT | |
481 | IB_QP_QKEY), | 339 | IB_QP_QKEY), |
482 | [IB_QPT_RAW_PACKET] = IB_QP_PORT, | ||
483 | [IB_QPT_UC] = (IB_QP_PKEY_INDEX | | 340 | [IB_QPT_UC] = (IB_QP_PKEY_INDEX | |
484 | IB_QP_PORT | | 341 | IB_QP_PORT | |
485 | IB_QP_ACCESS_FLAGS), | 342 | IB_QP_ACCESS_FLAGS), |
486 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | | 343 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
487 | IB_QP_PORT | | 344 | IB_QP_PORT | |
488 | IB_QP_ACCESS_FLAGS), | 345 | IB_QP_ACCESS_FLAGS), |
489 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | | ||
490 | IB_QP_PORT | | ||
491 | IB_QP_ACCESS_FLAGS), | ||
492 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | | ||
493 | IB_QP_PORT | | ||
494 | IB_QP_ACCESS_FLAGS), | ||
495 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 346 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
496 | IB_QP_QKEY), | 347 | IB_QP_QKEY), |
497 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 348 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -514,12 +365,6 @@ static const struct { | |||
514 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | | 365 | [IB_QPT_RC] = (IB_QP_PKEY_INDEX | |
515 | IB_QP_PORT | | 366 | IB_QP_PORT | |
516 | IB_QP_ACCESS_FLAGS), | 367 | IB_QP_ACCESS_FLAGS), |
517 | [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX | | ||
518 | IB_QP_PORT | | ||
519 | IB_QP_ACCESS_FLAGS), | ||
520 | [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX | | ||
521 | IB_QP_PORT | | ||
522 | IB_QP_ACCESS_FLAGS), | ||
523 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 368 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
524 | IB_QP_QKEY), | 369 | IB_QP_QKEY), |
525 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 370 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -539,16 +384,6 @@ static const struct { | |||
539 | IB_QP_RQ_PSN | | 384 | IB_QP_RQ_PSN | |
540 | IB_QP_MAX_DEST_RD_ATOMIC | | 385 | IB_QP_MAX_DEST_RD_ATOMIC | |
541 | IB_QP_MIN_RNR_TIMER), | 386 | IB_QP_MIN_RNR_TIMER), |
542 | [IB_QPT_XRC_INI] = (IB_QP_AV | | ||
543 | IB_QP_PATH_MTU | | ||
544 | IB_QP_DEST_QPN | | ||
545 | IB_QP_RQ_PSN), | ||
546 | [IB_QPT_XRC_TGT] = (IB_QP_AV | | ||
547 | IB_QP_PATH_MTU | | ||
548 | IB_QP_DEST_QPN | | ||
549 | IB_QP_RQ_PSN | | ||
550 | IB_QP_MAX_DEST_RD_ATOMIC | | ||
551 | IB_QP_MIN_RNR_TIMER), | ||
552 | }, | 387 | }, |
553 | .opt_param = { | 388 | .opt_param = { |
554 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | | 389 | [IB_QPT_UD] = (IB_QP_PKEY_INDEX | |
@@ -559,12 +394,6 @@ static const struct { | |||
559 | [IB_QPT_RC] = (IB_QP_ALT_PATH | | 394 | [IB_QPT_RC] = (IB_QP_ALT_PATH | |
560 | IB_QP_ACCESS_FLAGS | | 395 | IB_QP_ACCESS_FLAGS | |
561 | IB_QP_PKEY_INDEX), | 396 | IB_QP_PKEY_INDEX), |
562 | [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH | | ||
563 | IB_QP_ACCESS_FLAGS | | ||
564 | IB_QP_PKEY_INDEX), | ||
565 | [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH | | ||
566 | IB_QP_ACCESS_FLAGS | | ||
567 | IB_QP_PKEY_INDEX), | ||
568 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 397 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
569 | IB_QP_QKEY), | 398 | IB_QP_QKEY), |
570 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 399 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
@@ -585,13 +414,6 @@ static const struct { | |||
585 | IB_QP_RNR_RETRY | | 414 | IB_QP_RNR_RETRY | |
586 | IB_QP_SQ_PSN | | 415 | IB_QP_SQ_PSN | |
587 | IB_QP_MAX_QP_RD_ATOMIC), | 416 | IB_QP_MAX_QP_RD_ATOMIC), |
588 | [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT | | ||
589 | IB_QP_RETRY_CNT | | ||
590 | IB_QP_RNR_RETRY | | ||
591 | IB_QP_SQ_PSN | | ||
592 | IB_QP_MAX_QP_RD_ATOMIC), | ||
593 | [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT | | ||
594 | IB_QP_SQ_PSN), | ||
595 | [IB_QPT_SMI] = IB_QP_SQ_PSN, | 417 | [IB_QPT_SMI] = IB_QP_SQ_PSN, |
596 | [IB_QPT_GSI] = IB_QP_SQ_PSN, | 418 | [IB_QPT_GSI] = IB_QP_SQ_PSN, |
597 | }, | 419 | }, |
@@ -607,15 +429,6 @@ static const struct { | |||
607 | IB_QP_ACCESS_FLAGS | | 429 | IB_QP_ACCESS_FLAGS | |
608 | IB_QP_MIN_RNR_TIMER | | 430 | IB_QP_MIN_RNR_TIMER | |
609 | IB_QP_PATH_MIG_STATE), | 431 | IB_QP_PATH_MIG_STATE), |
610 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
611 | IB_QP_ALT_PATH | | ||
612 | IB_QP_ACCESS_FLAGS | | ||
613 | IB_QP_PATH_MIG_STATE), | ||
614 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
615 | IB_QP_ALT_PATH | | ||
616 | IB_QP_ACCESS_FLAGS | | ||
617 | IB_QP_MIN_RNR_TIMER | | ||
618 | IB_QP_PATH_MIG_STATE), | ||
619 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 432 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
620 | IB_QP_QKEY), | 433 | IB_QP_QKEY), |
621 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 434 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -640,15 +453,6 @@ static const struct { | |||
640 | IB_QP_ALT_PATH | | 453 | IB_QP_ALT_PATH | |
641 | IB_QP_PATH_MIG_STATE | | 454 | IB_QP_PATH_MIG_STATE | |
642 | IB_QP_MIN_RNR_TIMER), | 455 | IB_QP_MIN_RNR_TIMER), |
643 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
644 | IB_QP_ACCESS_FLAGS | | ||
645 | IB_QP_ALT_PATH | | ||
646 | IB_QP_PATH_MIG_STATE), | ||
647 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
648 | IB_QP_ACCESS_FLAGS | | ||
649 | IB_QP_ALT_PATH | | ||
650 | IB_QP_PATH_MIG_STATE | | ||
651 | IB_QP_MIN_RNR_TIMER), | ||
652 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 456 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
653 | IB_QP_QKEY), | 457 | IB_QP_QKEY), |
654 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 458 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -661,8 +465,6 @@ static const struct { | |||
661 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 465 | [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
662 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 466 | [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
663 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 467 | [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
664 | [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY, | ||
665 | [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */ | ||
666 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, | 468 | [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY, |
667 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY | 469 | [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY |
668 | } | 470 | } |
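IB_QP_EN_SQD_ASYNC_NOTIFY asks for an affiliated IB_EVENT_SQ_DRAINED event once the send queue empties after an RTS→SQD move; the /* ??? */ on the deleted XRC_TGT row presumably flags that a target QP has no send queue to drain in the first place. A kernel-context sketch of starting such a drain (event-handler wiring elided):

        /* Request async notification that the SQ has drained; completion
         * is signalled later via IB_EVENT_SQ_DRAINED (sketch only). */
        static int start_sq_drain(struct ib_qp *qp)
        {
                struct ib_qp_attr attr = { .qp_state = IB_QPS_SQD };

                return ib_modify_qp(qp, &attr,
                                    IB_QP_STATE | IB_QP_EN_SQD_ASYNC_NOTIFY);
        }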
@@ -685,15 +487,6 @@ static const struct { | |||
685 | IB_QP_ACCESS_FLAGS | | 487 | IB_QP_ACCESS_FLAGS | |
686 | IB_QP_MIN_RNR_TIMER | | 488 | IB_QP_MIN_RNR_TIMER | |
687 | IB_QP_PATH_MIG_STATE), | 489 | IB_QP_PATH_MIG_STATE), |
688 | [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE | | ||
689 | IB_QP_ALT_PATH | | ||
690 | IB_QP_ACCESS_FLAGS | | ||
691 | IB_QP_PATH_MIG_STATE), | ||
692 | [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE | | ||
693 | IB_QP_ALT_PATH | | ||
694 | IB_QP_ACCESS_FLAGS | | ||
695 | IB_QP_MIN_RNR_TIMER | | ||
696 | IB_QP_PATH_MIG_STATE), | ||
697 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | | 490 | [IB_QPT_SMI] = (IB_QP_CUR_STATE | |
698 | IB_QP_QKEY), | 491 | IB_QP_QKEY), |
699 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | | 492 | [IB_QPT_GSI] = (IB_QP_CUR_STATE | |
@@ -722,25 +515,6 @@ static const struct { | |||
722 | IB_QP_PKEY_INDEX | | 515 | IB_QP_PKEY_INDEX | |
723 | IB_QP_MIN_RNR_TIMER | | 516 | IB_QP_MIN_RNR_TIMER | |
724 | IB_QP_PATH_MIG_STATE), | 517 | IB_QP_PATH_MIG_STATE), |
725 | [IB_QPT_XRC_INI] = (IB_QP_PORT | | ||
726 | IB_QP_AV | | ||
727 | IB_QP_TIMEOUT | | ||
728 | IB_QP_RETRY_CNT | | ||
729 | IB_QP_RNR_RETRY | | ||
730 | IB_QP_MAX_QP_RD_ATOMIC | | ||
731 | IB_QP_ALT_PATH | | ||
732 | IB_QP_ACCESS_FLAGS | | ||
733 | IB_QP_PKEY_INDEX | | ||
734 | IB_QP_PATH_MIG_STATE), | ||
735 | [IB_QPT_XRC_TGT] = (IB_QP_PORT | | ||
736 | IB_QP_AV | | ||
737 | IB_QP_TIMEOUT | | ||
738 | IB_QP_MAX_DEST_RD_ATOMIC | | ||
739 | IB_QP_ALT_PATH | | ||
740 | IB_QP_ACCESS_FLAGS | | ||
741 | IB_QP_PKEY_INDEX | | ||
742 | IB_QP_MIN_RNR_TIMER | | ||
743 | IB_QP_PATH_MIG_STATE), | ||
744 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | | 518 | [IB_QPT_SMI] = (IB_QP_PKEY_INDEX | |
745 | IB_QP_QKEY), | 519 | IB_QP_QKEY), |
746 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | | 520 | [IB_QPT_GSI] = (IB_QP_PKEY_INDEX | |
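All of these cells feed a single validator in this file, ib_modify_qp_is_ok(), which first checks that the state transition itself is legal and then applies the required/optional test to the caller's mask. A condensed standalone model of that two-step check (masks are arbitrary stand-ins):

        #include <stdint.h>

        enum qps { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, NR_QPS };

        struct trans {
                int      valid;         /* transition allowed at all?      */
                uint32_t req, opt;      /* per-type masks collapsed to one */
        };

        /* Tiny stand-in for qp_state_table: only a few transitions shown. */
        static const struct trans table[NR_QPS][NR_QPS] = {
                [QPS_RESET][QPS_INIT] = { .valid = 1, .req = 0x3 },
                [QPS_INIT][QPS_RTR]   = { .valid = 1, .req = 0xf, .opt = 0x30 },
                [QPS_RTR][QPS_RTS]    = { .valid = 1, .req = 0x7 },
        };

        static int modify_is_ok(enum qps cur, enum qps next, uint32_t mask)
        {
                const struct trans *t = &table[cur][next];

                if (!t->valid)
                        return 0;
                return (mask & t->req) == t->req &&
                       !(mask & ~(t->req | t->opt));
        }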
@@ -805,7 +579,7 @@ int ib_modify_qp(struct ib_qp *qp, | |||
805 | struct ib_qp_attr *qp_attr, | 579 | struct ib_qp_attr *qp_attr, |
806 | int qp_attr_mask) | 580 | int qp_attr_mask) |
807 | { | 581 | { |
808 | return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); | 582 | return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL); |
809 | } | 583 | } |
810 | EXPORT_SYMBOL(ib_modify_qp); | 584 | EXPORT_SYMBOL(ib_modify_qp); |
811 | 585 | ||
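Before this revert, ib_modify_qp() dispatched on qp->real_qp (as ib_query_qp() still does in the next hunk) so that a shared XRC target QP opened from another context forwarded its verbs to the one real QP; with XRC backed out, every handle is its own real QP and the indirection disappears. A standalone model of the open-handle scheme being removed:

        #include <stdlib.h>

        /* Model of the shared-QP handles dropped here: an "open" handle is
         * a thin alias whose verbs are forwarded to the real QP. */
        struct qp {
                struct qp *real_qp;     /* points to self for a real QP */
                int        usecnt;      /* open handles on a real QP    */
        };

        static struct qp *open_qp(struct qp *real)
        {
                struct qp *h = calloc(1, sizeof(*h));

                if (h) {
                        h->real_qp = real->real_qp;
                        h->real_qp->usecnt++;
                }
                return h;
        }

        static int modify_qp(struct qp *qp)
        {
                struct qp *target = qp->real_qp; /* the dropped indirection */

                (void)target;                    /* hardware op on target   */
                return 0;
        }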
@@ -815,59 +589,11 @@ int ib_query_qp(struct ib_qp *qp, | |||
815 | struct ib_qp_init_attr *qp_init_attr) | 589 | struct ib_qp_init_attr *qp_init_attr) |
816 | { | 590 | { |
817 | return qp->device->query_qp ? | 591 | return qp->device->query_qp ? |
818 | qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) : | 592 | qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : |
819 | -ENOSYS; | 593 | -ENOSYS; |
820 | } | 594 | } |
821 | EXPORT_SYMBOL(ib_query_qp); | 595 | EXPORT_SYMBOL(ib_query_qp); |
822 | 596 | ||
823 | int ib_close_qp(struct ib_qp *qp) | ||
824 | { | ||
825 | struct ib_qp *real_qp; | ||
826 | unsigned long flags; | ||
827 | |||
828 | real_qp = qp->real_qp; | ||
829 | if (real_qp == qp) | ||
830 | return -EINVAL; | ||
831 | |||
832 | spin_lock_irqsave(&real_qp->device->event_handler_lock, flags); | ||
833 | list_del(&qp->open_list); | ||
834 | spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); | ||
835 | |||
836 | atomic_dec(&real_qp->usecnt); | ||
837 | kfree(qp); | ||
838 | |||
839 | return 0; | ||
840 | } | ||
841 | EXPORT_SYMBOL(ib_close_qp); | ||
842 | |||
843 | static int __ib_destroy_shared_qp(struct ib_qp *qp) | ||
844 | { | ||
845 | struct ib_xrcd *xrcd; | ||
846 | struct ib_qp *real_qp; | ||
847 | int ret; | ||
848 | |||
849 | real_qp = qp->real_qp; | ||
850 | xrcd = real_qp->xrcd; | ||
851 | |||
852 | mutex_lock(&xrcd->tgt_qp_mutex); | ||
853 | ib_close_qp(qp); | ||
854 | if (atomic_read(&real_qp->usecnt) == 0) | ||
855 | list_del(&real_qp->xrcd_list); | ||
856 | else | ||
857 | real_qp = NULL; | ||
858 | mutex_unlock(&xrcd->tgt_qp_mutex); | ||
859 | |||
860 | if (real_qp) { | ||
861 | ret = ib_destroy_qp(real_qp); | ||
862 | if (!ret) | ||
863 | atomic_dec(&xrcd->usecnt); | ||
864 | else | ||
865 | __ib_insert_xrcd_qp(xrcd, real_qp); | ||
866 | } | ||
867 | |||
868 | return 0; | ||
869 | } | ||
870 | |||
871 | int ib_destroy_qp(struct ib_qp *qp) | 597 | int ib_destroy_qp(struct ib_qp *qp) |
872 | { | 598 | { |
873 | struct ib_pd *pd; | 599 | struct ib_pd *pd; |
@@ -875,25 +601,16 @@ int ib_destroy_qp(struct ib_qp *qp) | |||
875 | struct ib_srq *srq; | 601 | struct ib_srq *srq; |
876 | int ret; | 602 | int ret; |
877 | 603 | ||
878 | if (atomic_read(&qp->usecnt)) | 604 | pd = qp->pd; |
879 | return -EBUSY; | 605 | scq = qp->send_cq; |
880 | 606 | rcq = qp->recv_cq; | |
881 | if (qp->real_qp != qp) | 607 | srq = qp->srq; |
882 | return __ib_destroy_shared_qp(qp); | ||
883 | |||
884 | pd = qp->pd; | ||
885 | scq = qp->send_cq; | ||
886 | rcq = qp->recv_cq; | ||
887 | srq = qp->srq; | ||
888 | 608 | ||
889 | ret = qp->device->destroy_qp(qp); | 609 | ret = qp->device->destroy_qp(qp); |
890 | if (!ret) { | 610 | if (!ret) { |
891 | if (pd) | 611 | atomic_dec(&pd->usecnt); |
892 | atomic_dec(&pd->usecnt); | 612 | atomic_dec(&scq->usecnt); |
893 | if (scq) | 613 | atomic_dec(&rcq->usecnt); |
894 | atomic_dec(&scq->usecnt); | ||
895 | if (rcq) | ||
896 | atomic_dec(&rcq->usecnt); | ||
897 | if (srq) | 614 | if (srq) |
898 | atomic_dec(&srq->usecnt); | 615 | atomic_dec(&srq->usecnt); |
899 | } | 616 | } |
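The deleted ib_close_qp()/__ib_destroy_shared_qp() pair and the usecnt guard let a real target QP outlive its open handles, and the pd/scq/rcq NULL checks went with them because an XRC target QP owns none of those resources. Once every QP carries all four again, ib_destroy_qp() can decrement unconditionally. The vanished guard, in miniature (standalone model, errno values hard-coded):

        #include <stdlib.h>

        struct qp_model { struct qp_model *real_qp; int usecnt; };

        /* A real QP cannot be destroyed while open handles still reference
         * it, and an alias must be closed rather than destroyed. */
        static int destroy_qp(struct qp_model *qp)
        {
                if (qp->usecnt)
                        return -16;             /* -EBUSY  */
                if (qp->real_qp != qp)
                        return -22;             /* -EINVAL */
                free(qp);
                return 0;
        }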
@@ -1184,71 +901,22 @@ EXPORT_SYMBOL(ib_dealloc_fmr); | |||
1184 | 901 | ||
1185 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) | 902 | int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
1186 | { | 903 | { |
1187 | int ret; | ||
1188 | |||
1189 | if (!qp->device->attach_mcast) | 904 | if (!qp->device->attach_mcast) |
1190 | return -ENOSYS; | 905 | return -ENOSYS; |
1191 | if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) | 906 | if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) |
1192 | return -EINVAL; | 907 | return -EINVAL; |
1193 | 908 | ||
1194 | ret = qp->device->attach_mcast(qp, gid, lid); | 909 | return qp->device->attach_mcast(qp, gid, lid); |
1195 | if (!ret) | ||
1196 | atomic_inc(&qp->usecnt); | ||
1197 | return ret; | ||
1198 | } | 910 | } |
1199 | EXPORT_SYMBOL(ib_attach_mcast); | 911 | EXPORT_SYMBOL(ib_attach_mcast); |
1200 | 912 | ||
1201 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) | 913 | int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) |
1202 | { | 914 | { |
1203 | int ret; | ||
1204 | |||
1205 | if (!qp->device->detach_mcast) | 915 | if (!qp->device->detach_mcast) |
1206 | return -ENOSYS; | 916 | return -ENOSYS; |
1207 | if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) | 917 | if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD) |
1208 | return -EINVAL; | 918 | return -EINVAL; |
1209 | 919 | ||
1210 | ret = qp->device->detach_mcast(qp, gid, lid); | 920 | return qp->device->detach_mcast(qp, gid, lid); |
1211 | if (!ret) | ||
1212 | atomic_dec(&qp->usecnt); | ||
1213 | return ret; | ||
1214 | } | 921 | } |
1215 | EXPORT_SYMBOL(ib_detach_mcast); | 922 | EXPORT_SYMBOL(ib_detach_mcast); |
1216 | |||
1217 | struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device) | ||
1218 | { | ||
1219 | struct ib_xrcd *xrcd; | ||
1220 | |||
1221 | if (!device->alloc_xrcd) | ||
1222 | return ERR_PTR(-ENOSYS); | ||
1223 | |||
1224 | xrcd = device->alloc_xrcd(device, NULL, NULL); | ||
1225 | if (!IS_ERR(xrcd)) { | ||
1226 | xrcd->device = device; | ||
1227 | xrcd->inode = NULL; | ||
1228 | atomic_set(&xrcd->usecnt, 0); | ||
1229 | mutex_init(&xrcd->tgt_qp_mutex); | ||
1230 | INIT_LIST_HEAD(&xrcd->tgt_qp_list); | ||
1231 | } | ||
1232 | |||
1233 | return xrcd; | ||
1234 | } | ||
1235 | EXPORT_SYMBOL(ib_alloc_xrcd); | ||
1236 | |||
1237 | int ib_dealloc_xrcd(struct ib_xrcd *xrcd) | ||
1238 | { | ||
1239 | struct ib_qp *qp; | ||
1240 | int ret; | ||
1241 | |||
1242 | if (atomic_read(&xrcd->usecnt)) | ||
1243 | return -EBUSY; | ||
1244 | |||
1245 | while (!list_empty(&xrcd->tgt_qp_list)) { | ||
1246 | qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list); | ||
1247 | ret = ib_destroy_qp(qp); | ||
1248 | if (ret) | ||
1249 | return ret; | ||
1250 | } | ||
1251 | |||
1252 | return xrcd->device->dealloc_xrcd(xrcd); | ||
1253 | } | ||
1254 | EXPORT_SYMBOL(ib_dealloc_xrcd); | ||
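The section closes by removing the XRC-domain helpers: ib_alloc_xrcd() set up the target-QP list and refcount, and ib_dealloc_xrcd() refused to run while the domain was referenced, then reaped any target QPs still parked on tgt_qp_list before calling into the driver. A standalone model of that teardown order:

        #include <stdlib.h>

        struct tgt_qp { struct tgt_qp *next; };

        struct xrcd_model {
                int            usecnt;      /* users of the domain */
                struct tgt_qp *tgt_qps;     /* parked target QPs   */
        };

        /* Teardown mirrors the removed ib_dealloc_xrcd(): refuse while the
         * domain is in use, then reap every remaining target QP. */
        static int dealloc_xrcd(struct xrcd_model *x)
        {
                if (x->usecnt)
                        return -16;                 /* -EBUSY */

                while (x->tgt_qps) {
                        struct tgt_qp *qp = x->tgt_qps;

                        x->tgt_qps = qp->next;
                        free(qp);               /* ib_destroy_qp() stand-in */
                }
                free(x);
                return 0;
        }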