Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/agent.c       |  29
-rw-r--r--  drivers/infiniband/core/cma.c         | 313
-rw-r--r--  drivers/infiniband/core/iwcm.c        |   4
-rw-r--r--  drivers/infiniband/core/mad.c         |  27
-rw-r--r--  drivers/infiniband/core/multicast.c   |  23
-rw-r--r--  drivers/infiniband/core/sa_query.c    |  30
-rw-r--r--  drivers/infiniband/core/sysfs.c       |  15
-rw-r--r--  drivers/infiniband/core/ucma.c        |  92
-rw-r--r--  drivers/infiniband/core/ud_header.c   | 140
-rw-r--r--  drivers/infiniband/core/user_mad.c    |   2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c  |   2
-rw-r--r--  drivers/infiniband/core/verbs.c       |  16

12 files changed, 606 insertions(+), 87 deletions(-)
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index ae7c2880e624..91916a8d5de4 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -59,8 +59,8 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
 	struct ib_agent_port_private *entry;
 
 	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-		if (entry->agent[0]->device == device &&
-		    entry->agent[0]->port_num == port_num)
+		if (entry->agent[1]->device == device &&
+		    entry->agent[1]->port_num == port_num)
 			return entry;
 	}
 	return NULL;
@@ -155,14 +155,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		goto error1;
 	}
 
-	/* Obtain send only MAD agent for SMI QP */
-	port_priv->agent[0] = ib_register_mad_agent(device, port_num,
-						    IB_QPT_SMI, NULL, 0,
-						    &agent_send_handler,
-						    NULL, NULL);
-	if (IS_ERR(port_priv->agent[0])) {
-		ret = PTR_ERR(port_priv->agent[0]);
-		goto error2;
+	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+		/* Obtain send only MAD agent for SMI QP */
+		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+							    IB_QPT_SMI, NULL, 0,
+							    &agent_send_handler,
+							    NULL, NULL);
+		if (IS_ERR(port_priv->agent[0])) {
+			ret = PTR_ERR(port_priv->agent[0]);
+			goto error2;
+		}
 	}
 
 	/* Obtain send only MAD agent for GSI QP */
@@ -182,7 +184,8 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	return 0;
 
 error3:
-	ib_unregister_mad_agent(port_priv->agent[0]);
+	if (port_priv->agent[0])
+		ib_unregister_mad_agent(port_priv->agent[0]);
 error2:
 	kfree(port_priv);
 error1:
@@ -205,7 +208,9 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
 	ib_unregister_mad_agent(port_priv->agent[1]);
-	ib_unregister_mad_agent(port_priv->agent[0]);
+	if (port_priv->agent[0])
+		ib_unregister_mad_agent(port_priv->agent[0]);
+
 	kfree(port_priv);
 	return 0;
 }
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b930b8110a63..6884da24fde1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -59,6 +59,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define CMA_CM_RESPONSE_TIMEOUT 20
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
+#define CMA_IBOE_PACKET_LIFETIME 18
 
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device);
@@ -157,6 +158,7 @@ struct cma_multicast {
 	struct list_head list;
 	void *context;
 	struct sockaddr_storage addr;
+	struct kref mcref;
 };
 
 struct cma_work {
@@ -173,6 +175,12 @@ struct cma_ndev_work {
 	struct rdma_cm_event event;
 };
 
+struct iboe_mcast_work {
+	struct work_struct work;
+	struct rdma_id_private *id;
+	struct cma_multicast *mc;
+};
+
 union cma_ip_addr {
 	struct in6_addr ip6;
 	struct {
@@ -281,6 +289,8 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 	atomic_inc(&cma_dev->refcount);
 	id_priv->cma_dev = cma_dev;
 	id_priv->id.device = cma_dev->device;
+	id_priv->id.route.addr.dev_addr.transport =
+		rdma_node_get_transport(cma_dev->device->node_type);
 	list_add_tail(&id_priv->list, &cma_dev->id_list);
 }
 
@@ -290,6 +300,14 @@ static inline void cma_deref_dev(struct cma_device *cma_dev)
 		complete(&cma_dev->comp);
 }
 
+static inline void release_mc(struct kref *kref)
+{
+	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
+
+	kfree(mc->multicast.ib);
+	kfree(mc);
+}
+
 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
 {
 	list_del(&id_priv->list);
@@ -323,22 +341,63 @@ static int cma_set_qkey(struct rdma_id_private *id_priv)
 	return ret;
 }
 
+static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
+{
+	int i;
+	int err;
+	struct ib_port_attr props;
+	union ib_gid tmp;
+
+	err = ib_query_port(device, port_num, &props);
+	if (err)
+		return 1;
+
+	for (i = 0; i < props.gid_tbl_len; ++i) {
+		err = ib_query_gid(device, port_num, i, &tmp);
+		if (err)
+			return 1;
+		if (!memcmp(&tmp, gid, sizeof tmp))
+			return 0;
+	}
+
+	return -EAGAIN;
+}
+
 static int cma_acquire_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	struct cma_device *cma_dev;
-	union ib_gid gid;
+	union ib_gid gid, iboe_gid;
 	int ret = -ENODEV;
+	u8 port;
+	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
+		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
-	rdma_addr_get_sgid(dev_addr, &gid);
+	iboe_addr_get_sgid(dev_addr, &iboe_gid);
+	memcpy(&gid, dev_addr->src_dev_addr +
+	       rdma_addr_gid_offset(dev_addr), sizeof gid);
 	list_for_each_entry(cma_dev, &dev_list, list) {
-		ret = ib_find_cached_gid(cma_dev->device, &gid,
-					 &id_priv->id.port_num, NULL);
-		if (!ret) {
-			cma_attach_to_dev(id_priv, cma_dev);
-			break;
+		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
+				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+					ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+				else
+					ret = find_gid_port(cma_dev->device, &gid, port);
+
+				if (!ret) {
+					id_priv->id.port_num = port;
+					goto out;
+				} else if (ret == 1)
+					break;
+			}
 		}
 	}
+
+out:
+	if (!ret)
+		cma_attach_to_dev(id_priv, cma_dev);
+
 	return ret;
 }
 
@@ -556,10 +615,16 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	int ret;
+	u16 pkey;
+
+	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
+	    IB_LINK_LAYER_INFINIBAND)
+		pkey = ib_addr_get_pkey(dev_addr);
+	else
+		pkey = 0xffff;
 
 	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
-				  ib_addr_get_pkey(dev_addr),
-				  &qp_attr->pkey_index);
+				  pkey, &qp_attr->pkey_index);
 	if (ret)
 		return ret;
 
@@ -737,8 +802,8 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
 
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
-	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
+	case IB_LINK_LAYER_INFINIBAND:
 		if (id_priv->query)
 			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
 		break;
@@ -816,8 +881,17 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 		mc = container_of(id_priv->mc_list.next,
 				  struct cma_multicast, list);
 		list_del(&mc->list);
-		ib_sa_free_multicast(mc->multicast.ib);
-		kfree(mc);
+		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
+		case IB_LINK_LAYER_INFINIBAND:
+			ib_sa_free_multicast(mc->multicast.ib);
+			kfree(mc);
+			break;
+		case IB_LINK_LAYER_ETHERNET:
+			kref_put(&mc->mcref, release_mc);
+			break;
+		default:
+			break;
+		}
 	}
 }
 
@@ -833,7 +907,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	mutex_lock(&lock);
 	if (id_priv->cma_dev) {
 		mutex_unlock(&lock);
-		switch (rdma_node_get_transport(id->device->node_type)) {
+		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 		case RDMA_TRANSPORT_IB:
 			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
 				ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -1708,6 +1782,81 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 	return 0;
 }
 
+static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+{
+	struct rdma_route *route = &id_priv->id.route;
+	struct rdma_addr *addr = &route->addr;
+	struct cma_work *work;
+	int ret;
+	struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
+	struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
+	struct net_device *ndev = NULL;
+	u16 vid;
+
+	if (src_addr->sin_family != dst_addr->sin_family)
+		return -EINVAL;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	work->id = id_priv;
+	INIT_WORK(&work->work, cma_work_handler);
+
+	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
+	if (!route->path_rec) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	route->num_paths = 1;
+
+	if (addr->dev_addr.bound_dev_if)
+		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
+	if (!ndev) {
+		ret = -ENODEV;
+		goto err2;
+	}
+
+	vid = rdma_vlan_dev_vlan_id(ndev);
+
+	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
+	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+
+	route->path_rec->hop_limit = 1;
+	route->path_rec->reversible = 1;
+	route->path_rec->pkey = cpu_to_be16(0xffff);
+	route->path_rec->mtu_selector = IB_SA_EQ;
+	route->path_rec->sl = id_priv->tos >> 5;
+
+	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+	route->path_rec->rate_selector = IB_SA_EQ;
+	route->path_rec->rate = iboe_get_rate(ndev);
+	dev_put(ndev);
+	route->path_rec->packet_life_time_selector = IB_SA_EQ;
+	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+	if (!route->path_rec->mtu) {
+		ret = -EINVAL;
+		goto err2;
+	}
+
+	work->old_state = CMA_ROUTE_QUERY;
+	work->new_state = CMA_ROUTE_RESOLVED;
+	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+	work->event.status = 0;
+
+	queue_work(cma_wq, &work->work);
+
+	return 0;
+
+err2:
+	kfree(route->path_rec);
+	route->path_rec = NULL;
+err1:
+	kfree(work);
+	return ret;
+}
+
 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 {
 	struct rdma_id_private *id_priv;
@@ -1720,7 +1869,16 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	atomic_inc(&id_priv->refcount);
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = cma_resolve_ib_route(id_priv, timeout_ms);
+		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+		case IB_LINK_LAYER_INFINIBAND:
+			ret = cma_resolve_ib_route(id_priv, timeout_ms);
+			break;
+		case IB_LINK_LAYER_ETHERNET:
+			ret = cma_resolve_iboe_route(id_priv);
+			break;
+		default:
+			ret = -ENOSYS;
+		}
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = cma_resolve_iw_route(id_priv, timeout_ms);
@@ -1773,7 +1931,7 @@ port_found:
 	goto out;
 
 	id_priv->id.route.addr.dev_addr.dev_type =
-		(rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
+		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
 		ARPHRD_INFINIBAND : ARPHRD_ETHER;
 
 	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2758,6 +2916,102 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 	return 0;
 }
 
+static void iboe_mcast_work_handler(struct work_struct *work)
+{
+	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
+	struct cma_multicast *mc = mw->mc;
+	struct ib_sa_multicast *m = mc->multicast.ib;
+
+	mc->multicast.ib->context = mc;
+	cma_ib_mc_handler(0, m);
+	kref_put(&mc->mcref, release_mc);
+	kfree(mw);
+}
+
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
+{
+	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+
+	if (cma_any_addr(addr)) {
+		memset(mgid, 0, sizeof *mgid);
+	} else if (addr->sa_family == AF_INET6) {
+		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+	} else {
+		mgid->raw[0] = 0xff;
+		mgid->raw[1] = 0x0e;
+		mgid->raw[2] = 0;
+		mgid->raw[3] = 0;
+		mgid->raw[4] = 0;
+		mgid->raw[5] = 0;
+		mgid->raw[6] = 0;
+		mgid->raw[7] = 0;
+		mgid->raw[8] = 0;
+		mgid->raw[9] = 0;
+		mgid->raw[10] = 0xff;
+		mgid->raw[11] = 0xff;
+		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
+	}
+}
+
+static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+				   struct cma_multicast *mc)
+{
+	struct iboe_mcast_work *work;
+	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+	int err;
+	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+	struct net_device *ndev = NULL;
+
+	if (cma_zero_addr((struct sockaddr *)&mc->addr))
+		return -EINVAL;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
+	if (!mc->multicast.ib) {
+		err = -ENOMEM;
+		goto out1;
+	}
+
+	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
+
+	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
+	if (id_priv->id.ps == RDMA_PS_UDP)
+		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+
+	if (dev_addr->bound_dev_if)
+		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+	if (!ndev) {
+		err = -ENODEV;
+		goto out2;
+	}
+	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
+	mc->multicast.ib->rec.hop_limit = 1;
+	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
+	dev_put(ndev);
+	if (!mc->multicast.ib->rec.mtu) {
+		err = -EINVAL;
+		goto out2;
+	}
+	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+	work->id = id_priv;
+	work->mc = mc;
+	INIT_WORK(&work->work, iboe_mcast_work_handler);
+	kref_get(&mc->mcref);
+	queue_work(cma_wq, &work->work);
+
+	return 0;
+
+out2:
+	kfree(mc->multicast.ib);
+out1:
+	kfree(work);
+	return err;
+}
+
 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 			void *context)
 {
@@ -2784,7 +3038,17 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = cma_join_ib_multicast(id_priv, mc);
+		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+		case IB_LINK_LAYER_INFINIBAND:
+			ret = cma_join_ib_multicast(id_priv, mc);
+			break;
+		case IB_LINK_LAYER_ETHERNET:
+			kref_init(&mc->mcref);
+			ret = cma_iboe_join_multicast(id_priv, mc);
+			break;
+		default:
+			ret = -EINVAL;
+		}
 		break;
 	default:
 		ret = -ENOSYS;
@@ -2817,8 +3081,19 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
 				ib_detach_mcast(id->qp,
 						&mc->multicast.ib->rec.mgid,
 						mc->multicast.ib->rec.mlid);
-			ib_sa_free_multicast(mc->multicast.ib);
-			kfree(mc);
+			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
+				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+				case IB_LINK_LAYER_INFINIBAND:
+					ib_sa_free_multicast(mc->multicast.ib);
+					kfree(mc);
+					break;
+				case IB_LINK_LAYER_ETHERNET:
+					kref_put(&mc->mcref, release_mc);
+					break;
+				default:
+					break;
+				}
+			}
 			return;
 		}
 	}
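For reference, the IPv4-to-MGID mapping performed by cma_iboe_set_mgid() above can be checked with a small standalone program (userspace illustration only; the kernel code is the diff above). An IPv4 group address such as 239.1.2.3 lands in the last four bytes of a GID that begins ff0e and carries ff:ff in bytes 10-11:

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char mgid[16] = { 0xff, 0x0e };	/* raw[0], raw[1]; raw[2..9] stay 0 */
	struct in_addr group;
	char str[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "239.1.2.3", &group);
	mgid[10] = 0xff;				/* raw[10] */
	mgid[11] = 0xff;				/* raw[11] */
	memcpy(&mgid[12], &group.s_addr, 4);		/* raw[12..15] = IPv4 address */
	inet_ntop(AF_INET6, mgid, str, sizeof str);
	printf("%s\n", str);				/* prints ff0e::ffff:ef01:203 */
	return 0;
}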
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index bfead5bc25f6..2a1e9ae134b4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -506,6 +506,8 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
 	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
 	if (!qp) {
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+		wake_up_all(&cm_id_priv->connect_wait);
 		return -EINVAL;
 	}
 	cm_id->device->iwcm->add_ref(qp);
@@ -565,6 +567,8 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
 	if (!qp) {
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+		wake_up_all(&cm_id_priv->connect_wait);
 		return -EINVAL;
 	}
 	cm_id->device->iwcm->add_ref(qp);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef1304f151dc..822cfdcd9f78 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2598,6 +2598,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
 	struct ib_mad_private *recv;
 	struct ib_mad_list_head *mad_list;
 
+	if (!qp_info->qp)
+		return;
+
 	while (!list_empty(&qp_info->recv_queue.list)) {
 
 		mad_list = list_entry(qp_info->recv_queue.list.next,
@@ -2639,6 +2642,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
 		qp = port_priv->qp_info[i].qp;
+		if (!qp)
+			continue;
+
 		/*
 		 * PKey index for QP1 is irrelevant but
 		 * one is needed for the Reset to Init transition
@@ -2680,6 +2686,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	}
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
+		if (!port_priv->qp_info[i].qp)
+			continue;
+
 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 		if (ret) {
 			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
@@ -2758,6 +2767,9 @@ error:
 
 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
 {
+	if (!qp_info->qp)
+		return;
+
 	ib_destroy_qp(qp_info->qp);
 	kfree(qp_info->snoop_table);
 }
@@ -2773,6 +2785,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	struct ib_mad_port_private *port_priv;
 	unsigned long flags;
 	char name[sizeof "ib_mad123"];
+	int has_smi;
 
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2788,7 +2801,11 @@ static int ib_mad_port_open(struct ib_device *device,
 	init_mad_qp(port_priv, &port_priv->qp_info[0]);
 	init_mad_qp(port_priv, &port_priv->qp_info[1]);
 
-	cq_size = (mad_sendq_size + mad_recvq_size) * 2;
+	cq_size = mad_sendq_size + mad_recvq_size;
+	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+	if (has_smi)
+		cq_size *= 2;
+
 	port_priv->cq = ib_create_cq(port_priv->device,
 				     ib_mad_thread_completion_handler,
 				     NULL, port_priv, cq_size, 0);
@@ -2812,9 +2829,11 @@ static int ib_mad_port_open(struct ib_device *device,
 		goto error5;
 	}
 
-	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
-	if (ret)
-		goto error6;
+	if (has_smi) {
+		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
+		if (ret)
+			goto error6;
+	}
 	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
 	if (ret)
 		goto error7;
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a519801dcfb7..68b4162fd9d2 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -774,6 +774,10 @@ static void mcast_event_handler(struct ib_event_handler *handler,
 	int index;
 
 	dev = container_of(handler, struct mcast_device, event_handler);
+	if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
+	    IB_LINK_LAYER_INFINIBAND)
+		return;
+
 	index = event->element.port_num - dev->start_port;
 
 	switch (event->event) {
@@ -796,6 +800,7 @@ static void mcast_add_one(struct ib_device *device)
 	struct mcast_device *dev;
 	struct mcast_port *port;
 	int i;
+	int count = 0;
 
 	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
 		return;
@@ -813,6 +818,9 @@ static void mcast_add_one(struct ib_device *device)
 	}
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
+		if (rdma_port_get_link_layer(device, dev->start_port + i) !=
+		    IB_LINK_LAYER_INFINIBAND)
+			continue;
 		port = &dev->port[i];
 		port->dev = dev;
 		port->port_num = dev->start_port + i;
@@ -820,6 +828,12 @@ static void mcast_add_one(struct ib_device *device)
 		port->table = RB_ROOT;
 		init_completion(&port->comp);
 		atomic_set(&port->refcount, 1);
+		++count;
+	}
+
+	if (!count) {
+		kfree(dev);
+		return;
 	}
 
 	dev->device = device;
@@ -843,9 +857,12 @@ static void mcast_remove_one(struct ib_device *device)
 	flush_workqueue(mcast_wq);
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-		port = &dev->port[i];
-		deref_port(port);
-		wait_for_completion(&port->comp);
+		if (rdma_port_get_link_layer(device, dev->start_port + i) ==
+		    IB_LINK_LAYER_INFINIBAND) {
+			port = &dev->port[i];
+			deref_port(port);
+			wait_for_completion(&port->comp);
+		}
 	}
 
 	kfree(dev);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7e1ffd8ccd5c..91a660310b7c 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -416,6 +416,9 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
 	struct ib_sa_port *port =
 		&sa_dev->port[event->element.port_num - sa_dev->start_port];
 
+	if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+		return;
+
 	spin_lock_irqsave(&port->ah_lock, flags);
 	if (port->sm_ah)
 		kref_put(&port->sm_ah->ref, free_sm_ah);
@@ -493,6 +496,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 {
 	int ret;
 	u16 gid_index;
+	int force_grh;
 
 	memset(ah_attr, 0, sizeof *ah_attr);
 	ah_attr->dlid = be16_to_cpu(rec->dlid);
@@ -502,7 +506,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 	ah_attr->port_num = port_num;
 	ah_attr->static_rate = rec->rate;
 
-	if (rec->hop_limit > 1) {
+	force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
+
+	if (rec->hop_limit > 1 || force_grh) {
 		ah_attr->ah_flags = IB_AH_GRH;
 		ah_attr->grh.dgid = rec->dgid;
 
@@ -1007,7 +1013,7 @@ static void ib_sa_add_one(struct ib_device *device)
 		e = device->phys_port_cnt;
 	}
 
-	sa_dev = kmalloc(sizeof *sa_dev +
+	sa_dev = kzalloc(sizeof *sa_dev +
 			 (e - s + 1) * sizeof (struct ib_sa_port),
 			 GFP_KERNEL);
 	if (!sa_dev)
@@ -1017,9 +1023,12 @@ static void ib_sa_add_one(struct ib_device *device)
 	sa_dev->end_port = e;
 
 	for (i = 0; i <= e - s; ++i) {
+		spin_lock_init(&sa_dev->port[i].ah_lock);
+		if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+			continue;
+
 		sa_dev->port[i].sm_ah = NULL;
 		sa_dev->port[i].port_num = i + s;
-		spin_lock_init(&sa_dev->port[i].ah_lock);
 
 		sa_dev->port[i].agent =
 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
@@ -1045,13 +1054,15 @@ static void ib_sa_add_one(struct ib_device *device)
 		goto err;
 
 	for (i = 0; i <= e - s; ++i)
-		update_sm_ah(&sa_dev->port[i].update_task);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+			update_sm_ah(&sa_dev->port[i].update_task);
 
 	return;
 
 err:
 	while (--i >= 0)
-		ib_unregister_mad_agent(sa_dev->port[i].agent);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+			ib_unregister_mad_agent(sa_dev->port[i].agent);
 
 	kfree(sa_dev);
 
@@ -1071,9 +1082,12 @@ static void ib_sa_remove_one(struct ib_device *device)
 	flush_scheduled_work();
 
 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
-		ib_unregister_mad_agent(sa_dev->port[i].agent);
-		if (sa_dev->port[i].sm_ah)
-			kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+			ib_unregister_mad_agent(sa_dev->port[i].agent);
+			if (sa_dev->port[i].sm_ah)
+				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+		}
+
 	}
 
 	kfree(sa_dev);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3627300e2a10..9ab5df72df7b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -222,6 +222,19 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
 	}
 }
 
+static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
+			       char *buf)
+{
+	switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
+	case IB_LINK_LAYER_INFINIBAND:
+		return sprintf(buf, "%s\n", "InfiniBand");
+	case IB_LINK_LAYER_ETHERNET:
+		return sprintf(buf, "%s\n", "Ethernet");
+	default:
+		return sprintf(buf, "%s\n", "Unknown");
+	}
+}
+
 static PORT_ATTR_RO(state);
 static PORT_ATTR_RO(lid);
 static PORT_ATTR_RO(lid_mask_count);
@@ -230,6 +243,7 @@ static PORT_ATTR_RO(sm_sl);
 static PORT_ATTR_RO(cap_mask);
 static PORT_ATTR_RO(rate);
 static PORT_ATTR_RO(phys_state);
+static PORT_ATTR_RO(link_layer);
 
 static struct attribute *port_default_attrs[] = {
 	&port_attr_state.attr,
@@ -240,6 +254,7 @@ static struct attribute *port_default_attrs[] = {
 	&port_attr_cap_mask.attr,
 	&port_attr_rate.attr,
 	&port_attr_phys_state.attr,
+	&port_attr_link_layer.attr,
 	NULL
 };
 
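The new attribute is exposed at /sys/class/infiniband/<device>/ports/<N>/link_layer and prints one of the three strings above. A minimal userspace sketch of reading it (the device name and port number are caller-supplied; this snippet is illustrative and not part of the diff):

#include <stdio.h>
#include <string.h>

/* Returns 1 for "Ethernet", 0 for "InfiniBand" or "Unknown", -1 on error. */
static int port_is_ethernet(const char *device, int port)
{
	char path[128], line[32];
	FILE *f;
	int ret;

	snprintf(path, sizeof path,
		 "/sys/class/infiniband/%s/ports/%d/link_layer", device, port);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ret = fgets(line, sizeof line, f) ?
		(strncmp(line, "Ethernet", 8) == 0) : -1;
	fclose(f);
	return ret;
}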
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ac7edc24165c..ca12acf38379 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -40,6 +40,7 @@
 #include <linux/in6.h>
 #include <linux/miscdevice.h>
 #include <linux/slab.h>
+#include <linux/sysctl.h>
 
 #include <rdma/rdma_user_cm.h>
 #include <rdma/ib_marshall.h>
@@ -50,8 +51,24 @@ MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
 MODULE_LICENSE("Dual BSD/GPL");
 
-enum {
-	UCMA_MAX_BACKLOG = 128
+static unsigned int max_backlog = 1024;
+
+static struct ctl_table_header *ucma_ctl_table_hdr;
+static ctl_table ucma_ctl_table[] = {
+	{
+		.procname	= "max_backlog",
+		.data		= &max_backlog,
+		.maxlen		= sizeof max_backlog,
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{ }
+};
+
+static struct ctl_path ucma_ctl_path[] = {
+	{ .procname = "net" },
+	{ .procname = "rdma_ucm" },
+	{ }
 };
 
 struct ucma_file {
@@ -583,6 +600,42 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
 	}
 }
 
+static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
+				 struct rdma_route *route)
+{
+	struct rdma_dev_addr *dev_addr;
+	struct net_device *dev;
+	u16 vid = 0;
+
+	resp->num_paths = route->num_paths;
+	switch (route->num_paths) {
+	case 0:
+		dev_addr = &route->addr.dev_addr;
+		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+		if (dev) {
+			vid = rdma_vlan_dev_vlan_id(dev);
+			dev_put(dev);
+		}
+
+		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
+				    dev_addr->dst_dev_addr, vid);
+		iboe_addr_get_sgid(dev_addr,
+				   (union ib_gid *) &resp->ib_route[0].sgid);
+		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
+		break;
+	case 2:
+		ib_copy_path_rec_to_user(&resp->ib_route[1],
+					 &route->path_rec[1]);
+		/* fall through */
+	case 1:
+		ib_copy_path_rec_to_user(&resp->ib_route[0],
+					 &route->path_rec[0]);
+		break;
+	default:
+		break;
+	}
+}
+
 static ssize_t ucma_query_route(struct ucma_file *file,
 				const char __user *inbuf,
 				int in_len, int out_len)
@@ -617,12 +670,17 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 
 	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 	resp.port_num = ctx->cm_id->port_num;
-	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
-		break;
-	default:
-		break;
+	if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
+		switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+		case IB_LINK_LAYER_INFINIBAND:
+			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+			break;
+		case IB_LINK_LAYER_ETHERNET:
+			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+			break;
+		default:
+			break;
+		}
 	}
 
 out:
@@ -686,8 +744,8 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
-		       cmd.backlog : UCMA_MAX_BACKLOG;
+	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+		       cmd.backlog : max_backlog;
 	ret = rdma_listen(ctx->cm_id, ctx->backlog);
 	ucma_put_ctx(ctx);
 	return ret;
@@ -1279,16 +1337,26 @@ static int __init ucma_init(void)
 	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
 	if (ret) {
 		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
-		goto err;
+		goto err1;
+	}
+
+	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
+	if (!ucma_ctl_table_hdr) {
+		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+		ret = -ENOMEM;
+		goto err2;
 	}
 	return 0;
-err:
+err2:
+	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
+err1:
 	misc_deregister(&ucma_misc);
 	return ret;
 }
 
 static void __exit ucma_cleanup(void)
 {
+	unregister_sysctl_table(ucma_ctl_table_hdr);
 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
 	misc_deregister(&ucma_misc);
 	idr_destroy(&ctx_idr);
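With this change the listen backlog cap is no longer a hard-coded 128 but a tunable, visible as /proc/sys/net/rdma_ucm/max_backlog (default 1024). A minimal sketch of raising it from userspace, equivalent to writing the file with sysctl or a shell redirect (illustrative only):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/rdma_ucm/max_backlog", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 4096);	/* new cap for rdma_listen() backlogs */
	return fclose(f) != 0;
}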
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 650b501eb142..bb7e19280821 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -33,6 +33,7 @@
 
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/if_ether.h>
 
 #include <rdma/ib_pack.h>
 
@@ -80,6 +81,40 @@ static const struct ib_field lrh_table[] = {
 	  .size_bits    = 16 }
 };
 
+static const struct ib_field eth_table[] = {
+	{ STRUCT_FIELD(eth, dmac_h),
+	  .offset_words = 0,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ STRUCT_FIELD(eth, dmac_l),
+	  .offset_words = 1,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ STRUCT_FIELD(eth, smac_h),
+	  .offset_words = 1,
+	  .offset_bits  = 16,
+	  .size_bits    = 16 },
+	{ STRUCT_FIELD(eth, smac_l),
+	  .offset_words = 2,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ STRUCT_FIELD(eth, type),
+	  .offset_words = 3,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 }
+};
+
+static const struct ib_field vlan_table[] = {
+	{ STRUCT_FIELD(vlan, tag),
+	  .offset_words = 0,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ STRUCT_FIELD(vlan, type),
+	  .offset_words = 0,
+	  .offset_bits  = 16,
+	  .size_bits    = 16 }
+};
+
 static const struct ib_field grh_table[] = {
 	{ STRUCT_FIELD(grh, ip_version),
 	  .offset_words = 0,
@@ -180,38 +215,43 @@ static const struct ib_field deth_table[] = {
 /**
  * ib_ud_header_init - Initialize UD header structure
  * @payload_bytes:Length of packet payload
+ * @lrh_present: specify if LRH is present
+ * @eth_present: specify if Eth header is present
+ * @vlan_present: packet is tagged vlan
  * @grh_present:GRH flag (if non-zero, GRH will be included)
- * @immediate_present: specify if immediate data should be used
+ * @immediate_present: specify if immediate data is present
  * @header:Structure to initialize
- *
- * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
- * lrh.packet_length, grh.ip_version, grh.payload_length,
- * grh.next_header, bth.opcode, bth.pad_count and
- * bth.transport_header_version fields of a &struct ib_ud_header given
- * the payload length and whether a GRH will be included.
  */
 void ib_ud_header_init(int payload_bytes,
+		       int lrh_present,
+		       int eth_present,
+		       int vlan_present,
 		       int grh_present,
 		       int immediate_present,
 		       struct ib_ud_header *header)
 {
-	u16 packet_length;
-
 	memset(header, 0, sizeof *header);
 
-	header->lrh.link_version = 0;
-	header->lrh.link_next_header =
-		grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
-	packet_length = (IB_LRH_BYTES +
-			 IB_BTH_BYTES +
-			 IB_DETH_BYTES +
-			 payload_bytes +
-			 4 + /* ICRC */
-			 3) / 4; /* round up */
-
-	header->grh_present = grh_present;
+	if (lrh_present) {
+		u16 packet_length;
+
+		header->lrh.link_version = 0;
+		header->lrh.link_next_header =
+			grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
+		packet_length = (IB_LRH_BYTES +
+				 IB_BTH_BYTES +
+				 IB_DETH_BYTES +
+				 (grh_present ? IB_GRH_BYTES : 0) +
+				 payload_bytes +
+				 4 + /* ICRC */
+				 3) / 4; /* round up */
+		header->lrh.packet_length = cpu_to_be16(packet_length);
+	}
+
+	if (vlan_present)
+		header->eth.type = cpu_to_be16(ETH_P_8021Q);
+
 	if (grh_present) {
-		packet_length += IB_GRH_BYTES / 4;
 		header->grh.ip_version = 6;
 		header->grh.payload_length =
 			cpu_to_be16((IB_BTH_BYTES +
@@ -222,19 +262,52 @@ void ib_ud_header_init(int payload_bytes,
 		header->grh.next_header = 0x1b;
 	}
 
-	header->lrh.packet_length = cpu_to_be16(packet_length);
-
-	header->immediate_present = immediate_present;
 	if (immediate_present)
 		header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
 	else
 		header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
 	header->bth.pad_count = (4 - payload_bytes) & 3;
 	header->bth.transport_header_version = 0;
+
+	header->lrh_present = lrh_present;
+	header->eth_present = eth_present;
+	header->vlan_present = vlan_present;
+	header->grh_present = grh_present;
+	header->immediate_present = immediate_present;
 }
 EXPORT_SYMBOL(ib_ud_header_init);
 
 /**
+ * ib_lrh_header_pack - Pack LRH header struct into wire format
+ * @lrh:unpacked LRH header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_lrh_header_pack() packs the LRH header structure @lrh into
+ * wire format in the buffer @buf.
+ */
+int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf)
+{
+	ib_pack(lrh_table, ARRAY_SIZE(lrh_table), lrh, buf);
+	return 0;
+}
+EXPORT_SYMBOL(ib_lrh_header_pack);
+
+/**
+ * ib_lrh_header_unpack - Unpack LRH structure from wire format
+ * @lrh:unpacked LRH header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_lrh_header_unpack() unpacks the LRH header structure from
+ * wire format (in buf) into @lrh.
+ */
+int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh)
+{
+	ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, lrh);
+	return 0;
+}
+EXPORT_SYMBOL(ib_lrh_header_unpack);
+
+/**
  * ib_ud_header_pack - Pack UD header struct into wire format
  * @header:UD header struct
  * @buf:Buffer to pack into
@@ -247,10 +320,21 @@ int ib_ud_header_pack(struct ib_ud_header *header,
 {
 	int len = 0;
 
-	ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
-		&header->lrh, buf);
-	len += IB_LRH_BYTES;
-
+	if (header->lrh_present) {
+		ib_pack(lrh_table, ARRAY_SIZE(lrh_table),
+			&header->lrh, buf + len);
+		len += IB_LRH_BYTES;
+	}
+	if (header->eth_present) {
+		ib_pack(eth_table, ARRAY_SIZE(eth_table),
+			&header->eth, buf + len);
+		len += IB_ETH_BYTES;
+	}
+	if (header->vlan_present) {
+		ib_pack(vlan_table, ARRAY_SIZE(vlan_table),
+			&header->vlan, buf + len);
+		len += IB_VLAN_BYTES;
+	}
 	if (header->grh_present) {
 		ib_pack(grh_table, ARRAY_SIZE(grh_table),
 			&header->grh, buf + len);
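A sketch of how a driver might use the reworked initializer to build a RoCE (IBoE) UD header, where there is no LRH and an Ethernet header (plus optional VLAN tag) precedes the GRH. The helper below and its parameters are illustrative, not part of this diff:

#include <rdma/ib_pack.h>

/* payload_len and vlan_tagged are caller-supplied; the caller must still
 * fill in the MACs, GIDs and remaining BTH/DETH fields before packing.
 */
static int build_iboe_ud_header(int payload_len, int vlan_tagged,
				struct ib_ud_header *hdr, void *buf)
{
	/* no LRH, Ethernet present, VLAN as requested, GRH mandatory,
	 * no immediate data
	 */
	ib_ud_header_init(payload_len, 0, 1, vlan_tagged, 1, 0, hdr);
	return ib_ud_header_pack(hdr, buf);	/* returns packed length */
}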
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 5fa856909511..cd1996d0ad08 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1022,7 +1022,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
 
 	port->ib_dev = device;
 	port->port_num = port_num;
-	init_MUTEX(&port->sm_sem);
+	sema_init(&port->sm_sem, 1);
 	mutex_init(&port->file_mutex);
 	INIT_LIST_HEAD(&port->file_list);
 
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6fcfbeb24a23..b342248aec05 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -460,6 +460,8 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
 	resp.active_width = attr.active_width;
 	resp.active_speed = attr.active_speed;
 	resp.phys_state = attr.phys_state;
+	resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev,
+						   cmd.port_num);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
 			 &resp, sizeof resp))
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e0fa22238715..af7a8b08b2e9 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -94,6 +94,22 @@ rdma_node_get_transport(enum rdma_node_type node_type)
 }
 EXPORT_SYMBOL(rdma_node_get_transport);
 
+enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
+{
+	if (device->get_link_layer)
+		return device->get_link_layer(device, port_num);
+
+	switch (rdma_node_get_transport(device->node_type)) {
+	case RDMA_TRANSPORT_IB:
+		return IB_LINK_LAYER_INFINIBAND;
+	case RDMA_TRANSPORT_IWARP:
+		return IB_LINK_LAYER_ETHERNET;
+	default:
+		return IB_LINK_LAYER_UNSPECIFIED;
+	}
+}
+EXPORT_SYMBOL(rdma_port_get_link_layer);
+
 /* Protection domains */
 
 struct ib_pd *ib_alloc_pd(struct ib_device *device)
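For context, a minimal sketch of how a kernel consumer might branch on the new helper (hypothetical client code, not part of this diff):

#include <rdma/ib_verbs.h>

static void my_client_scan_ports(struct ib_device *device)
{
	u8 port;

	for (port = 1; port <= device->phys_port_cnt; ++port) {
		switch (rdma_port_get_link_layer(device, port)) {
		case IB_LINK_LAYER_INFINIBAND:
			/* full IB port: SMPs, SA queries, LID routing */
			break;
		case IB_LINK_LAYER_ETHERNET:
			/* IBoE port: no SM/SA; a GRH is required in every AH */
			break;
		default:
			break;
		}
	}
}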