73 files changed, 2641 insertions, 719 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 668682d1f5fa..f94cfc6f4c5d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2951,7 +2951,7 @@ M:	Roland Dreier <rolandd@cisco.com>
 M:	Sean Hefty <sean.hefty@intel.com>
 M:	Hal Rosenstock <hal.rosenstock@gmail.com>
 L:	linux-rdma@vger.kernel.org
-W:	http://www.openib.org/
+W:	http://www.openfabrics.org/
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
 S:	Supported
@@ -5035,6 +5035,16 @@ W:	http://www.kernel.dk
 S:	Maintained
 F:	drivers/scsi/sr*
 
+SCSI RDMA PROTOCOL (SRP) INITIATOR
+M:	David Dillow <dillowda@ornl.gov>
+L:	linux-rdma@vger.kernel.org
+S:	Supported
+W:	http://www.openfabrics.org
+Q:	http://patchwork.kernel.org/project/linux-rdma/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dad/srp-initiator.git
+F:	drivers/infiniband/ulp/srp/
+F:	include/scsi/srp.h
+
 SCSI SG DRIVER
 M:	Doug Gilbert <dgilbert@interlog.com>
 L:	linux-scsi@vger.kernel.org
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index ae7c2880e624..91916a8d5de4 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -59,8 +59,8 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
 	struct ib_agent_port_private *entry;
 
 	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-		if (entry->agent[0]->device == device &&
-		    entry->agent[0]->port_num == port_num)
+		if (entry->agent[1]->device == device &&
+		    entry->agent[1]->port_num == port_num)
 			return entry;
 	}
 	return NULL;
@@ -155,14 +155,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		goto error1;
 	}
 
-	/* Obtain send only MAD agent for SMI QP */
-	port_priv->agent[0] = ib_register_mad_agent(device, port_num,
-						    IB_QPT_SMI, NULL, 0,
-						    &agent_send_handler,
-						    NULL, NULL);
-	if (IS_ERR(port_priv->agent[0])) {
-		ret = PTR_ERR(port_priv->agent[0]);
-		goto error2;
+	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+		/* Obtain send only MAD agent for SMI QP */
+		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+							    IB_QPT_SMI, NULL, 0,
+							    &agent_send_handler,
+							    NULL, NULL);
+		if (IS_ERR(port_priv->agent[0])) {
+			ret = PTR_ERR(port_priv->agent[0]);
+			goto error2;
+		}
 	}
 
 	/* Obtain send only MAD agent for GSI QP */
@@ -182,7 +184,8 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	return 0;
 
 error3:
-	ib_unregister_mad_agent(port_priv->agent[0]);
+	if (port_priv->agent[0])
+		ib_unregister_mad_agent(port_priv->agent[0]);
 error2:
 	kfree(port_priv);
 error1:
@@ -205,7 +208,9 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
 	ib_unregister_mad_agent(port_priv->agent[1]);
-	ib_unregister_mad_agent(port_priv->agent[0]);
+	if (port_priv->agent[0])
+		ib_unregister_mad_agent(port_priv->agent[0]);
+
 	kfree(port_priv);
 	return 0;
 }
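Annotation (not part of the patch): Ethernet link-layer (IBoE/RoCE) ports have no QP0, so the send-only SMI agent in agent[0] is registered only when the port's link layer is InfiniBand, and port lookups now key off agent[1] (GSI), which always exists. A minimal sketch of the invariant the error and close paths rely on; example_port_cleanup() is a hypothetical helper, not kernel code:

static void example_port_cleanup(struct ib_agent_port_private *port_priv)
{
	/* agent[1] (GSI) is registered on every port */
	ib_unregister_mad_agent(port_priv->agent[1]);
	/* agent[0] (SMI) exists only on InfiniBand link-layer ports */
	if (port_priv->agent[0])
		ib_unregister_mad_agent(port_priv->agent[0]);
	kfree(port_priv);
}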
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b930b8110a63..6884da24fde1 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -59,6 +59,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define CMA_CM_RESPONSE_TIMEOUT 20
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
+#define CMA_IBOE_PACKET_LIFETIME 18
 
 static void cma_add_one(struct ib_device *device);
 static void cma_remove_one(struct ib_device *device);
@@ -157,6 +158,7 @@ struct cma_multicast {
 	struct list_head	list;
 	void			*context;
 	struct sockaddr_storage	addr;
+	struct kref		mcref;
 };
 
 struct cma_work {
@@ -173,6 +175,12 @@ struct cma_ndev_work {
 	struct rdma_cm_event	event;
 };
 
+struct iboe_mcast_work {
+	struct work_struct	 work;
+	struct rdma_id_private	*id;
+	struct cma_multicast	*mc;
+};
+
 union cma_ip_addr {
 	struct in6_addr ip6;
 	struct {
@@ -281,6 +289,8 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 	atomic_inc(&cma_dev->refcount);
 	id_priv->cma_dev = cma_dev;
 	id_priv->id.device = cma_dev->device;
+	id_priv->id.route.addr.dev_addr.transport =
+		rdma_node_get_transport(cma_dev->device->node_type);
 	list_add_tail(&id_priv->list, &cma_dev->id_list);
 }
 
@@ -290,6 +300,14 @@ static inline void cma_deref_dev(struct cma_device *cma_dev)
 		complete(&cma_dev->comp);
 }
 
+static inline void release_mc(struct kref *kref)
+{
+	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);
+
+	kfree(mc->multicast.ib);
+	kfree(mc);
+}
+
 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
 {
 	list_del(&id_priv->list);
@@ -323,22 +341,63 @@ static int cma_set_qkey(struct rdma_id_private *id_priv)
 	return ret;
 }
 
+static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_num)
+{
+	int i;
+	int err;
+	struct ib_port_attr props;
+	union ib_gid tmp;
+
+	err = ib_query_port(device, port_num, &props);
+	if (err)
+		return 1;
+
+	for (i = 0; i < props.gid_tbl_len; ++i) {
+		err = ib_query_gid(device, port_num, i, &tmp);
+		if (err)
+			return 1;
+		if (!memcmp(&tmp, gid, sizeof tmp))
+			return 0;
+	}
+
+	return -EAGAIN;
+}
+
 static int cma_acquire_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	struct cma_device *cma_dev;
-	union ib_gid gid;
+	union ib_gid gid, iboe_gid;
 	int ret = -ENODEV;
+	u8 port;
+	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
+		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
-	rdma_addr_get_sgid(dev_addr, &gid);
+	iboe_addr_get_sgid(dev_addr, &iboe_gid);
+	memcpy(&gid, dev_addr->src_dev_addr +
+	       rdma_addr_gid_offset(dev_addr), sizeof gid);
 	list_for_each_entry(cma_dev, &dev_list, list) {
-		ret = ib_find_cached_gid(cma_dev->device, &gid,
-					 &id_priv->id.port_num, NULL);
-		if (!ret) {
-			cma_attach_to_dev(id_priv, cma_dev);
-			break;
+		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
+			if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) {
+				if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
+				    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
+					ret = find_gid_port(cma_dev->device, &iboe_gid, port);
+				else
+					ret = find_gid_port(cma_dev->device, &gid, port);
+
+				if (!ret) {
+					id_priv->id.port_num = port;
+					goto out;
+				} else if (ret == 1)
+					break;
+			}
 		}
 	}
+
+out:
+	if (!ret)
+		cma_attach_to_dev(id_priv, cma_dev);
+
 	return ret;
 }
 
@@ -556,10 +615,16 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 {
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	int ret;
+	u16 pkey;
+
+	if (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num) ==
+	    IB_LINK_LAYER_INFINIBAND)
+		pkey = ib_addr_get_pkey(dev_addr);
+	else
+		pkey = 0xffff;
 
 	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
-				  ib_addr_get_pkey(dev_addr),
-				  &qp_attr->pkey_index);
+				  pkey, &qp_attr->pkey_index);
 	if (ret)
 		return ret;
 
@@ -737,8 +802,8 @@ static inline int cma_user_data_offset(enum rdma_port_space ps)
 
 static void cma_cancel_route(struct rdma_id_private *id_priv)
 {
-	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	switch (rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)) {
+	case IB_LINK_LAYER_INFINIBAND:
 		if (id_priv->query)
 			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
 		break;
@@ -816,8 +881,17 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 		mc = container_of(id_priv->mc_list.next,
 				  struct cma_multicast, list);
 		list_del(&mc->list);
-		ib_sa_free_multicast(mc->multicast.ib);
-		kfree(mc);
+		switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) {
+		case IB_LINK_LAYER_INFINIBAND:
+			ib_sa_free_multicast(mc->multicast.ib);
+			kfree(mc);
+			break;
+		case IB_LINK_LAYER_ETHERNET:
+			kref_put(&mc->mcref, release_mc);
+			break;
+		default:
+			break;
+		}
 	}
 }
 
@@ -833,7 +907,7 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	mutex_lock(&lock);
 	if (id_priv->cma_dev) {
 		mutex_unlock(&lock);
-		switch (rdma_node_get_transport(id->device->node_type)) {
+		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 		case RDMA_TRANSPORT_IB:
 			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
 				ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -1708,6 +1782,81 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 	return 0;
 }
 
+static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+{
+	struct rdma_route *route = &id_priv->id.route;
+	struct rdma_addr *addr = &route->addr;
+	struct cma_work *work;
+	int ret;
+	struct sockaddr_in *src_addr = (struct sockaddr_in *)&route->addr.src_addr;
+	struct sockaddr_in *dst_addr = (struct sockaddr_in *)&route->addr.dst_addr;
+	struct net_device *ndev = NULL;
+	u16 vid;
+
+	if (src_addr->sin_family != dst_addr->sin_family)
+		return -EINVAL;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	work->id = id_priv;
+	INIT_WORK(&work->work, cma_work_handler);
+
+	route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
+	if (!route->path_rec) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	route->num_paths = 1;
+
+	if (addr->dev_addr.bound_dev_if)
+		ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if);
+	if (!ndev) {
+		ret = -ENODEV;
+		goto err2;
+	}
+
+	vid = rdma_vlan_dev_vlan_id(ndev);
+
+	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
+	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+
+	route->path_rec->hop_limit = 1;
+	route->path_rec->reversible = 1;
+	route->path_rec->pkey = cpu_to_be16(0xffff);
+	route->path_rec->mtu_selector = IB_SA_EQ;
+	route->path_rec->sl = id_priv->tos >> 5;
+
+	route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
+	route->path_rec->rate_selector = IB_SA_EQ;
+	route->path_rec->rate = iboe_get_rate(ndev);
+	dev_put(ndev);
+	route->path_rec->packet_life_time_selector = IB_SA_EQ;
+	route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
+	if (!route->path_rec->mtu) {
+		ret = -EINVAL;
+		goto err2;
+	}
+
+	work->old_state = CMA_ROUTE_QUERY;
+	work->new_state = CMA_ROUTE_RESOLVED;
+	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
+	work->event.status = 0;
+
+	queue_work(cma_wq, &work->work);
+
+	return 0;
+
+err2:
+	kfree(route->path_rec);
+	route->path_rec = NULL;
+err1:
+	kfree(work);
+	return ret;
+}
+
 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 {
 	struct rdma_id_private *id_priv;
@@ -1720,7 +1869,16 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	atomic_inc(&id_priv->refcount);
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = cma_resolve_ib_route(id_priv, timeout_ms);
+		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+		case IB_LINK_LAYER_INFINIBAND:
+			ret = cma_resolve_ib_route(id_priv, timeout_ms);
+			break;
+		case IB_LINK_LAYER_ETHERNET:
+			ret = cma_resolve_iboe_route(id_priv);
+			break;
+		default:
+			ret = -ENOSYS;
+		}
 		break;
 	case RDMA_TRANSPORT_IWARP:
 		ret = cma_resolve_iw_route(id_priv, timeout_ms);
@@ -1773,7 +1931,7 @@ port_found:
 		goto out;
 
 	id_priv->id.route.addr.dev_addr.dev_type =
-		(rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB) ?
+		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
 		ARPHRD_INFINIBAND : ARPHRD_ETHER;
 
 	rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
@@ -2758,6 +2916,102 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 	return 0;
 }
 
+static void iboe_mcast_work_handler(struct work_struct *work)
+{
+	struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
+	struct cma_multicast *mc = mw->mc;
+	struct ib_sa_multicast *m = mc->multicast.ib;
+
+	mc->multicast.ib->context = mc;
+	cma_ib_mc_handler(0, m);
+	kref_put(&mc->mcref, release_mc);
+	kfree(mw);
+}
+
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid)
+{
+	struct sockaddr_in *sin = (struct sockaddr_in *)addr;
+	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
+
+	if (cma_any_addr(addr)) {
+		memset(mgid, 0, sizeof *mgid);
+	} else if (addr->sa_family == AF_INET6) {
+		memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
+	} else {
+		mgid->raw[0] = 0xff;
+		mgid->raw[1] = 0x0e;
+		mgid->raw[2] = 0;
+		mgid->raw[3] = 0;
+		mgid->raw[4] = 0;
+		mgid->raw[5] = 0;
+		mgid->raw[6] = 0;
+		mgid->raw[7] = 0;
+		mgid->raw[8] = 0;
+		mgid->raw[9] = 0;
+		mgid->raw[10] = 0xff;
+		mgid->raw[11] = 0xff;
+		*(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
+	}
+}
+
+static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
+				   struct cma_multicast *mc)
+{
+	struct iboe_mcast_work *work;
+	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+	int err;
+	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
+	struct net_device *ndev = NULL;
+
+	if (cma_zero_addr((struct sockaddr *)&mc->addr))
+		return -EINVAL;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (!work)
+		return -ENOMEM;
+
+	mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
+	if (!mc->multicast.ib) {
+		err = -ENOMEM;
+		goto out1;
+	}
+
+	cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid);
+
+	mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
+	if (id_priv->id.ps == RDMA_PS_UDP)
+		mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
+
+	if (dev_addr->bound_dev_if)
+		ndev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+	if (!ndev) {
+		err = -ENODEV;
+		goto out2;
+	}
+	mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
+	mc->multicast.ib->rec.hop_limit = 1;
+	mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->mtu);
+	dev_put(ndev);
+	if (!mc->multicast.ib->rec.mtu) {
+		err = -EINVAL;
+		goto out2;
+	}
+	iboe_addr_get_sgid(dev_addr, &mc->multicast.ib->rec.port_gid);
+	work->id = id_priv;
+	work->mc = mc;
+	INIT_WORK(&work->work, iboe_mcast_work_handler);
+	kref_get(&mc->mcref);
+	queue_work(cma_wq, &work->work);
+
+	return 0;
+
+out2:
+	kfree(mc->multicast.ib);
+out1:
+	kfree(work);
+	return err;
+}
+
 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 			void *context)
 {
@@ -2784,7 +3038,17 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		ret = cma_join_ib_multicast(id_priv, mc);
+		switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+		case IB_LINK_LAYER_INFINIBAND:
+			ret = cma_join_ib_multicast(id_priv, mc);
+			break;
+		case IB_LINK_LAYER_ETHERNET:
+			kref_init(&mc->mcref);
+			ret = cma_iboe_join_multicast(id_priv, mc);
+			break;
+		default:
+			ret = -EINVAL;
+		}
 		break;
 	default:
 		ret = -ENOSYS;
@@ -2817,8 +3081,19 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
 				ib_detach_mcast(id->qp,
 						&mc->multicast.ib->rec.mgid,
 						mc->multicast.ib->rec.mlid);
-			ib_sa_free_multicast(mc->multicast.ib);
-			kfree(mc);
+			if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
+				switch (rdma_port_get_link_layer(id->device, id->port_num)) {
+				case IB_LINK_LAYER_INFINIBAND:
+					ib_sa_free_multicast(mc->multicast.ib);
+					kfree(mc);
+					break;
+				case IB_LINK_LAYER_ETHERNET:
+					kref_put(&mc->mcref, release_mc);
+					break;
+				default:
+					break;
+				}
+			}
 			return;
 		}
 	}
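Annotation (not part of the patch): on Ethernet (IBoE) ports the RDMA CM cannot free a group through ib_sa_free_multicast(), so struct cma_multicast gains a kref and is released via release_mc(). A rough sketch of the reference flow in cma_iboe_join_multicast(); the wrapper function name is hypothetical:

static void iboe_mc_ref_flow_example(struct cma_multicast *mc,
				     struct iboe_mcast_work *work)
{
	kref_init(&mc->mcref);		 /* reference held by id_priv->mc_list  */
	kref_get(&mc->mcref);		 /* reference handed to the work item   */
	queue_work(cma_wq, &work->work); /* handler drops it via release_mc()   */
	/* rdma_leave_multicast() and cma_leave_mc_groups() drop the list's
	 * reference with kref_put(&mc->mcref, release_mc) as well. */
}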
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index bfead5bc25f6..2a1e9ae134b4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -506,6 +506,8 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
 	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
 	if (!qp) {
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+		wake_up_all(&cm_id_priv->connect_wait);
 		return -EINVAL;
 	}
 	cm_id->device->iwcm->add_ref(qp);
@@ -565,6 +567,8 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
 	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
 	if (!qp) {
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+		clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+		wake_up_all(&cm_id_priv->connect_wait);
 		return -EINVAL;
 	}
 	cm_id->device->iwcm->add_ref(qp);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef1304f151dc..822cfdcd9f78 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2598,6 +2598,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
 	struct ib_mad_private *recv;
 	struct ib_mad_list_head *mad_list;
 
+	if (!qp_info->qp)
+		return;
+
 	while (!list_empty(&qp_info->recv_queue.list)) {
 
 		mad_list = list_entry(qp_info->recv_queue.list.next,
@@ -2639,6 +2642,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
 		qp = port_priv->qp_info[i].qp;
+		if (!qp)
+			continue;
+
 		/*
 		 * PKey index for QP1 is irrelevant but
 		 * one is needed for the Reset to Init transition
@@ -2680,6 +2686,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	}
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
+		if (!port_priv->qp_info[i].qp)
+			continue;
+
 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 		if (ret) {
 			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
@@ -2758,6 +2767,9 @@ error:
 
 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
 {
+	if (!qp_info->qp)
+		return;
+
 	ib_destroy_qp(qp_info->qp);
 	kfree(qp_info->snoop_table);
 }
@@ -2773,6 +2785,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	struct ib_mad_port_private *port_priv;
 	unsigned long flags;
 	char name[sizeof "ib_mad123"];
+	int has_smi;
 
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2788,7 +2801,11 @@ static int ib_mad_port_open(struct ib_device *device,
 	init_mad_qp(port_priv, &port_priv->qp_info[0]);
 	init_mad_qp(port_priv, &port_priv->qp_info[1]);
 
-	cq_size = (mad_sendq_size + mad_recvq_size) * 2;
+	cq_size = mad_sendq_size + mad_recvq_size;
+	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+	if (has_smi)
+		cq_size *= 2;
+
 	port_priv->cq = ib_create_cq(port_priv->device,
 				     ib_mad_thread_completion_handler,
 				     NULL, port_priv, cq_size, 0);
@@ -2812,9 +2829,11 @@ static int ib_mad_port_open(struct ib_device *device,
 		goto error5;
 	}
 
-	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
-	if (ret)
-		goto error6;
+	if (has_smi) {
+		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
+		if (ret)
+			goto error6;
+	}
 	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
 	if (ret)
 		goto error7;
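Annotation (not part of the patch): the MAD completion queue used to be sized for both QP0 and QP1 unconditionally; it is now sized for one QP and doubled only when the port actually has an SMI QP, i.e. when its link layer is InfiniBand. A sketch of the sizing rule; example_mad_cq_size() is a hypothetical helper:

static int example_mad_cq_size(struct ib_device *device, u8 port_num,
			       int sendq_size, int recvq_size)
{
	int cq_size = sendq_size + recvq_size;	/* budget for QP1 (GSI) */

	if (rdma_port_get_link_layer(device, port_num) ==
	    IB_LINK_LAYER_INFINIBAND)
		cq_size *= 2;			/* add QP0 (SMI) budget */
	return cq_size;
}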
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index a519801dcfb7..68b4162fd9d2 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -774,6 +774,10 @@ static void mcast_event_handler(struct ib_event_handler *handler,
 	int index;
 
 	dev = container_of(handler, struct mcast_device, event_handler);
+	if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
+	    IB_LINK_LAYER_INFINIBAND)
+		return;
+
 	index = event->element.port_num - dev->start_port;
 
 	switch (event->event) {
@@ -796,6 +800,7 @@ static void mcast_add_one(struct ib_device *device)
 	struct mcast_device *dev;
 	struct mcast_port *port;
 	int i;
+	int count = 0;
 
 	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
 		return;
@@ -813,6 +818,9 @@ static void mcast_add_one(struct ib_device *device)
 	}
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
+		if (rdma_port_get_link_layer(device, dev->start_port + i) !=
+		    IB_LINK_LAYER_INFINIBAND)
+			continue;
 		port = &dev->port[i];
 		port->dev = dev;
 		port->port_num = dev->start_port + i;
@@ -820,6 +828,12 @@ static void mcast_add_one(struct ib_device *device)
 		port->table = RB_ROOT;
 		init_completion(&port->comp);
 		atomic_set(&port->refcount, 1);
+		++count;
+	}
+
+	if (!count) {
+		kfree(dev);
+		return;
 	}
 
 	dev->device = device;
@@ -843,9 +857,12 @@ static void mcast_remove_one(struct ib_device *device)
 	flush_workqueue(mcast_wq);
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-		port = &dev->port[i];
-		deref_port(port);
-		wait_for_completion(&port->comp);
+		if (rdma_port_get_link_layer(device, dev->start_port + i) ==
+		    IB_LINK_LAYER_INFINIBAND) {
+			port = &dev->port[i];
+			deref_port(port);
+			wait_for_completion(&port->comp);
+		}
 	}
 
 	kfree(dev);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7e1ffd8ccd5c..91a660310b7c 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -416,6 +416,9 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
 		struct ib_sa_port *port =
 			&sa_dev->port[event->element.port_num - sa_dev->start_port];
 
+		if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+			return;
+
 		spin_lock_irqsave(&port->ah_lock, flags);
 		if (port->sm_ah)
 			kref_put(&port->sm_ah->ref, free_sm_ah);
@@ -493,6 +496,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 {
 	int ret;
 	u16 gid_index;
+	int force_grh;
 
 	memset(ah_attr, 0, sizeof *ah_attr);
 	ah_attr->dlid = be16_to_cpu(rec->dlid);
@@ -502,7 +506,9 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 	ah_attr->port_num = port_num;
 	ah_attr->static_rate = rec->rate;
 
-	if (rec->hop_limit > 1) {
+	force_grh = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_ETHERNET;
+
+	if (rec->hop_limit > 1 || force_grh) {
 		ah_attr->ah_flags = IB_AH_GRH;
 		ah_attr->grh.dgid = rec->dgid;
 
@@ -1007,7 +1013,7 @@ static void ib_sa_add_one(struct ib_device *device)
 		e = device->phys_port_cnt;
 	}
 
-	sa_dev = kmalloc(sizeof *sa_dev +
+	sa_dev = kzalloc(sizeof *sa_dev +
 			 (e - s + 1) * sizeof (struct ib_sa_port),
 			 GFP_KERNEL);
 	if (!sa_dev)
@@ -1017,9 +1023,12 @@ static void ib_sa_add_one(struct ib_device *device)
 	sa_dev->end_port = e;
 
 	for (i = 0; i <= e - s; ++i) {
+		spin_lock_init(&sa_dev->port[i].ah_lock);
+		if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+			continue;
+
 		sa_dev->port[i].sm_ah = NULL;
 		sa_dev->port[i].port_num = i + s;
-		spin_lock_init(&sa_dev->port[i].ah_lock);
 
 		sa_dev->port[i].agent =
 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
@@ -1045,13 +1054,15 @@ static void ib_sa_add_one(struct ib_device *device)
 		goto err;
 
 	for (i = 0; i <= e - s; ++i)
-		update_sm_ah(&sa_dev->port[i].update_task);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+			update_sm_ah(&sa_dev->port[i].update_task);
 
 	return;
 
 err:
 	while (--i >= 0)
-		ib_unregister_mad_agent(sa_dev->port[i].agent);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+			ib_unregister_mad_agent(sa_dev->port[i].agent);
 
 	kfree(sa_dev);
 
@@ -1071,9 +1082,12 @@ static void ib_sa_remove_one(struct ib_device *device)
 	flush_scheduled_work();
 
 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
-		ib_unregister_mad_agent(sa_dev->port[i].agent);
-		if (sa_dev->port[i].sm_ah)
-			kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+			ib_unregister_mad_agent(sa_dev->port[i].agent);
+			if (sa_dev->port[i].sm_ah)
+				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+		}
+
 	}
 
 	kfree(sa_dev);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3627300e2a10..9ab5df72df7b 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -222,6 +222,19 @@ static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused,
 	}
 }
 
+static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused,
+			       char *buf)
+{
+	switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) {
+	case IB_LINK_LAYER_INFINIBAND:
+		return sprintf(buf, "%s\n", "InfiniBand");
+	case IB_LINK_LAYER_ETHERNET:
+		return sprintf(buf, "%s\n", "Ethernet");
+	default:
+		return sprintf(buf, "%s\n", "Unknown");
+	}
+}
+
 static PORT_ATTR_RO(state);
 static PORT_ATTR_RO(lid);
 static PORT_ATTR_RO(lid_mask_count);
@@ -230,6 +243,7 @@ static PORT_ATTR_RO(sm_sl);
 static PORT_ATTR_RO(cap_mask);
 static PORT_ATTR_RO(rate);
 static PORT_ATTR_RO(phys_state);
+static PORT_ATTR_RO(link_layer);
 
 static struct attribute *port_default_attrs[] = {
 	&port_attr_state.attr,
@@ -240,6 +254,7 @@ static struct attribute *port_default_attrs[] = {
 	&port_attr_cap_mask.attr,
 	&port_attr_rate.attr,
 	&port_attr_phys_state.attr,
+	&port_attr_link_layer.attr,
 	NULL
 };
 
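Annotation (not part of the patch): the new attribute appears as link_layer under each port directory, e.g. /sys/class/infiniband/<device>/ports/<N>/link_layer, and prints "InfiniBand", "Ethernet" or "Unknown". A small user-space sketch; the device name mlx4_0 is only an example:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/class/infiniband/mlx4_0/ports/1/link_layer", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("link layer: %s", buf);
	if (f)
		fclose(f);
	return 0;
}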
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ac7edc24165c..ca12acf38379 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -40,6 +40,7 @@
 #include <linux/in6.h>
 #include <linux/miscdevice.h>
 #include <linux/slab.h>
+#include <linux/sysctl.h>
 
 #include <rdma/rdma_user_cm.h>
 #include <rdma/ib_marshall.h>
@@ -50,8 +51,24 @@ MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
 MODULE_LICENSE("Dual BSD/GPL");
 
-enum {
-	UCMA_MAX_BACKLOG	= 128
+static unsigned int max_backlog = 1024;
+
+static struct ctl_table_header *ucma_ctl_table_hdr;
+static ctl_table ucma_ctl_table[] = {
+	{
+		.procname	= "max_backlog",
+		.data		= &max_backlog,
+		.maxlen		= sizeof max_backlog,
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{ }
+};
+
+static struct ctl_path ucma_ctl_path[] = {
+	{ .procname = "net" },
+	{ .procname = "rdma_ucm" },
+	{ }
 };
 
 struct ucma_file {
@@ -583,6 +600,42 @@ static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
 	}
 }
 
+static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
+				 struct rdma_route *route)
+{
+	struct rdma_dev_addr *dev_addr;
+	struct net_device *dev;
+	u16 vid = 0;
+
+	resp->num_paths = route->num_paths;
+	switch (route->num_paths) {
+	case 0:
+		dev_addr = &route->addr.dev_addr;
+		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+		if (dev) {
+			vid = rdma_vlan_dev_vlan_id(dev);
+			dev_put(dev);
+		}
+
+		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
+				    dev_addr->dst_dev_addr, vid);
+		iboe_addr_get_sgid(dev_addr,
+				   (union ib_gid *) &resp->ib_route[0].sgid);
+		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
+		break;
+	case 2:
+		ib_copy_path_rec_to_user(&resp->ib_route[1],
+					 &route->path_rec[1]);
+		/* fall through */
+	case 1:
+		ib_copy_path_rec_to_user(&resp->ib_route[0],
+					 &route->path_rec[0]);
+		break;
+	default:
+		break;
+	}
+}
+
 static ssize_t ucma_query_route(struct ucma_file *file,
 				const char __user *inbuf,
 				int in_len, int out_len)
@@ -617,12 +670,17 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 
 	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 	resp.port_num = ctx->cm_id->port_num;
-	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
-	case RDMA_TRANSPORT_IB:
-		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
-		break;
-	default:
-		break;
+	if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
+		switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+		case IB_LINK_LAYER_INFINIBAND:
+			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
+			break;
+		case IB_LINK_LAYER_ETHERNET:
+			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
+			break;
+		default:
+			break;
+		}
 	}
 
 out:
@@ -686,8 +744,8 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
-		       cmd.backlog : UCMA_MAX_BACKLOG;
+	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+		       cmd.backlog : max_backlog;
 	ret = rdma_listen(ctx->cm_id, ctx->backlog);
 	ucma_put_ctx(ctx);
 	return ret;
@@ -1279,16 +1337,26 @@ static int __init ucma_init(void)
 	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
 	if (ret) {
 		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
-		goto err;
+		goto err1;
+	}
+
+	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
+	if (!ucma_ctl_table_hdr) {
+		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+		ret = -ENOMEM;
+		goto err2;
 	}
 	return 0;
-err:
+err2:
+	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
+err1:
 	misc_deregister(&ucma_misc);
 	return ret;
 }
 
 static void __exit ucma_cleanup(void)
 {
+	unregister_sysctl_table(ucma_ctl_table_hdr);
 	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
 	misc_deregister(&ucma_misc);
 	idr_destroy(&ctx_idr);
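Annotation (not part of the patch): the listen backlog cap moves from the compile-time constant UCMA_MAX_BACKLOG (128) to the max_backlog sysctl (default 1024), registered under net/rdma_ucm, so it can be read or changed at run time via /proc/sys/net/rdma_ucm/max_backlog. A user-space sketch that reads the tunable:

#include <stdio.h>

int main(void)
{
	unsigned int max_backlog;
	FILE *f = fopen("/proc/sys/net/rdma_ucm/max_backlog", "r");

	if (f && fscanf(f, "%u", &max_backlog) == 1)
		printf("rdma_ucm max_backlog = %u\n", max_backlog);
	if (f)
		fclose(f);
	return 0;
}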
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 650b501eb142..bb7e19280821 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -33,6 +33,7 @@
 
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/if_ether.h>
 
 #include <rdma/ib_pack.h>
 
@@ -80,6 +81,40 @@ static const struct ib_field lrh_table[] = {
 	  .size_bits    = 16 }
 };
 
+static const struct ib_field eth_table[] = {
+	{ STRUCT_FIELD(eth, dmac_h),
+	  .offset_words = 0,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ STRUCT_FIELD(eth, dmac_l),
+	  .offset_words = 1,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ STRUCT_FIELD(eth, smac_h),
+	  .offset_words = 1,
+	  .offset_bits  = 16,
+	  .size_bits    = 16 },
+	{ STRUCT_FIELD(eth, smac_l),
+	  .offset_words = 2,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ STRUCT_FIELD(eth, type),
+	  .offset_words = 3,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 }
+};
+
+static const struct ib_field vlan_table[] = {
+	{ STRUCT_FIELD(vlan, tag),
+	  .offset_words = 0,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ STRUCT_FIELD(vlan, type),
+	  .offset_words = 0,
+	  .offset_bits  = 16,
+	  .size_bits    = 16 }
+};
+
 static const struct ib_field grh_table[] = {
 	{ STRUCT_FIELD(grh, ip_version),
 	  .offset_words = 0,
@@ -180,38 +215,43 @@ static const struct ib_field deth_table[] = {
 /**
  * ib_ud_header_init - Initialize UD header structure
  * @payload_bytes:Length of packet payload
+ * @lrh_present: specify if LRH is present
+ * @eth_present: specify if Eth header is present
+ * @vlan_present: packet is tagged vlan
  * @grh_present:GRH flag (if non-zero, GRH will be included)
- * @immediate_present: specify if immediate data should be used
+ * @immediate_present: specify if immediate data is present
  * @header:Structure to initialize
- *
- * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
- * lrh.packet_length, grh.ip_version, grh.payload_length,
- * grh.next_header, bth.opcode, bth.pad_count and
- * bth.transport_header_version fields of a &struct ib_ud_header given
- * the payload length and whether a GRH will be included.
  */
 void ib_ud_header_init(int payload_bytes,
+		       int lrh_present,
+		       int eth_present,
+		       int vlan_present,
 		       int grh_present,
 		       int immediate_present,
 		       struct ib_ud_header *header)
 {
-	u16 packet_length;
-
 	memset(header, 0, sizeof *header);
 
-	header->lrh.link_version = 0;
-	header->lrh.link_next_header =
-		grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
-	packet_length = (IB_LRH_BYTES +
-			 IB_BTH_BYTES +
-			 IB_DETH_BYTES +
-			 payload_bytes +
-			 4 + /* ICRC */
-			 3) / 4; /* round up */
-
-	header->grh_present = grh_present;
+	if (lrh_present) {
+		u16 packet_length;
+
+		header->lrh.link_version = 0;
+		header->lrh.link_next_header =
+			grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
+		packet_length = (IB_LRH_BYTES +
+				 IB_BTH_BYTES +
+				 IB_DETH_BYTES +
+				 (grh_present ? IB_GRH_BYTES : 0) +
+				 payload_bytes +
+				 4 + /* ICRC */
+				 3) / 4; /* round up */
+		header->lrh.packet_length = cpu_to_be16(packet_length);
+	}
+
+	if (vlan_present)
+		header->eth.type = cpu_to_be16(ETH_P_8021Q);
+
 	if (grh_present) {
-		packet_length += IB_GRH_BYTES / 4;
 		header->grh.ip_version = 6;
 		header->grh.payload_length =
 			cpu_to_be16((IB_BTH_BYTES +
@@ -222,19 +262,52 @@ void ib_ud_header_init(int payload_bytes,
 		header->grh.next_header = 0x1b;
 	}
 
-	header->lrh.packet_length = cpu_to_be16(packet_length);
-
-	header->immediate_present = immediate_present;
 	if (immediate_present)
 		header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
 	else
 		header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
 	header->bth.pad_count = (4 - payload_bytes) & 3;
 	header->bth.transport_header_version = 0;
+
+	header->lrh_present = lrh_present;
+	header->eth_present = eth_present;
+	header->vlan_present = vlan_present;
+	header->grh_present = grh_present;
+	header->immediate_present = immediate_present;
 }
 EXPORT_SYMBOL(ib_ud_header_init);
 
 /**
+ * ib_lrh_header_pack - Pack LRH header struct into wire format
+ * @lrh:unpacked LRH header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_lrh_header_pack() packs the LRH header structure @lrh into
+ * wire format in the buffer @buf.
+ */
+int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf)
+{
+	ib_pack(lrh_table, ARRAY_SIZE(lrh_table), lrh, buf);
+	return 0;
+}
+EXPORT_SYMBOL(ib_lrh_header_pack);
+
+/**
+ * ib_lrh_header_unpack - Unpack LRH structure from wire format
+ * @lrh:unpacked LRH header struct
+ * @buf:Buffer to pack into
+ *
+ * ib_lrh_header_unpack() unpacks the LRH header structure from
+ * wire format (in buf) into @lrh.
+ */
+int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh)
+{
+	ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, lrh);
+	return 0;
+}
+EXPORT_SYMBOL(ib_lrh_header_unpack);
+
+/**
  * ib_ud_header_pack - Pack UD header struct into wire format
  * @header:UD header struct
  * @buf:Buffer to pack into
| @@ -247,10 +320,21 @@ int ib_ud_header_pack(struct ib_ud_header *header, | |||
| 247 | { | 320 | { |
| 248 | int len = 0; | 321 | int len = 0; |
| 249 | 322 | ||
| 250 | ib_pack(lrh_table, ARRAY_SIZE(lrh_table), | 323 | if (header->lrh_present) { |
| 251 | &header->lrh, buf); | 324 | ib_pack(lrh_table, ARRAY_SIZE(lrh_table), |
| 252 | len += IB_LRH_BYTES; | 325 | &header->lrh, buf + len); |
| 253 | 326 | len += IB_LRH_BYTES; | |
| 327 | } | ||
| 328 | if (header->eth_present) { | ||
| 329 | ib_pack(eth_table, ARRAY_SIZE(eth_table), | ||
| 330 | &header->eth, buf + len); | ||
| 331 | len += IB_ETH_BYTES; | ||
| 332 | } | ||
| 333 | if (header->vlan_present) { | ||
| 334 | ib_pack(vlan_table, ARRAY_SIZE(vlan_table), | ||
| 335 | &header->vlan, buf + len); | ||
| 336 | len += IB_VLAN_BYTES; | ||
| 337 | } | ||
| 254 | if (header->grh_present) { | 338 | if (header->grh_present) { |
| 255 | ib_pack(grh_table, ARRAY_SIZE(grh_table), | 339 | ib_pack(grh_table, ARRAY_SIZE(grh_table), |
| 256 | &header->grh, buf + len); | 340 | &header->grh, buf + len); |
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 5fa856909511..cd1996d0ad08 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
| @@ -1022,7 +1022,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, | |||
| 1022 | 1022 | ||
| 1023 | port->ib_dev = device; | 1023 | port->ib_dev = device; |
| 1024 | port->port_num = port_num; | 1024 | port->port_num = port_num; |
| 1025 | init_MUTEX(&port->sm_sem); | 1025 | sema_init(&port->sm_sem, 1); |
| 1026 | mutex_init(&port->file_mutex); | 1026 | mutex_init(&port->file_mutex); |
| 1027 | INIT_LIST_HEAD(&port->file_list); | 1027 | INIT_LIST_HEAD(&port->file_list); |
| 1028 | 1028 | ||
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 6fcfbeb24a23..b342248aec05 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -460,6 +460,8 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file, | |||
| 460 | resp.active_width = attr.active_width; | 460 | resp.active_width = attr.active_width; |
| 461 | resp.active_speed = attr.active_speed; | 461 | resp.active_speed = attr.active_speed; |
| 462 | resp.phys_state = attr.phys_state; | 462 | resp.phys_state = attr.phys_state; |
| 463 | resp.link_layer = rdma_port_get_link_layer(file->device->ib_dev, | ||
| 464 | cmd.port_num); | ||
| 463 | 465 | ||
| 464 | if (copy_to_user((void __user *) (unsigned long) cmd.response, | 466 | if (copy_to_user((void __user *) (unsigned long) cmd.response, |
| 465 | &resp, sizeof resp)) | 467 | &resp, sizeof resp)) |
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index e0fa22238715..af7a8b08b2e9 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
| @@ -94,6 +94,22 @@ rdma_node_get_transport(enum rdma_node_type node_type) | |||
| 94 | } | 94 | } |
| 95 | EXPORT_SYMBOL(rdma_node_get_transport); | 95 | EXPORT_SYMBOL(rdma_node_get_transport); |
| 96 | 96 | ||
| 97 | enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num) | ||
| 98 | { | ||
| 99 | if (device->get_link_layer) | ||
| 100 | return device->get_link_layer(device, port_num); | ||
| 101 | |||
| 102 | switch (rdma_node_get_transport(device->node_type)) { | ||
| 103 | case RDMA_TRANSPORT_IB: | ||
| 104 | return IB_LINK_LAYER_INFINIBAND; | ||
| 105 | case RDMA_TRANSPORT_IWARP: | ||
| 106 | return IB_LINK_LAYER_ETHERNET; | ||
| 107 | default: | ||
| 108 | return IB_LINK_LAYER_UNSPECIFIED; | ||
| 109 | } | ||
| 110 | } | ||
| 111 | EXPORT_SYMBOL(rdma_port_get_link_layer); | ||
| 112 | |||
| 97 | /* Protection domains */ | 113 | /* Protection domains */ |
| 98 | 114 | ||
| 99 | struct ib_pd *ib_alloc_pd(struct ib_device *device) | 115 | struct ib_pd *ib_alloc_pd(struct ib_device *device) |
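rdma_port_get_link_layer() prefers the driver's get_link_layer() method and otherwise infers the link layer from the node transport: IB transports report InfiniBand, iWARP transports report Ethernet. A sketch of how a consumer might branch on it (assumed helper name, not from the patch):

	static bool port_is_ethernet(struct ib_device *device, u8 port_num)
	{
		return rdma_port_get_link_layer(device, port_num) ==
		       IB_LINK_LAYER_ETHERNET;
	}

	/* e.g. skip SMI (QP0) setup on ports where this returns true */
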
diff --git a/drivers/infiniband/hw/amso1100/Kbuild b/drivers/infiniband/hw/amso1100/Kbuild index 06964c4af849..950dfabcd89d 100644 --- a/drivers/infiniband/hw/amso1100/Kbuild +++ b/drivers/infiniband/hw/amso1100/Kbuild | |||
| @@ -1,6 +1,4 @@ | |||
| 1 | ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG | 1 | ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG |
| 2 | EXTRA_CFLAGS += -DDEBUG | ||
| 3 | endif | ||
| 4 | 2 | ||
| 5 | obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o | 3 | obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o |
| 6 | 4 | ||
diff --git a/drivers/infiniband/hw/amso1100/c2_intr.c b/drivers/infiniband/hw/amso1100/c2_intr.c index 3b5095470cb3..0ebe4e806b86 100644 --- a/drivers/infiniband/hw/amso1100/c2_intr.c +++ b/drivers/infiniband/hw/amso1100/c2_intr.c | |||
| @@ -62,8 +62,8 @@ void c2_rnic_interrupt(struct c2_dev *c2dev) | |||
| 62 | static void handle_mq(struct c2_dev *c2dev, u32 mq_index) | 62 | static void handle_mq(struct c2_dev *c2dev, u32 mq_index) |
| 63 | { | 63 | { |
| 64 | if (c2dev->qptr_array[mq_index] == NULL) { | 64 | if (c2dev->qptr_array[mq_index] == NULL) { |
| 65 | pr_debug(KERN_INFO "handle_mq: stray activity for mq_index=%d\n", | 65 | pr_debug("handle_mq: stray activity for mq_index=%d\n", |
| 66 | mq_index); | 66 | mq_index); |
| 67 | return; | 67 | return; |
| 68 | } | 68 | } |
| 69 | 69 | ||
diff --git a/drivers/infiniband/hw/cxgb3/Makefile b/drivers/infiniband/hw/cxgb3/Makefile index 7e7b5a66f042..621619c794e5 100644 --- a/drivers/infiniband/hw/cxgb3/Makefile +++ b/drivers/infiniband/hw/cxgb3/Makefile | |||
| @@ -1,10 +1,8 @@ | |||
| 1 | EXTRA_CFLAGS += -Idrivers/net/cxgb3 | 1 | ccflags-y := -Idrivers/net/cxgb3 |
| 2 | 2 | ||
| 3 | obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o | 3 | obj-$(CONFIG_INFINIBAND_CXGB3) += iw_cxgb3.o |
| 4 | 4 | ||
| 5 | iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \ | 5 | iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \ |
| 6 | iwch_provider.o iwch.o cxio_hal.o cxio_resource.o | 6 | iwch_provider.o iwch.o cxio_hal.o cxio_resource.o |
| 7 | 7 | ||
| 8 | ifdef CONFIG_INFINIBAND_CXGB3_DEBUG | 8 | ccflags-$(CONFIG_INFINIBAND_CXGB3_DEBUG) += -DDEBUG |
| 9 | EXTRA_CFLAGS += -DDEBUG | ||
| 10 | endif | ||
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 005b7b52bc1e..09dda0b8740e 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c | |||
| @@ -160,6 +160,7 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel) | |||
| 160 | struct rdma_cq_setup setup; | 160 | struct rdma_cq_setup setup; |
| 161 | int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); | 161 | int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); |
| 162 | 162 | ||
| 163 | size += 1; /* one extra page for storing cq-in-err state */ | ||
| 163 | cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); | 164 | cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); |
| 164 | if (!cq->cqid) | 165 | if (!cq->cqid) |
| 165 | return -ENOMEM; | 166 | return -ENOMEM; |
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index e5ddb63e7d23..4bb997aa39d0 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h | |||
| @@ -728,6 +728,22 @@ struct t3_cq { | |||
| 728 | #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \ | 728 | #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \ |
| 729 | CQE_GENBIT(*cqe)) | 729 | CQE_GENBIT(*cqe)) |
| 730 | 730 | ||
| 731 | struct t3_cq_status_page { | ||
| 732 | u32 cq_err; | ||
| 733 | }; | ||
| 734 | |||
| 735 | static inline int cxio_cq_in_error(struct t3_cq *cq) | ||
| 736 | { | ||
| 737 | return ((struct t3_cq_status_page *) | ||
| 738 | &cq->queue[1 << cq->size_log2])->cq_err; | ||
| 739 | } | ||
| 740 | |||
| 741 | static inline void cxio_set_cq_in_error(struct t3_cq *cq) | ||
| 742 | { | ||
| 743 | ((struct t3_cq_status_page *) | ||
| 744 | &cq->queue[1 << cq->size_log2])->cq_err = 1; | ||
| 745 | } | ||
| 746 | |||
| 731 | static inline void cxio_set_wq_in_error(struct t3_wq *wq) | 747 | static inline void cxio_set_wq_in_error(struct t3_wq *wq) |
| 732 | { | 748 | { |
| 733 | wq->queue->wq_in_err.err |= 1; | 749 | wq->queue->wq_in_err.err |= 1; |
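Together with the extra space reserved in cxio_create_cq(), the new cxio_cq_in_error()/cxio_set_cq_in_error() helpers keep the CQ error flag in a status page located one CQE slot past the end of the hardware ring, so the flag is visible through the same mapping as the CQ itself. A layout sketch, assuming the same queue indexing used by the helpers above:

	/*
	 * cq->queue[0 .. (1 << size_log2) - 1]   hardware CQEs
	 * cq->queue[1 << size_log2]              struct t3_cq_status_page (cq_err)
	 *
	 * flush_qp() sets cq_err for user-mode QPs; the user library is
	 * expected to check the equivalent of cxio_cq_in_error() when polling.
	 */
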
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 13c88871dc3b..d02dcc6e5963 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
| @@ -1093,8 +1093,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
| 1093 | PDBG("%s ep %p credits %u\n", __func__, ep, credits); | 1093 | PDBG("%s ep %p credits %u\n", __func__, ep, credits); |
| 1094 | 1094 | ||
| 1095 | if (credits == 0) { | 1095 | if (credits == 0) { |
| 1096 | PDBG(KERN_ERR "%s 0 credit ack ep %p state %u\n", | 1096 | PDBG("%s 0 credit ack ep %p state %u\n", |
| 1097 | __func__, ep, state_read(&ep->com)); | 1097 | __func__, ep, state_read(&ep->com)); |
| 1098 | return CPL_RET_BUF_DONE; | 1098 | return CPL_RET_BUF_DONE; |
| 1099 | } | 1099 | } |
| 1100 | 1100 | ||
diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c index 6afc89e7572c..71e0d845da3d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_ev.c +++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c | |||
| @@ -76,6 +76,14 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, | |||
| 76 | atomic_inc(&qhp->refcnt); | 76 | atomic_inc(&qhp->refcnt); |
| 77 | spin_unlock(&rnicp->lock); | 77 | spin_unlock(&rnicp->lock); |
| 78 | 78 | ||
| 79 | if (qhp->attr.state == IWCH_QP_STATE_RTS) { | ||
| 80 | attrs.next_state = IWCH_QP_STATE_TERMINATE; | ||
| 81 | iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, | ||
| 82 | &attrs, 1); | ||
| 83 | if (send_term) | ||
| 84 | iwch_post_terminate(qhp, rsp_msg); | ||
| 85 | } | ||
| 86 | |||
| 79 | event.event = ib_event; | 87 | event.event = ib_event; |
| 80 | event.device = chp->ibcq.device; | 88 | event.device = chp->ibcq.device; |
| 81 | if (ib_event == IB_EVENT_CQ_ERR) | 89 | if (ib_event == IB_EVENT_CQ_ERR) |
| @@ -86,13 +94,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, | |||
| 86 | if (qhp->ibqp.event_handler) | 94 | if (qhp->ibqp.event_handler) |
| 87 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); | 95 | (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); |
| 88 | 96 | ||
| 89 | if (qhp->attr.state == IWCH_QP_STATE_RTS) { | 97 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); |
| 90 | attrs.next_state = IWCH_QP_STATE_TERMINATE; | ||
| 91 | iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, | ||
| 92 | &attrs, 1); | ||
| 93 | if (send_term) | ||
| 94 | iwch_post_terminate(qhp, rsp_msg); | ||
| 95 | } | ||
| 96 | 98 | ||
| 97 | if (atomic_dec_and_test(&qhp->refcnt)) | 99 | if (atomic_dec_and_test(&qhp->refcnt)) |
| 98 | wake_up(&qhp->wait); | 100 | wake_up(&qhp->wait); |
| @@ -179,7 +181,6 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb) | |||
| 179 | case TPT_ERR_BOUND: | 181 | case TPT_ERR_BOUND: |
| 180 | case TPT_ERR_INVALIDATE_SHARED_MR: | 182 | case TPT_ERR_INVALIDATE_SHARED_MR: |
| 181 | case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND: | 183 | case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND: |
| 182 | (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); | ||
| 183 | post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1); | 184 | post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1); |
| 184 | break; | 185 | break; |
| 185 | 186 | ||
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index fca0b4b747e4..2e2741307af4 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
| @@ -154,6 +154,8 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve | |||
| 154 | struct iwch_create_cq_resp uresp; | 154 | struct iwch_create_cq_resp uresp; |
| 155 | struct iwch_create_cq_req ureq; | 155 | struct iwch_create_cq_req ureq; |
| 156 | struct iwch_ucontext *ucontext = NULL; | 156 | struct iwch_ucontext *ucontext = NULL; |
| 157 | static int warned; | ||
| 158 | size_t resplen; | ||
| 157 | 159 | ||
| 158 | PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); | 160 | PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries); |
| 159 | rhp = to_iwch_dev(ibdev); | 161 | rhp = to_iwch_dev(ibdev); |
| @@ -217,15 +219,26 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve | |||
| 217 | uresp.key = ucontext->key; | 219 | uresp.key = ucontext->key; |
| 218 | ucontext->key += PAGE_SIZE; | 220 | ucontext->key += PAGE_SIZE; |
| 219 | spin_unlock(&ucontext->mmap_lock); | 221 | spin_unlock(&ucontext->mmap_lock); |
| 220 | if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) { | 222 | mm->key = uresp.key; |
| 223 | mm->addr = virt_to_phys(chp->cq.queue); | ||
| 224 | if (udata->outlen < sizeof uresp) { | ||
| 225 | if (!warned++) | ||
| 226 | printk(KERN_WARNING MOD "Warning - " | ||
| 227 | "downlevel libcxgb3 (non-fatal).\n"); | ||
| 228 | mm->len = PAGE_ALIGN((1UL << uresp.size_log2) * | ||
| 229 | sizeof(struct t3_cqe)); | ||
| 230 | resplen = sizeof(struct iwch_create_cq_resp_v0); | ||
| 231 | } else { | ||
| 232 | mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) * | ||
| 233 | sizeof(struct t3_cqe)); | ||
| 234 | uresp.memsize = mm->len; | ||
| 235 | resplen = sizeof uresp; | ||
| 236 | } | ||
| 237 | if (ib_copy_to_udata(udata, &uresp, resplen)) { | ||
| 221 | kfree(mm); | 238 | kfree(mm); |
| 222 | iwch_destroy_cq(&chp->ibcq); | 239 | iwch_destroy_cq(&chp->ibcq); |
| 223 | return ERR_PTR(-EFAULT); | 240 | return ERR_PTR(-EFAULT); |
| 224 | } | 241 | } |
| 225 | mm->key = uresp.key; | ||
| 226 | mm->addr = virt_to_phys(chp->cq.queue); | ||
| 227 | mm->len = PAGE_ALIGN((1UL << uresp.size_log2) * | ||
| 228 | sizeof (struct t3_cqe)); | ||
| 229 | insert_mmap(ucontext, mm); | 242 | insert_mmap(ucontext, mm); |
| 230 | } | 243 | } |
| 231 | PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n", | 244 | PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n", |
| @@ -1414,6 +1427,7 @@ int iwch_register_device(struct iwch_dev *dev) | |||
| 1414 | dev->ibdev.post_send = iwch_post_send; | 1427 | dev->ibdev.post_send = iwch_post_send; |
| 1415 | dev->ibdev.post_recv = iwch_post_receive; | 1428 | dev->ibdev.post_recv = iwch_post_receive; |
| 1416 | dev->ibdev.get_protocol_stats = iwch_get_mib; | 1429 | dev->ibdev.get_protocol_stats = iwch_get_mib; |
| 1430 | dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; | ||
| 1417 | 1431 | ||
| 1418 | dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); | 1432 | dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); |
| 1419 | if (!dev->ibdev.iwcm) | 1433 | if (!dev->ibdev.iwcm) |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index c64d27bf2c15..0993137181d7 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
| @@ -802,14 +802,12 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) | |||
| 802 | /* | 802 | /* |
| 803 | * Assumes qhp lock is held. | 803 | * Assumes qhp lock is held. |
| 804 | */ | 804 | */ |
| 805 | static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag) | 805 | static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, |
| 806 | struct iwch_cq *schp, unsigned long *flag) | ||
| 806 | { | 807 | { |
| 807 | struct iwch_cq *rchp, *schp; | ||
| 808 | int count; | 808 | int count; |
| 809 | int flushed; | 809 | int flushed; |
| 810 | 810 | ||
| 811 | rchp = get_chp(qhp->rhp, qhp->attr.rcq); | ||
| 812 | schp = get_chp(qhp->rhp, qhp->attr.scq); | ||
| 813 | 811 | ||
| 814 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); | 812 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); |
| 815 | /* take a ref on the qhp since we must release the lock */ | 813 | /* take a ref on the qhp since we must release the lock */ |
| @@ -847,10 +845,23 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag) | |||
| 847 | 845 | ||
| 848 | static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) | 846 | static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) |
| 849 | { | 847 | { |
| 850 | if (qhp->ibqp.uobject) | 848 | struct iwch_cq *rchp, *schp; |
| 849 | |||
| 850 | rchp = get_chp(qhp->rhp, qhp->attr.rcq); | ||
| 851 | schp = get_chp(qhp->rhp, qhp->attr.scq); | ||
| 852 | |||
| 853 | if (qhp->ibqp.uobject) { | ||
| 851 | cxio_set_wq_in_error(&qhp->wq); | 854 | cxio_set_wq_in_error(&qhp->wq); |
| 852 | else | 855 | cxio_set_cq_in_error(&rchp->cq); |
| 853 | __flush_qp(qhp, flag); | 856 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
| 857 | if (schp != rchp) { | ||
| 858 | cxio_set_cq_in_error(&schp->cq); | ||
| 859 | (*schp->ibcq.comp_handler)(&schp->ibcq, | ||
| 860 | schp->ibcq.cq_context); | ||
| 861 | } | ||
| 862 | return; | ||
| 863 | } | ||
| 864 | __flush_qp(qhp, rchp, schp, flag); | ||
| 854 | } | 865 | } |
| 855 | 866 | ||
| 856 | 867 | ||
diff --git a/drivers/infiniband/hw/cxgb3/iwch_user.h b/drivers/infiniband/hw/cxgb3/iwch_user.h index cb7086f558c1..a277c31fcaf7 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_user.h +++ b/drivers/infiniband/hw/cxgb3/iwch_user.h | |||
| @@ -45,10 +45,18 @@ struct iwch_create_cq_req { | |||
| 45 | __u64 user_rptr_addr; | 45 | __u64 user_rptr_addr; |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | struct iwch_create_cq_resp_v0 { | ||
| 49 | __u64 key; | ||
| 50 | __u32 cqid; | ||
| 51 | __u32 size_log2; | ||
| 52 | }; | ||
| 53 | |||
| 48 | struct iwch_create_cq_resp { | 54 | struct iwch_create_cq_resp { |
| 49 | __u64 key; | 55 | __u64 key; |
| 50 | __u32 cqid; | 56 | __u32 cqid; |
| 51 | __u32 size_log2; | 57 | __u32 size_log2; |
| 58 | __u32 memsize; | ||
| 59 | __u32 reserved; | ||
| 52 | }; | 60 | }; |
| 53 | 61 | ||
| 54 | struct iwch_create_qp_resp { | 62 | struct iwch_create_qp_resp { |
diff --git a/drivers/infiniband/hw/cxgb4/Makefile b/drivers/infiniband/hw/cxgb4/Makefile index e31a499f0172..cd20b1342aec 100644 --- a/drivers/infiniband/hw/cxgb4/Makefile +++ b/drivers/infiniband/hw/cxgb4/Makefile | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | EXTRA_CFLAGS += -Idrivers/net/cxgb4 | 1 | ccflags-y := -Idrivers/net/cxgb4 |
| 2 | 2 | ||
| 3 | obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o | 3 | obj-$(CONFIG_INFINIBAND_CXGB4) += iw_cxgb4.o |
| 4 | 4 | ||
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 32d352a88d50..0dc62b1438be 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
| @@ -117,9 +117,9 @@ static int rcv_win = 256 * 1024; | |||
| 117 | module_param(rcv_win, int, 0644); | 117 | module_param(rcv_win, int, 0644); |
| 118 | MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); | 118 | MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)"); |
| 119 | 119 | ||
| 120 | static int snd_win = 32 * 1024; | 120 | static int snd_win = 128 * 1024; |
| 121 | module_param(snd_win, int, 0644); | 121 | module_param(snd_win, int, 0644); |
| 122 | MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)"); | 122 | MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)"); |
| 123 | 123 | ||
| 124 | static struct workqueue_struct *workq; | 124 | static struct workqueue_struct *workq; |
| 125 | 125 | ||
| @@ -172,7 +172,7 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb, | |||
| 172 | error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); | 172 | error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e); |
| 173 | if (error < 0) | 173 | if (error < 0) |
| 174 | kfree_skb(skb); | 174 | kfree_skb(skb); |
| 175 | return error; | 175 | return error < 0 ? error : 0; |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) | 178 | int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) |
| @@ -187,7 +187,7 @@ int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb) | |||
| 187 | error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); | 187 | error = cxgb4_ofld_send(rdev->lldi.ports[0], skb); |
| 188 | if (error < 0) | 188 | if (error < 0) |
| 189 | kfree_skb(skb); | 189 | kfree_skb(skb); |
| 190 | return error; | 190 | return error < 0 ? error : 0; |
| 191 | } | 191 | } |
| 192 | 192 | ||
| 193 | static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) | 193 | static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb) |
| @@ -219,12 +219,11 @@ static void set_emss(struct c4iw_ep *ep, u16 opt) | |||
| 219 | 219 | ||
| 220 | static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) | 220 | static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc) |
| 221 | { | 221 | { |
| 222 | unsigned long flags; | ||
| 223 | enum c4iw_ep_state state; | 222 | enum c4iw_ep_state state; |
| 224 | 223 | ||
| 225 | spin_lock_irqsave(&epc->lock, flags); | 224 | mutex_lock(&epc->mutex); |
| 226 | state = epc->state; | 225 | state = epc->state; |
| 227 | spin_unlock_irqrestore(&epc->lock, flags); | 226 | mutex_unlock(&epc->mutex); |
| 228 | return state; | 227 | return state; |
| 229 | } | 228 | } |
| 230 | 229 | ||
| @@ -235,12 +234,10 @@ static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) | |||
| 235 | 234 | ||
| 236 | static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) | 235 | static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) |
| 237 | { | 236 | { |
| 238 | unsigned long flags; | 237 | mutex_lock(&epc->mutex); |
| 239 | |||
| 240 | spin_lock_irqsave(&epc->lock, flags); | ||
| 241 | PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); | 238 | PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]); |
| 242 | __state_set(epc, new); | 239 | __state_set(epc, new); |
| 243 | spin_unlock_irqrestore(&epc->lock, flags); | 240 | mutex_unlock(&epc->mutex); |
| 244 | return; | 241 | return; |
| 245 | } | 242 | } |
| 246 | 243 | ||
| @@ -251,8 +248,8 @@ static void *alloc_ep(int size, gfp_t gfp) | |||
| 251 | epc = kzalloc(size, gfp); | 248 | epc = kzalloc(size, gfp); |
| 252 | if (epc) { | 249 | if (epc) { |
| 253 | kref_init(&epc->kref); | 250 | kref_init(&epc->kref); |
| 254 | spin_lock_init(&epc->lock); | 251 | mutex_init(&epc->mutex); |
| 255 | init_waitqueue_head(&epc->waitq); | 252 | c4iw_init_wr_wait(&epc->wr_wait); |
| 256 | } | 253 | } |
| 257 | PDBG("%s alloc ep %p\n", __func__, epc); | 254 | PDBG("%s alloc ep %p\n", __func__, epc); |
| 258 | return epc; | 255 | return epc; |
| @@ -1131,7 +1128,6 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1131 | { | 1128 | { |
| 1132 | struct c4iw_ep *ep; | 1129 | struct c4iw_ep *ep; |
| 1133 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb); | 1130 | struct cpl_abort_rpl_rss *rpl = cplhdr(skb); |
| 1134 | unsigned long flags; | ||
| 1135 | int release = 0; | 1131 | int release = 0; |
| 1136 | unsigned int tid = GET_TID(rpl); | 1132 | unsigned int tid = GET_TID(rpl); |
| 1137 | struct tid_info *t = dev->rdev.lldi.tids; | 1133 | struct tid_info *t = dev->rdev.lldi.tids; |
| @@ -1139,7 +1135,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1139 | ep = lookup_tid(t, tid); | 1135 | ep = lookup_tid(t, tid); |
| 1140 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1136 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 1141 | BUG_ON(!ep); | 1137 | BUG_ON(!ep); |
| 1142 | spin_lock_irqsave(&ep->com.lock, flags); | 1138 | mutex_lock(&ep->com.mutex); |
| 1143 | switch (ep->com.state) { | 1139 | switch (ep->com.state) { |
| 1144 | case ABORTING: | 1140 | case ABORTING: |
| 1145 | __state_set(&ep->com, DEAD); | 1141 | __state_set(&ep->com, DEAD); |
| @@ -1150,7 +1146,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1150 | __func__, ep, ep->com.state); | 1146 | __func__, ep, ep->com.state); |
| 1151 | break; | 1147 | break; |
| 1152 | } | 1148 | } |
| 1153 | spin_unlock_irqrestore(&ep->com.lock, flags); | 1149 | mutex_unlock(&ep->com.mutex); |
| 1154 | 1150 | ||
| 1155 | if (release) | 1151 | if (release) |
| 1156 | release_ep_resources(ep); | 1152 | release_ep_resources(ep); |
| @@ -1213,9 +1209,9 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1213 | } | 1209 | } |
| 1214 | PDBG("%s ep %p status %d error %d\n", __func__, ep, | 1210 | PDBG("%s ep %p status %d error %d\n", __func__, ep, |
| 1215 | rpl->status, status2errno(rpl->status)); | 1211 | rpl->status, status2errno(rpl->status)); |
| 1216 | ep->com.rpl_err = status2errno(rpl->status); | 1212 | ep->com.wr_wait.ret = status2errno(rpl->status); |
| 1217 | ep->com.rpl_done = 1; | 1213 | ep->com.wr_wait.done = 1; |
| 1218 | wake_up(&ep->com.waitq); | 1214 | wake_up(&ep->com.wr_wait.wait); |
| 1219 | 1215 | ||
| 1220 | return 0; | 1216 | return 0; |
| 1221 | } | 1217 | } |
| @@ -1249,9 +1245,9 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1249 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); | 1245 | struct c4iw_listen_ep *ep = lookup_stid(t, stid); |
| 1250 | 1246 | ||
| 1251 | PDBG("%s ep %p\n", __func__, ep); | 1247 | PDBG("%s ep %p\n", __func__, ep); |
| 1252 | ep->com.rpl_err = status2errno(rpl->status); | 1248 | ep->com.wr_wait.ret = status2errno(rpl->status); |
| 1253 | ep->com.rpl_done = 1; | 1249 | ep->com.wr_wait.done = 1; |
| 1254 | wake_up(&ep->com.waitq); | 1250 | wake_up(&ep->com.wr_wait.wait); |
| 1255 | return 0; | 1251 | return 0; |
| 1256 | } | 1252 | } |
| 1257 | 1253 | ||
| @@ -1478,7 +1474,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1478 | struct cpl_peer_close *hdr = cplhdr(skb); | 1474 | struct cpl_peer_close *hdr = cplhdr(skb); |
| 1479 | struct c4iw_ep *ep; | 1475 | struct c4iw_ep *ep; |
| 1480 | struct c4iw_qp_attributes attrs; | 1476 | struct c4iw_qp_attributes attrs; |
| 1481 | unsigned long flags; | ||
| 1482 | int disconnect = 1; | 1477 | int disconnect = 1; |
| 1483 | int release = 0; | 1478 | int release = 0; |
| 1484 | int closing = 0; | 1479 | int closing = 0; |
| @@ -1489,7 +1484,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1489 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | 1484 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); |
| 1490 | dst_confirm(ep->dst); | 1485 | dst_confirm(ep->dst); |
| 1491 | 1486 | ||
| 1492 | spin_lock_irqsave(&ep->com.lock, flags); | 1487 | mutex_lock(&ep->com.mutex); |
| 1493 | switch (ep->com.state) { | 1488 | switch (ep->com.state) { |
| 1494 | case MPA_REQ_WAIT: | 1489 | case MPA_REQ_WAIT: |
| 1495 | __state_set(&ep->com, CLOSING); | 1490 | __state_set(&ep->com, CLOSING); |
| @@ -1507,17 +1502,17 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1507 | * in rdma connection migration (see c4iw_accept_cr()). | 1502 | * in rdma connection migration (see c4iw_accept_cr()). |
| 1508 | */ | 1503 | */ |
| 1509 | __state_set(&ep->com, CLOSING); | 1504 | __state_set(&ep->com, CLOSING); |
| 1510 | ep->com.rpl_done = 1; | 1505 | ep->com.wr_wait.done = 1; |
| 1511 | ep->com.rpl_err = -ECONNRESET; | 1506 | ep->com.wr_wait.ret = -ECONNRESET; |
| 1512 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | 1507 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
| 1513 | wake_up(&ep->com.waitq); | 1508 | wake_up(&ep->com.wr_wait.wait); |
| 1514 | break; | 1509 | break; |
| 1515 | case MPA_REP_SENT: | 1510 | case MPA_REP_SENT: |
| 1516 | __state_set(&ep->com, CLOSING); | 1511 | __state_set(&ep->com, CLOSING); |
| 1517 | ep->com.rpl_done = 1; | 1512 | ep->com.wr_wait.done = 1; |
| 1518 | ep->com.rpl_err = -ECONNRESET; | 1513 | ep->com.wr_wait.ret = -ECONNRESET; |
| 1519 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | 1514 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); |
| 1520 | wake_up(&ep->com.waitq); | 1515 | wake_up(&ep->com.wr_wait.wait); |
| 1521 | break; | 1516 | break; |
| 1522 | case FPDU_MODE: | 1517 | case FPDU_MODE: |
| 1523 | start_ep_timer(ep); | 1518 | start_ep_timer(ep); |
| @@ -1550,7 +1545,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1550 | default: | 1545 | default: |
| 1551 | BUG_ON(1); | 1546 | BUG_ON(1); |
| 1552 | } | 1547 | } |
| 1553 | spin_unlock_irqrestore(&ep->com.lock, flags); | 1548 | mutex_unlock(&ep->com.mutex); |
| 1554 | if (closing) { | 1549 | if (closing) { |
| 1555 | attrs.next_state = C4IW_QP_STATE_CLOSING; | 1550 | attrs.next_state = C4IW_QP_STATE_CLOSING; |
| 1556 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | 1551 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, |
| @@ -1581,7 +1576,6 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1581 | struct c4iw_qp_attributes attrs; | 1576 | struct c4iw_qp_attributes attrs; |
| 1582 | int ret; | 1577 | int ret; |
| 1583 | int release = 0; | 1578 | int release = 0; |
| 1584 | unsigned long flags; | ||
| 1585 | struct tid_info *t = dev->rdev.lldi.tids; | 1579 | struct tid_info *t = dev->rdev.lldi.tids; |
| 1586 | unsigned int tid = GET_TID(req); | 1580 | unsigned int tid = GET_TID(req); |
| 1587 | 1581 | ||
| @@ -1591,9 +1585,17 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1591 | ep->hwtid); | 1585 | ep->hwtid); |
| 1592 | return 0; | 1586 | return 0; |
| 1593 | } | 1587 | } |
| 1594 | spin_lock_irqsave(&ep->com.lock, flags); | ||
| 1595 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, | 1588 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, |
| 1596 | ep->com.state); | 1589 | ep->com.state); |
| 1590 | |||
| 1591 | /* | ||
| 1592 | * Wake up any threads in rdma_init() or rdma_fini(). | ||
| 1593 | */ | ||
| 1594 | ep->com.wr_wait.done = 1; | ||
| 1595 | ep->com.wr_wait.ret = -ECONNRESET; | ||
| 1596 | wake_up(&ep->com.wr_wait.wait); | ||
| 1597 | |||
| 1598 | mutex_lock(&ep->com.mutex); | ||
| 1597 | switch (ep->com.state) { | 1599 | switch (ep->com.state) { |
| 1598 | case CONNECTING: | 1600 | case CONNECTING: |
| 1599 | break; | 1601 | break; |
| @@ -1605,23 +1607,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1605 | connect_reply_upcall(ep, -ECONNRESET); | 1607 | connect_reply_upcall(ep, -ECONNRESET); |
| 1606 | break; | 1608 | break; |
| 1607 | case MPA_REP_SENT: | 1609 | case MPA_REP_SENT: |
| 1608 | ep->com.rpl_done = 1; | ||
| 1609 | ep->com.rpl_err = -ECONNRESET; | ||
| 1610 | PDBG("waking up ep %p\n", ep); | ||
| 1611 | wake_up(&ep->com.waitq); | ||
| 1612 | break; | 1610 | break; |
| 1613 | case MPA_REQ_RCVD: | 1611 | case MPA_REQ_RCVD: |
| 1614 | |||
| 1615 | /* | ||
| 1616 | * We're gonna mark this puppy DEAD, but keep | ||
| 1617 | * the reference on it until the ULP accepts or | ||
| 1618 | * rejects the CR. Also wake up anyone waiting | ||
| 1619 | * in rdma connection migration (see c4iw_accept_cr()). | ||
| 1620 | */ | ||
| 1621 | ep->com.rpl_done = 1; | ||
| 1622 | ep->com.rpl_err = -ECONNRESET; | ||
| 1623 | PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); | ||
| 1624 | wake_up(&ep->com.waitq); | ||
| 1625 | break; | 1612 | break; |
| 1626 | case MORIBUND: | 1613 | case MORIBUND: |
| 1627 | case CLOSING: | 1614 | case CLOSING: |
| @@ -1644,7 +1631,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1644 | break; | 1631 | break; |
| 1645 | case DEAD: | 1632 | case DEAD: |
| 1646 | PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); | 1633 | PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__); |
| 1647 | spin_unlock_irqrestore(&ep->com.lock, flags); | 1634 | mutex_unlock(&ep->com.mutex); |
| 1648 | return 0; | 1635 | return 0; |
| 1649 | default: | 1636 | default: |
| 1650 | BUG_ON(1); | 1637 | BUG_ON(1); |
| @@ -1655,7 +1642,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1655 | __state_set(&ep->com, DEAD); | 1642 | __state_set(&ep->com, DEAD); |
| 1656 | release = 1; | 1643 | release = 1; |
| 1657 | } | 1644 | } |
| 1658 | spin_unlock_irqrestore(&ep->com.lock, flags); | 1645 | mutex_unlock(&ep->com.mutex); |
| 1659 | 1646 | ||
| 1660 | rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); | 1647 | rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL); |
| 1661 | if (!rpl_skb) { | 1648 | if (!rpl_skb) { |
| @@ -1681,7 +1668,6 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1681 | struct c4iw_ep *ep; | 1668 | struct c4iw_ep *ep; |
| 1682 | struct c4iw_qp_attributes attrs; | 1669 | struct c4iw_qp_attributes attrs; |
| 1683 | struct cpl_close_con_rpl *rpl = cplhdr(skb); | 1670 | struct cpl_close_con_rpl *rpl = cplhdr(skb); |
| 1684 | unsigned long flags; | ||
| 1685 | int release = 0; | 1671 | int release = 0; |
| 1686 | struct tid_info *t = dev->rdev.lldi.tids; | 1672 | struct tid_info *t = dev->rdev.lldi.tids; |
| 1687 | unsigned int tid = GET_TID(rpl); | 1673 | unsigned int tid = GET_TID(rpl); |
| @@ -1692,7 +1678,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1692 | BUG_ON(!ep); | 1678 | BUG_ON(!ep); |
| 1693 | 1679 | ||
| 1694 | /* The cm_id may be null if we failed to connect */ | 1680 | /* The cm_id may be null if we failed to connect */ |
| 1695 | spin_lock_irqsave(&ep->com.lock, flags); | 1681 | mutex_lock(&ep->com.mutex); |
| 1696 | switch (ep->com.state) { | 1682 | switch (ep->com.state) { |
| 1697 | case CLOSING: | 1683 | case CLOSING: |
| 1698 | __state_set(&ep->com, MORIBUND); | 1684 | __state_set(&ep->com, MORIBUND); |
| @@ -1717,7 +1703,7 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1717 | BUG_ON(1); | 1703 | BUG_ON(1); |
| 1718 | break; | 1704 | break; |
| 1719 | } | 1705 | } |
| 1720 | spin_unlock_irqrestore(&ep->com.lock, flags); | 1706 | mutex_unlock(&ep->com.mutex); |
| 1721 | if (release) | 1707 | if (release) |
| 1722 | release_ep_resources(ep); | 1708 | release_ep_resources(ep); |
| 1723 | return 0; | 1709 | return 0; |
| @@ -1725,23 +1711,24 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1725 | 1711 | ||
| 1726 | static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) | 1712 | static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) |
| 1727 | { | 1713 | { |
| 1728 | struct c4iw_ep *ep; | 1714 | struct cpl_rdma_terminate *rpl = cplhdr(skb); |
| 1729 | struct cpl_rdma_terminate *term = cplhdr(skb); | ||
| 1730 | struct tid_info *t = dev->rdev.lldi.tids; | 1715 | struct tid_info *t = dev->rdev.lldi.tids; |
| 1731 | unsigned int tid = GET_TID(term); | 1716 | unsigned int tid = GET_TID(rpl); |
| 1717 | struct c4iw_ep *ep; | ||
| 1718 | struct c4iw_qp_attributes attrs; | ||
| 1732 | 1719 | ||
| 1733 | ep = lookup_tid(t, tid); | 1720 | ep = lookup_tid(t, tid); |
| 1721 | BUG_ON(!ep); | ||
| 1734 | 1722 | ||
| 1735 | if (state_read(&ep->com) != FPDU_MODE) | 1723 | if (ep->com.qp) { |
| 1736 | return 0; | 1724 | printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, |
| 1725 | ep->com.qp->wq.sq.qid); | ||
| 1726 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | ||
| 1727 | c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, | ||
| 1728 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); | ||
| 1729 | } else | ||
| 1730 | printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); | ||
| 1737 | 1731 | ||
| 1738 | PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); | ||
| 1739 | skb_pull(skb, sizeof *term); | ||
| 1740 | PDBG("%s saving %d bytes of term msg\n", __func__, skb->len); | ||
| 1741 | skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer, | ||
| 1742 | skb->len); | ||
| 1743 | ep->com.qp->attr.terminate_msg_len = skb->len; | ||
| 1744 | ep->com.qp->attr.is_terminate_local = 0; | ||
| 1745 | return 0; | 1732 | return 0; |
| 1746 | } | 1733 | } |
| 1747 | 1734 | ||
| @@ -1762,8 +1749,8 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 1762 | ep = lookup_tid(t, tid); | 1749 | ep = lookup_tid(t, tid); |
| 1763 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); | 1750 | PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); |
| 1764 | if (credits == 0) { | 1751 | if (credits == 0) { |
| 1765 | PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n", | 1752 | PDBG("%s 0 credit ack ep %p tid %u state %u\n", |
| 1766 | __func__, ep, ep->hwtid, state_read(&ep->com)); | 1753 | __func__, ep, ep->hwtid, state_read(&ep->com)); |
| 1767 | return 0; | 1754 | return 0; |
| 1768 | } | 1755 | } |
| 1769 | 1756 | ||
| @@ -2042,6 +2029,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
| 2042 | } | 2029 | } |
| 2043 | 2030 | ||
| 2044 | state_set(&ep->com, LISTEN); | 2031 | state_set(&ep->com, LISTEN); |
| 2032 | c4iw_init_wr_wait(&ep->com.wr_wait); | ||
| 2045 | err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, | 2033 | err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, |
| 2046 | ep->com.local_addr.sin_addr.s_addr, | 2034 | ep->com.local_addr.sin_addr.s_addr, |
| 2047 | ep->com.local_addr.sin_port, | 2035 | ep->com.local_addr.sin_port, |
| @@ -2050,15 +2038,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) | |||
| 2050 | goto fail3; | 2038 | goto fail3; |
| 2051 | 2039 | ||
| 2052 | /* wait for pass_open_rpl */ | 2040 | /* wait for pass_open_rpl */ |
| 2053 | wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO); | 2041 | err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, |
| 2054 | if (ep->com.rpl_done) | 2042 | __func__); |
| 2055 | err = ep->com.rpl_err; | ||
| 2056 | else { | ||
| 2057 | printk(KERN_ERR MOD "Device %s not responding!\n", | ||
| 2058 | pci_name(ep->com.dev->rdev.lldi.pdev)); | ||
| 2059 | ep->com.dev->rdev.flags = T4_FATAL_ERROR; | ||
| 2060 | err = -EIO; | ||
| 2061 | } | ||
| 2062 | if (!err) { | 2043 | if (!err) { |
| 2063 | cm_id->provider_data = ep; | 2044 | cm_id->provider_data = ep; |
| 2064 | goto out; | 2045 | goto out; |
| @@ -2082,20 +2063,12 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) | |||
| 2082 | 2063 | ||
| 2083 | might_sleep(); | 2064 | might_sleep(); |
| 2084 | state_set(&ep->com, DEAD); | 2065 | state_set(&ep->com, DEAD); |
| 2085 | ep->com.rpl_done = 0; | 2066 | c4iw_init_wr_wait(&ep->com.wr_wait); |
| 2086 | ep->com.rpl_err = 0; | ||
| 2087 | err = listen_stop(ep); | 2067 | err = listen_stop(ep); |
| 2088 | if (err) | 2068 | if (err) |
| 2089 | goto done; | 2069 | goto done; |
| 2090 | wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO); | 2070 | err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, |
| 2091 | if (ep->com.rpl_done) | 2071 | __func__); |
| 2092 | err = ep->com.rpl_err; | ||
| 2093 | else { | ||
| 2094 | printk(KERN_ERR MOD "Device %s not responding!\n", | ||
| 2095 | pci_name(ep->com.dev->rdev.lldi.pdev)); | ||
| 2096 | ep->com.dev->rdev.flags = T4_FATAL_ERROR; | ||
| 2097 | err = -EIO; | ||
| 2098 | } | ||
| 2099 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); | 2072 | cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); |
| 2100 | done: | 2073 | done: |
| 2101 | cm_id->rem_ref(cm_id); | 2074 | cm_id->rem_ref(cm_id); |
| @@ -2106,12 +2079,11 @@ done: | |||
| 2106 | int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | 2079 | int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) |
| 2107 | { | 2080 | { |
| 2108 | int ret = 0; | 2081 | int ret = 0; |
| 2109 | unsigned long flags; | ||
| 2110 | int close = 0; | 2082 | int close = 0; |
| 2111 | int fatal = 0; | 2083 | int fatal = 0; |
| 2112 | struct c4iw_rdev *rdev; | 2084 | struct c4iw_rdev *rdev; |
| 2113 | 2085 | ||
| 2114 | spin_lock_irqsave(&ep->com.lock, flags); | 2086 | mutex_lock(&ep->com.mutex); |
| 2115 | 2087 | ||
| 2116 | PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, | 2088 | PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, |
| 2117 | states[ep->com.state], abrupt); | 2089 | states[ep->com.state], abrupt); |
| @@ -2158,7 +2130,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | |||
| 2158 | break; | 2130 | break; |
| 2159 | } | 2131 | } |
| 2160 | 2132 | ||
| 2161 | spin_unlock_irqrestore(&ep->com.lock, flags); | 2133 | mutex_unlock(&ep->com.mutex); |
| 2162 | if (close) { | 2134 | if (close) { |
| 2163 | if (abrupt) | 2135 | if (abrupt) |
| 2164 | ret = abort_connection(ep, NULL, gfp); | 2136 | ret = abort_connection(ep, NULL, gfp); |
| @@ -2172,6 +2144,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) | |||
| 2172 | return ret; | 2144 | return ret; |
| 2173 | } | 2145 | } |
| 2174 | 2146 | ||
| 2147 | static int async_event(struct c4iw_dev *dev, struct sk_buff *skb) | ||
| 2148 | { | ||
| 2149 | struct cpl_fw6_msg *rpl = cplhdr(skb); | ||
| 2150 | c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); | ||
| 2151 | return 0; | ||
| 2152 | } | ||
| 2153 | |||
| 2175 | /* | 2154 | /* |
| 2176 | * These are the real handlers that are called from a | 2155 | * These are the real handlers that are called from a |
| 2177 | * work queue. | 2156 | * work queue. |
| @@ -2190,7 +2169,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = { | |||
| 2190 | [CPL_ABORT_REQ_RSS] = peer_abort, | 2169 | [CPL_ABORT_REQ_RSS] = peer_abort, |
| 2191 | [CPL_CLOSE_CON_RPL] = close_con_rpl, | 2170 | [CPL_CLOSE_CON_RPL] = close_con_rpl, |
| 2192 | [CPL_RDMA_TERMINATE] = terminate, | 2171 | [CPL_RDMA_TERMINATE] = terminate, |
| 2193 | [CPL_FW4_ACK] = fw4_ack | 2172 | [CPL_FW4_ACK] = fw4_ack, |
| 2173 | [CPL_FW6_MSG] = async_event | ||
| 2194 | }; | 2174 | }; |
| 2195 | 2175 | ||
| 2196 | static void process_timeout(struct c4iw_ep *ep) | 2176 | static void process_timeout(struct c4iw_ep *ep) |
| @@ -2198,7 +2178,7 @@ static void process_timeout(struct c4iw_ep *ep) | |||
| 2198 | struct c4iw_qp_attributes attrs; | 2178 | struct c4iw_qp_attributes attrs; |
| 2199 | int abort = 1; | 2179 | int abort = 1; |
| 2200 | 2180 | ||
| 2201 | spin_lock_irq(&ep->com.lock); | 2181 | mutex_lock(&ep->com.mutex); |
| 2202 | PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, | 2182 | PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, |
| 2203 | ep->com.state); | 2183 | ep->com.state); |
| 2204 | switch (ep->com.state) { | 2184 | switch (ep->com.state) { |
| @@ -2225,7 +2205,7 @@ static void process_timeout(struct c4iw_ep *ep) | |||
| 2225 | WARN_ON(1); | 2205 | WARN_ON(1); |
| 2226 | abort = 0; | 2206 | abort = 0; |
| 2227 | } | 2207 | } |
| 2228 | spin_unlock_irq(&ep->com.lock); | 2208 | mutex_unlock(&ep->com.mutex); |
| 2229 | if (abort) | 2209 | if (abort) |
| 2230 | abort_connection(ep, NULL, GFP_KERNEL); | 2210 | abort_connection(ep, NULL, GFP_KERNEL); |
| 2231 | c4iw_put_ep(&ep->com); | 2211 | c4iw_put_ep(&ep->com); |
| @@ -2309,6 +2289,7 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 2309 | printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " | 2289 | printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u " |
| 2310 | "for tid %u\n", rpl->status, GET_TID(rpl)); | 2290 | "for tid %u\n", rpl->status, GET_TID(rpl)); |
| 2311 | } | 2291 | } |
| 2292 | kfree_skb(skb); | ||
| 2312 | return 0; | 2293 | return 0; |
| 2313 | } | 2294 | } |
| 2314 | 2295 | ||
| @@ -2323,20 +2304,25 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) | |||
| 2323 | switch (rpl->type) { | 2304 | switch (rpl->type) { |
| 2324 | case 1: | 2305 | case 1: |
| 2325 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); | 2306 | ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); |
| 2326 | wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1]; | 2307 | wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; |
| 2327 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); | 2308 | PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); |
| 2328 | if (wr_waitp) { | 2309 | if (wr_waitp) { |
| 2329 | wr_waitp->ret = ret; | 2310 | if (ret) |
| 2311 | wr_waitp->ret = -ret; | ||
| 2312 | else | ||
| 2313 | wr_waitp->ret = 0; | ||
| 2330 | wr_waitp->done = 1; | 2314 | wr_waitp->done = 1; |
| 2331 | wake_up(&wr_waitp->wait); | 2315 | wake_up(&wr_waitp->wait); |
| 2332 | } | 2316 | } |
| 2317 | kfree_skb(skb); | ||
| 2333 | break; | 2318 | break; |
| 2334 | case 2: | 2319 | case 2: |
| 2335 | c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); | 2320 | sched(dev, skb); |
| 2336 | break; | 2321 | break; |
| 2337 | default: | 2322 | default: |
| 2338 | printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, | 2323 | printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__, |
| 2339 | rpl->type); | 2324 | rpl->type); |
| 2325 | kfree_skb(skb); | ||
| 2340 | break; | 2326 | break; |
| 2341 | } | 2327 | } |
| 2342 | return 0; | 2328 | return 0; |
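The cm.c conversions above replace the open-coded rpl_done/rpl_err/waitq pattern with a struct c4iw_wr_wait plus c4iw_wait_for_reply(). The helper's body is not shown in these hunks; a plausible reconstruction from the waits it replaces (a sketch only, under an assumed name; the real definition lives elsewhere in the driver) is:

	static int c4iw_wait_for_reply_sketch(struct c4iw_rdev *rdev,
					      struct c4iw_wr_wait *wr_waitp,
					      u32 hwtid, u32 qpid,
					      const char *func)
	{
		/* hwtid/qpid are assumed to be used only for diagnostics */
		wait_event_timeout(wr_waitp->wait, wr_waitp->done, C4IW_WR_TO);
		if (!wr_waitp->done) {
			printk(KERN_ERR MOD "%s - Device %s not responding!\n",
			       func, pci_name(rdev->lldi.pdev));
			rdev->flags = T4_FATAL_ERROR;
			return -EIO;
		}
		return wr_waitp->ret;
	}
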
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index b3daf39eed4a..8d8f8add6fcd 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
| @@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, | |||
| 55 | V_FW_RI_RES_WR_NRES(1) | | 55 | V_FW_RI_RES_WR_NRES(1) | |
| 56 | FW_WR_COMPL(1)); | 56 | FW_WR_COMPL(1)); |
| 57 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); | 57 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
| 58 | res_wr->cookie = (u64)&wr_wait; | 58 | res_wr->cookie = (unsigned long) &wr_wait; |
| 59 | res = res_wr->res; | 59 | res = res_wr->res; |
| 60 | res->u.cq.restype = FW_RI_RES_TYPE_CQ; | 60 | res->u.cq.restype = FW_RI_RES_TYPE_CQ; |
| 61 | res->u.cq.op = FW_RI_RES_OP_RESET; | 61 | res->u.cq.op = FW_RI_RES_OP_RESET; |
| @@ -64,14 +64,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, | |||
| 64 | c4iw_init_wr_wait(&wr_wait); | 64 | c4iw_init_wr_wait(&wr_wait); |
| 65 | ret = c4iw_ofld_send(rdev, skb); | 65 | ret = c4iw_ofld_send(rdev, skb); |
| 66 | if (!ret) { | 66 | if (!ret) { |
| 67 | wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); | 67 | ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__); |
| 68 | if (!wr_wait.done) { | ||
| 69 | printk(KERN_ERR MOD "Device %s not responding!\n", | ||
| 70 | pci_name(rdev->lldi.pdev)); | ||
| 71 | rdev->flags = T4_FATAL_ERROR; | ||
| 72 | ret = -EIO; | ||
| 73 | } else | ||
| 74 | ret = wr_wait.ret; | ||
| 75 | } | 68 | } |
| 76 | 69 | ||
| 77 | kfree(cq->sw_queue); | 70 | kfree(cq->sw_queue); |
| @@ -132,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, | |||
| 132 | V_FW_RI_RES_WR_NRES(1) | | 125 | V_FW_RI_RES_WR_NRES(1) | |
| 133 | FW_WR_COMPL(1)); | 126 | FW_WR_COMPL(1)); |
| 134 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); | 127 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
| 135 | res_wr->cookie = (u64)&wr_wait; | 128 | res_wr->cookie = (unsigned long) &wr_wait; |
| 136 | res = res_wr->res; | 129 | res = res_wr->res; |
| 137 | res->u.cq.restype = FW_RI_RES_TYPE_CQ; | 130 | res->u.cq.restype = FW_RI_RES_TYPE_CQ; |
| 138 | res->u.cq.op = FW_RI_RES_OP_WRITE; | 131 | res->u.cq.op = FW_RI_RES_OP_WRITE; |
| @@ -157,14 +150,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, | |||
| 157 | if (ret) | 150 | if (ret) |
| 158 | goto err4; | 151 | goto err4; |
| 159 | PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait); | 152 | PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait); |
| 160 | wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); | 153 | ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__); |
| 161 | if (!wr_wait.done) { | ||
| 162 | printk(KERN_ERR MOD "Device %s not responding!\n", | ||
| 163 | pci_name(rdev->lldi.pdev)); | ||
| 164 | rdev->flags = T4_FATAL_ERROR; | ||
| 165 | ret = -EIO; | ||
| 166 | } else | ||
| 167 | ret = wr_wait.ret; | ||
| 168 | if (ret) | 154 | if (ret) |
| 169 | goto err4; | 155 | goto err4; |
| 170 | 156 | ||
| @@ -476,6 +462,11 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, | |||
| 476 | goto proc_cqe; | 462 | goto proc_cqe; |
| 477 | } | 463 | } |
| 478 | 464 | ||
| 465 | if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) { | ||
| 466 | ret = -EAGAIN; | ||
| 467 | goto skip_cqe; | ||
| 468 | } | ||
| 469 | |||
| 479 | /* | 470 | /* |
| 480 | * RECV completion. | 471 | * RECV completion. |
| 481 | */ | 472 | */ |
| @@ -696,6 +687,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) | |||
| 696 | case T4_ERR_MSN_RANGE: | 687 | case T4_ERR_MSN_RANGE: |
| 697 | case T4_ERR_IRD_OVERFLOW: | 688 | case T4_ERR_IRD_OVERFLOW: |
| 698 | case T4_ERR_OPCODE: | 689 | case T4_ERR_OPCODE: |
| 690 | case T4_ERR_INTERNAL_ERR: | ||
| 699 | wc->status = IB_WC_FATAL_ERR; | 691 | wc->status = IB_WC_FATAL_ERR; |
| 700 | break; | 692 | break; |
| 701 | case T4_ERR_SWFLUSH: | 693 | case T4_ERR_SWFLUSH: |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 9bbf491d5d9e..54fbc1118abe 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
| @@ -49,29 +49,33 @@ static DEFINE_MUTEX(dev_mutex); | |||
| 49 | 49 | ||
| 50 | static struct dentry *c4iw_debugfs_root; | 50 | static struct dentry *c4iw_debugfs_root; |
| 51 | 51 | ||
| 52 | struct debugfs_qp_data { | 52 | struct c4iw_debugfs_data { |
| 53 | struct c4iw_dev *devp; | 53 | struct c4iw_dev *devp; |
| 54 | char *buf; | 54 | char *buf; |
| 55 | int bufsize; | 55 | int bufsize; |
| 56 | int pos; | 56 | int pos; |
| 57 | }; | 57 | }; |
| 58 | 58 | ||
| 59 | static int count_qps(int id, void *p, void *data) | 59 | static int count_idrs(int id, void *p, void *data) |
| 60 | { | 60 | { |
| 61 | struct c4iw_qp *qp = p; | ||
| 62 | int *countp = data; | 61 | int *countp = data; |
| 63 | 62 | ||
| 64 | if (id != qp->wq.sq.qid) | ||
| 65 | return 0; | ||
| 66 | |||
| 67 | *countp = *countp + 1; | 63 | *countp = *countp + 1; |
| 68 | return 0; | 64 | return 0; |
| 69 | } | 65 | } |
| 70 | 66 | ||
| 71 | static int dump_qps(int id, void *p, void *data) | 67 | static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count, |
| 68 | loff_t *ppos) | ||
| 69 | { | ||
| 70 | struct c4iw_debugfs_data *d = file->private_data; | ||
| 71 | |||
| 72 | return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos); | ||
| 73 | } | ||
| 74 | |||
| 75 | static int dump_qp(int id, void *p, void *data) | ||
| 72 | { | 76 | { |
| 73 | struct c4iw_qp *qp = p; | 77 | struct c4iw_qp *qp = p; |
| 74 | struct debugfs_qp_data *qpd = data; | 78 | struct c4iw_debugfs_data *qpd = data; |
| 75 | int space; | 79 | int space; |
| 76 | int cc; | 80 | int cc; |
| 77 | 81 | ||
| @@ -101,7 +105,7 @@ static int dump_qps(int id, void *p, void *data) | |||
| 101 | 105 | ||
| 102 | static int qp_release(struct inode *inode, struct file *file) | 106 | static int qp_release(struct inode *inode, struct file *file) |
| 103 | { | 107 | { |
| 104 | struct debugfs_qp_data *qpd = file->private_data; | 108 | struct c4iw_debugfs_data *qpd = file->private_data; |
| 105 | if (!qpd) { | 109 | if (!qpd) { |
| 106 | printk(KERN_INFO "%s null qpd?\n", __func__); | 110 | printk(KERN_INFO "%s null qpd?\n", __func__); |
| 107 | return 0; | 111 | return 0; |
| @@ -113,7 +117,7 @@ static int qp_release(struct inode *inode, struct file *file) | |||
| 113 | 117 | ||
| 114 | static int qp_open(struct inode *inode, struct file *file) | 118 | static int qp_open(struct inode *inode, struct file *file) |
| 115 | { | 119 | { |
| 116 | struct debugfs_qp_data *qpd; | 120 | struct c4iw_debugfs_data *qpd; |
| 117 | int ret = 0; | 121 | int ret = 0; |
| 118 | int count = 1; | 122 | int count = 1; |
| 119 | 123 | ||
| @@ -126,7 +130,7 @@ static int qp_open(struct inode *inode, struct file *file) | |||
| 126 | qpd->pos = 0; | 130 | qpd->pos = 0; |
| 127 | 131 | ||
| 128 | spin_lock_irq(&qpd->devp->lock); | 132 | spin_lock_irq(&qpd->devp->lock); |
| 129 | idr_for_each(&qpd->devp->qpidr, count_qps, &count); | 133 | idr_for_each(&qpd->devp->qpidr, count_idrs, &count); |
| 130 | spin_unlock_irq(&qpd->devp->lock); | 134 | spin_unlock_irq(&qpd->devp->lock); |
| 131 | 135 | ||
| 132 | qpd->bufsize = count * 128; | 136 | qpd->bufsize = count * 128; |
| @@ -137,7 +141,7 @@ static int qp_open(struct inode *inode, struct file *file) | |||
| 137 | } | 141 | } |
| 138 | 142 | ||
| 139 | spin_lock_irq(&qpd->devp->lock); | 143 | spin_lock_irq(&qpd->devp->lock); |
| 140 | idr_for_each(&qpd->devp->qpidr, dump_qps, qpd); | 144 | idr_for_each(&qpd->devp->qpidr, dump_qp, qpd); |
| 141 | spin_unlock_irq(&qpd->devp->lock); | 145 | spin_unlock_irq(&qpd->devp->lock); |
| 142 | 146 | ||
| 143 | qpd->buf[qpd->pos++] = 0; | 147 | qpd->buf[qpd->pos++] = 0; |
| @@ -149,43 +153,86 @@ out: | |||
| 149 | return ret; | 153 | return ret; |
| 150 | } | 154 | } |
| 151 | 155 | ||
| 152 | static ssize_t qp_read(struct file *file, char __user *buf, size_t count, | 156 | static const struct file_operations qp_debugfs_fops = { |
| 153 | loff_t *ppos) | 157 | .owner = THIS_MODULE, |
| 158 | .open = qp_open, | ||
| 159 | .release = qp_release, | ||
| 160 | .read = debugfs_read, | ||
| 161 | .llseek = default_llseek, | ||
| 162 | }; | ||
| 163 | |||
| 164 | static int dump_stag(int id, void *p, void *data) | ||
| 154 | { | 165 | { |
| 155 | struct debugfs_qp_data *qpd = file->private_data; | 166 | struct c4iw_debugfs_data *stagd = data; |
| 156 | loff_t pos = *ppos; | 167 | int space; |
| 157 | loff_t avail = qpd->pos; | 168 | int cc; |
| 158 | 169 | ||
| 159 | if (pos < 0) | 170 | space = stagd->bufsize - stagd->pos - 1; |
| 160 | return -EINVAL; | 171 | if (space == 0) |
| 161 | if (pos >= avail) | 172 | return 1; |
| 173 | |||
| 174 | cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8); | ||
| 175 | if (cc < space) | ||
| 176 | stagd->pos += cc; | ||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 180 | static int stag_release(struct inode *inode, struct file *file) | ||
| 181 | { | ||
| 182 | struct c4iw_debugfs_data *stagd = file->private_data; | ||
| 183 | if (!stagd) { | ||
| 184 | printk(KERN_INFO "%s null stagd?\n", __func__); | ||
| 162 | return 0; | 185 | return 0; |
| 163 | if (count > avail - pos) | 186 | } |
| 164 | count = avail - pos; | 187 | kfree(stagd->buf); |
| 188 | kfree(stagd); | ||
| 189 | return 0; | ||
| 190 | } | ||
| 165 | 191 | ||
| 166 | while (count) { | 192 | static int stag_open(struct inode *inode, struct file *file) |
| 167 | size_t len = 0; | 193 | { |
| 194 | struct c4iw_debugfs_data *stagd; | ||
| 195 | int ret = 0; | ||
| 196 | int count = 1; | ||
| 168 | 197 | ||
| 169 | len = min((int)count, (int)qpd->pos - (int)pos); | 198 | stagd = kmalloc(sizeof *stagd, GFP_KERNEL); |
| 170 | if (copy_to_user(buf, qpd->buf + pos, len)) | 199 | if (!stagd) { |
| 171 | return -EFAULT; | 200 | ret = -ENOMEM; |
| 172 | if (len == 0) | 201 | goto out; |
| 173 | return -EINVAL; | 202 | } |
| 203 | stagd->devp = inode->i_private; | ||
| 204 | stagd->pos = 0; | ||
| 205 | |||
| 206 | spin_lock_irq(&stagd->devp->lock); | ||
| 207 | idr_for_each(&stagd->devp->mmidr, count_idrs, &count); | ||
| 208 | spin_unlock_irq(&stagd->devp->lock); | ||
| 174 | 209 | ||
| 175 | buf += len; | 210 | stagd->bufsize = count * sizeof("0x12345678\n"); |
| 176 | pos += len; | 211 | stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL); |
| 177 | count -= len; | 212 | if (!stagd->buf) { |
| 213 | ret = -ENOMEM; | ||
| 214 | goto err1; | ||
| 178 | } | 215 | } |
| 179 | count = pos - *ppos; | 216 | |
| 180 | *ppos = pos; | 217 | spin_lock_irq(&stagd->devp->lock); |
| 181 | return count; | 218 | idr_for_each(&stagd->devp->mmidr, dump_stag, stagd); |
| 219 | spin_unlock_irq(&stagd->devp->lock); | ||
| 220 | |||
| 221 | stagd->buf[stagd->pos++] = 0; | ||
| 222 | file->private_data = stagd; | ||
| 223 | goto out; | ||
| 224 | err1: | ||
| 225 | kfree(stagd); | ||
| 226 | out: | ||
| 227 | return ret; | ||
| 182 | } | 228 | } |
| 183 | 229 | ||
| 184 | static const struct file_operations qp_debugfs_fops = { | 230 | static const struct file_operations stag_debugfs_fops = { |
| 185 | .owner = THIS_MODULE, | 231 | .owner = THIS_MODULE, |
| 186 | .open = qp_open, | 232 | .open = stag_open, |
| 187 | .release = qp_release, | 233 | .release = stag_release, |
| 188 | .read = qp_read, | 234 | .read = debugfs_read, |
| 235 | .llseek = default_llseek, | ||
| 189 | }; | 236 | }; |
| 190 | 237 | ||
| 191 | static int setup_debugfs(struct c4iw_dev *devp) | 238 | static int setup_debugfs(struct c4iw_dev *devp) |
| @@ -199,6 +246,11 @@ static int setup_debugfs(struct c4iw_dev *devp) | |||
| 199 | (void *)devp, &qp_debugfs_fops); | 246 | (void *)devp, &qp_debugfs_fops); |
| 200 | if (de && de->d_inode) | 247 | if (de && de->d_inode) |
| 201 | de->d_inode->i_size = 4096; | 248 | de->d_inode->i_size = 4096; |
| 249 | |||
| 250 | de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root, | ||
| 251 | (void *)devp, &stag_debugfs_fops); | ||
| 252 | if (de && de->d_inode) | ||
| 253 | de->d_inode->i_size = 4096; | ||
| 202 | return 0; | 254 | return 0; |
| 203 | } | 255 | } |
| 204 | 256 | ||
| @@ -290,7 +342,14 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
| 290 | printk(KERN_ERR MOD "error %d initializing rqt pool\n", err); | 342 | printk(KERN_ERR MOD "error %d initializing rqt pool\n", err); |
| 291 | goto err3; | 343 | goto err3; |
| 292 | } | 344 | } |
| 345 | err = c4iw_ocqp_pool_create(rdev); | ||
| 346 | if (err) { | ||
| 347 | printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err); | ||
| 348 | goto err4; | ||
| 349 | } | ||
| 293 | return 0; | 350 | return 0; |
| 351 | err4: | ||
| 352 | c4iw_rqtpool_destroy(rdev); | ||
| 294 | err3: | 353 | err3: |
| 295 | c4iw_pblpool_destroy(rdev); | 354 | c4iw_pblpool_destroy(rdev); |
| 296 | err2: | 355 | err2: |
| @@ -317,6 +376,7 @@ static void c4iw_remove(struct c4iw_dev *dev) | |||
| 317 | idr_destroy(&dev->cqidr); | 376 | idr_destroy(&dev->cqidr); |
| 318 | idr_destroy(&dev->qpidr); | 377 | idr_destroy(&dev->qpidr); |
| 319 | idr_destroy(&dev->mmidr); | 378 | idr_destroy(&dev->mmidr); |
| 379 | iounmap(dev->rdev.oc_mw_kva); | ||
| 320 | ib_dealloc_device(&dev->ibdev); | 380 | ib_dealloc_device(&dev->ibdev); |
| 321 | } | 381 | } |
| 322 | 382 | ||
| @@ -332,6 +392,17 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
| 332 | } | 392 | } |
| 333 | devp->rdev.lldi = *infop; | 393 | devp->rdev.lldi = *infop; |
| 334 | 394 | ||
| 395 | devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) + | ||
| 396 | (pci_resource_len(devp->rdev.lldi.pdev, 2) - | ||
| 397 | roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size)); | ||
| 398 | devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, | ||
| 399 | devp->rdev.lldi.vr->ocq.size); | ||
| 400 | |||
| 401 | printk(KERN_INFO MOD "ocq memory: " | ||
| 402 | "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", | ||
| 403 | devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, | ||
| 404 | devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); | ||
| 405 | |||
| 335 | mutex_lock(&dev_mutex); | 406 | mutex_lock(&dev_mutex); |
| 336 | 407 | ||
| 337 | ret = c4iw_rdev_open(&devp->rdev); | 408 | ret = c4iw_rdev_open(&devp->rdev); |
| @@ -383,46 +454,6 @@ out: | |||
| 383 | return dev; | 454 | return dev; |
| 384 | } | 455 | } |
| 385 | 456 | ||
| 386 | static struct sk_buff *t4_pktgl_to_skb(const struct pkt_gl *gl, | ||
| 387 | unsigned int skb_len, | ||
| 388 | unsigned int pull_len) | ||
| 389 | { | ||
| 390 | struct sk_buff *skb; | ||
| 391 | struct skb_shared_info *ssi; | ||
| 392 | |||
| 393 | if (gl->tot_len <= 512) { | ||
| 394 | skb = alloc_skb(gl->tot_len, GFP_ATOMIC); | ||
| 395 | if (unlikely(!skb)) | ||
| 396 | goto out; | ||
| 397 | __skb_put(skb, gl->tot_len); | ||
| 398 | skb_copy_to_linear_data(skb, gl->va, gl->tot_len); | ||
| 399 | } else { | ||
| 400 | skb = alloc_skb(skb_len, GFP_ATOMIC); | ||
| 401 | if (unlikely(!skb)) | ||
| 402 | goto out; | ||
| 403 | __skb_put(skb, pull_len); | ||
| 404 | skb_copy_to_linear_data(skb, gl->va, pull_len); | ||
| 405 | |||
| 406 | ssi = skb_shinfo(skb); | ||
| 407 | ssi->frags[0].page = gl->frags[0].page; | ||
| 408 | ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; | ||
| 409 | ssi->frags[0].size = gl->frags[0].size - pull_len; | ||
| 410 | if (gl->nfrags > 1) | ||
| 411 | memcpy(&ssi->frags[1], &gl->frags[1], | ||
| 412 | (gl->nfrags - 1) * sizeof(skb_frag_t)); | ||
| 413 | ssi->nr_frags = gl->nfrags; | ||
| 414 | |||
| 415 | skb->len = gl->tot_len; | ||
| 416 | skb->data_len = skb->len - pull_len; | ||
| 417 | skb->truesize += skb->data_len; | ||
| 418 | |||
| 419 | /* Get a reference for the last page, we don't own it */ | ||
| 420 | get_page(gl->frags[gl->nfrags - 1].page); | ||
| 421 | } | ||
| 422 | out: | ||
| 423 | return skb; | ||
| 424 | } | ||
| 425 | |||
| 426 | static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, | 457 | static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, |
| 427 | const struct pkt_gl *gl) | 458 | const struct pkt_gl *gl) |
| 428 | { | 459 | { |
| @@ -447,7 +478,7 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, | |||
| 447 | c4iw_ev_handler(dev, qid); | 478 | c4iw_ev_handler(dev, qid); |
| 448 | return 0; | 479 | return 0; |
| 449 | } else { | 480 | } else { |
| 450 | skb = t4_pktgl_to_skb(gl, 128, 128); | 481 | skb = cxgb4_pktgl_to_skb(gl, 128, 128); |
| 451 | if (unlikely(!skb)) | 482 | if (unlikely(!skb)) |
| 452 | goto nomem; | 483 | goto nomem; |
| 453 | } | 484 | } |
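The new "stags" debugfs file above leans on two idr_for_each() callbacks, count_idrs() and dump_stag(), which are added elsewhere in device.c and are not visible in these hunks. A minimal sketch of what such callbacks look like, assuming the same c4iw_debugfs_data layout that stag_open() fills in; the bodies here are illustrative, not the committed ones:

    /* Sketch only: the committed callbacks live elsewhere in device.c.
     * c4iw_debugfs_data is the same { devp, buf, bufsize, pos } struct
     * that stag_open() populates above. */
    static int count_idrs(int id, void *p, void *data)
    {
            int *countp = data;

            *countp = *countp + 1;          /* one entry per object in the idr */
            return 0;
    }

    static int dump_stag(int id, void *p, void *data)
    {
            struct c4iw_debugfs_data *stagd = data;
            int space = stagd->bufsize - stagd->pos - 1;

            if (space <= 0)
                    return 1;               /* buffer full: stop the idr walk */

            /* one "0x%x\n" line per stag, matching the sizing in stag_open() */
            stagd->pos += scnprintf(stagd->buf + stagd->pos, space + 1,
                                    "0x%x\n", id);
            return 0;
    }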
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index 491e76a0327f..c13041a0aeba 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c | |||
| @@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, | |||
| 60 | if (qhp->attr.state == C4IW_QP_STATE_RTS) { | 60 | if (qhp->attr.state == C4IW_QP_STATE_RTS) { |
| 61 | attrs.next_state = C4IW_QP_STATE_TERMINATE; | 61 | attrs.next_state = C4IW_QP_STATE_TERMINATE; |
| 62 | c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, | 62 | c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, |
| 63 | &attrs, 1); | 63 | &attrs, 0); |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | event.event = ib_event; | 66 | event.event = ib_event; |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index ed459b8f800f..16032cdb4337 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | #include <linux/timer.h> | 46 | #include <linux/timer.h> |
| 47 | #include <linux/io.h> | 47 | #include <linux/io.h> |
| 48 | #include <linux/kfifo.h> | 48 | #include <linux/kfifo.h> |
| 49 | #include <linux/mutex.h> | ||
| 49 | 50 | ||
| 50 | #include <asm/byteorder.h> | 51 | #include <asm/byteorder.h> |
| 51 | 52 | ||
| @@ -79,21 +80,6 @@ static inline void *cplhdr(struct sk_buff *skb) | |||
| 79 | return skb->data; | 80 | return skb->data; |
| 80 | } | 81 | } |
| 81 | 82 | ||
| 82 | #define C4IW_WR_TO (10*HZ) | ||
| 83 | |||
| 84 | struct c4iw_wr_wait { | ||
| 85 | wait_queue_head_t wait; | ||
| 86 | int done; | ||
| 87 | int ret; | ||
| 88 | }; | ||
| 89 | |||
| 90 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) | ||
| 91 | { | ||
| 92 | wr_waitp->ret = 0; | ||
| 93 | wr_waitp->done = 0; | ||
| 94 | init_waitqueue_head(&wr_waitp->wait); | ||
| 95 | } | ||
| 96 | |||
| 97 | struct c4iw_resource { | 83 | struct c4iw_resource { |
| 98 | struct kfifo tpt_fifo; | 84 | struct kfifo tpt_fifo; |
| 99 | spinlock_t tpt_fifo_lock; | 85 | spinlock_t tpt_fifo_lock; |
| @@ -127,8 +113,11 @@ struct c4iw_rdev { | |||
| 127 | struct c4iw_dev_ucontext uctx; | 113 | struct c4iw_dev_ucontext uctx; |
| 128 | struct gen_pool *pbl_pool; | 114 | struct gen_pool *pbl_pool; |
| 129 | struct gen_pool *rqt_pool; | 115 | struct gen_pool *rqt_pool; |
| 116 | struct gen_pool *ocqp_pool; | ||
| 130 | u32 flags; | 117 | u32 flags; |
| 131 | struct cxgb4_lld_info lldi; | 118 | struct cxgb4_lld_info lldi; |
| 119 | unsigned long oc_mw_pa; | ||
| 120 | void __iomem *oc_mw_kva; | ||
| 132 | }; | 121 | }; |
| 133 | 122 | ||
| 134 | static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) | 123 | static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) |
| @@ -141,6 +130,44 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) | |||
| 141 | return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5)); | 130 | return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5)); |
| 142 | } | 131 | } |
| 143 | 132 | ||
| 133 | #define C4IW_WR_TO (10*HZ) | ||
| 134 | |||
| 135 | struct c4iw_wr_wait { | ||
| 136 | wait_queue_head_t wait; | ||
| 137 | int done; | ||
| 138 | int ret; | ||
| 139 | }; | ||
| 140 | |||
| 141 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) | ||
| 142 | { | ||
| 143 | wr_waitp->ret = 0; | ||
| 144 | wr_waitp->done = 0; | ||
| 145 | init_waitqueue_head(&wr_waitp->wait); | ||
| 146 | } | ||
| 147 | |||
| 148 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, | ||
| 149 | struct c4iw_wr_wait *wr_waitp, | ||
| 150 | u32 hwtid, u32 qpid, | ||
| 151 | const char *func) | ||
| 152 | { | ||
| 153 | unsigned to = C4IW_WR_TO; | ||
| 154 | do { | ||
| 155 | |||
| 156 | wait_event_timeout(wr_waitp->wait, wr_waitp->done, to); | ||
| 157 | if (!wr_waitp->done) { | ||
| 158 | printk(KERN_ERR MOD "%s - Device %s not responding - " | ||
| 159 | "tid %u qpid %u\n", func, | ||
| 160 | pci_name(rdev->lldi.pdev), hwtid, qpid); | ||
| 161 | to = to << 2; | ||
| 162 | } | ||
| 163 | } while (!wr_waitp->done); | ||
| 164 | if (wr_waitp->ret) | ||
| 165 | printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n", | ||
| 166 | pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); | ||
| 167 | return wr_waitp->ret; | ||
| 168 | } | ||
| 169 | |||
| 170 | |||
| 144 | struct c4iw_dev { | 171 | struct c4iw_dev { |
| 145 | struct ib_device ibdev; | 172 | struct ib_device ibdev; |
| 146 | struct c4iw_rdev rdev; | 173 | struct c4iw_rdev rdev; |
| @@ -327,6 +354,7 @@ struct c4iw_qp { | |||
| 327 | struct c4iw_qp_attributes attr; | 354 | struct c4iw_qp_attributes attr; |
| 328 | struct t4_wq wq; | 355 | struct t4_wq wq; |
| 329 | spinlock_t lock; | 356 | spinlock_t lock; |
| 357 | struct mutex mutex; | ||
| 330 | atomic_t refcnt; | 358 | atomic_t refcnt; |
| 331 | wait_queue_head_t wait; | 359 | wait_queue_head_t wait; |
| 332 | struct timer_list timer; | 360 | struct timer_list timer; |
| @@ -579,12 +607,10 @@ struct c4iw_ep_common { | |||
| 579 | struct c4iw_dev *dev; | 607 | struct c4iw_dev *dev; |
| 580 | enum c4iw_ep_state state; | 608 | enum c4iw_ep_state state; |
| 581 | struct kref kref; | 609 | struct kref kref; |
| 582 | spinlock_t lock; | 610 | struct mutex mutex; |
| 583 | struct sockaddr_in local_addr; | 611 | struct sockaddr_in local_addr; |
| 584 | struct sockaddr_in remote_addr; | 612 | struct sockaddr_in remote_addr; |
| 585 | wait_queue_head_t waitq; | 613 | struct c4iw_wr_wait wr_wait; |
| 586 | int rpl_done; | ||
| 587 | int rpl_err; | ||
| 588 | unsigned long flags; | 614 | unsigned long flags; |
| 589 | }; | 615 | }; |
| 590 | 616 | ||
| @@ -654,8 +680,10 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid); | |||
| 654 | int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev); | 680 | int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev); |
| 655 | int c4iw_pblpool_create(struct c4iw_rdev *rdev); | 681 | int c4iw_pblpool_create(struct c4iw_rdev *rdev); |
| 656 | int c4iw_rqtpool_create(struct c4iw_rdev *rdev); | 682 | int c4iw_rqtpool_create(struct c4iw_rdev *rdev); |
| 683 | int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev); | ||
| 657 | void c4iw_pblpool_destroy(struct c4iw_rdev *rdev); | 684 | void c4iw_pblpool_destroy(struct c4iw_rdev *rdev); |
| 658 | void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev); | 685 | void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev); |
| 686 | void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev); | ||
| 659 | void c4iw_destroy_resource(struct c4iw_resource *rscp); | 687 | void c4iw_destroy_resource(struct c4iw_resource *rscp); |
| 660 | int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev); | 688 | int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev); |
| 661 | int c4iw_register_device(struct c4iw_dev *dev); | 689 | int c4iw_register_device(struct c4iw_dev *dev); |
| @@ -721,6 +749,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size); | |||
| 721 | void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size); | 749 | void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size); |
| 722 | u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size); | 750 | u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size); |
| 723 | void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size); | 751 | void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size); |
| 752 | u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size); | ||
| 753 | void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size); | ||
| 724 | int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb); | 754 | int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb); |
| 725 | void c4iw_flush_hw_cq(struct t4_cq *cq); | 755 | void c4iw_flush_hw_cq(struct t4_cq *cq); |
| 726 | void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count); | 756 | void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count); |
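c4iw_wait_for_reply() above is only the sleeping half of the handshake; the firmware-reply path (the CPL/CQE handlers in cm.c and friends, not shown in this hunk) is expected to recover the c4iw_wr_wait from the cookie that was posted with the request, record the status, and wake the sleeper. A hedged sketch of that completion side (the handler name and the way the cookie arrives are illustrative):

    /* Illustrative wake-up side, matching struct c4iw_wr_wait above. */
    static void example_fw_reply(u64 cookie, int fw_status)
    {
            struct c4iw_wr_wait *wr_waitp;

            /* the cookie is the pointer stashed in wr_lo / res_wr->cookie */
            wr_waitp = (struct c4iw_wr_wait *)(unsigned long)cookie;

            wr_waitp->ret = fw_status;      /* reported back by c4iw_wait_for_reply() */
            wr_waitp->done = 1;             /* satisfies the wait_event_timeout() test */
            wake_up(&wr_waitp->wait);
    }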
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 269373a62f22..273ffe49525a 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
| @@ -71,7 +71,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, | |||
| 71 | if (i == (num_wqe-1)) { | 71 | if (i == (num_wqe-1)) { |
| 72 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) | | 72 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) | |
| 73 | FW_WR_COMPL(1)); | 73 | FW_WR_COMPL(1)); |
| 74 | req->wr.wr_lo = (__force __be64)&wr_wait; | 74 | req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; |
| 75 | } else | 75 | } else |
| 76 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR)); | 76 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR)); |
| 77 | req->wr.wr_mid = cpu_to_be32( | 77 | req->wr.wr_mid = cpu_to_be32( |
| @@ -103,14 +103,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, | |||
| 103 | len -= C4IW_MAX_INLINE_SIZE; | 103 | len -= C4IW_MAX_INLINE_SIZE; |
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); | 106 | ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__); |
| 107 | if (!wr_wait.done) { | ||
| 108 | printk(KERN_ERR MOD "Device %s not responding!\n", | ||
| 109 | pci_name(rdev->lldi.pdev)); | ||
| 110 | rdev->flags = T4_FATAL_ERROR; | ||
| 111 | ret = -EIO; | ||
| 112 | } else | ||
| 113 | ret = wr_wait.ret; | ||
| 114 | return ret; | 107 | return ret; |
| 115 | } | 108 | } |
| 116 | 109 | ||
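The extra (unsigned long) in the wr_lo assignment is a portability detail: casting a pointer straight to a 64-bit type warns on 32-bit builds ("cast from pointer to integer of different size"), whereas pointer to unsigned long to 64-bit is always size-matched. The same pattern in plain user-space C, with uint64_t standing in for __be64 (which is just an annotated 64-bit integer):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int cookie;

            /* uint64_t bad = (uint64_t)&cookie;   <- warns when pointers are 32-bit */

            /* pointer -> unsigned long is always the native pointer width, and
             * widening unsigned long -> uint64_t is an ordinary integer conversion */
            uint64_t ok = (uint64_t)(unsigned long)&cookie;

            printf("cookie handle: 0x%llx\n", (unsigned long long)ok);
            return 0;
    }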
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 8f645c83a125..f66dd8bf5128 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c | |||
| @@ -54,9 +54,9 @@ | |||
| 54 | 54 | ||
| 55 | #include "iw_cxgb4.h" | 55 | #include "iw_cxgb4.h" |
| 56 | 56 | ||
| 57 | static int fastreg_support; | 57 | static int fastreg_support = 1; |
| 58 | module_param(fastreg_support, int, 0644); | 58 | module_param(fastreg_support, int, 0644); |
| 59 | MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=0)"); | 59 | MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)"); |
| 60 | 60 | ||
| 61 | static int c4iw_modify_port(struct ib_device *ibdev, | 61 | static int c4iw_modify_port(struct ib_device *ibdev, |
| 62 | u8 port, int port_modify_mask, | 62 | u8 port, int port_modify_mask, |
| @@ -149,19 +149,28 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) | |||
| 149 | addr = mm->addr; | 149 | addr = mm->addr; |
| 150 | kfree(mm); | 150 | kfree(mm); |
| 151 | 151 | ||
| 152 | if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) && | 152 | if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) && |
| 153 | (addr < (pci_resource_start(rdev->lldi.pdev, 2) + | 153 | (addr < (pci_resource_start(rdev->lldi.pdev, 0) + |
| 154 | pci_resource_len(rdev->lldi.pdev, 2)))) { | 154 | pci_resource_len(rdev->lldi.pdev, 0)))) { |
| 155 | 155 | ||
| 156 | /* | 156 | /* |
| 157 | * Map T4 DB register. | 157 | * MA_SYNC register... |
| 158 | */ | 158 | */ |
| 159 | if (vma->vm_flags & VM_READ) | ||
| 160 | return -EPERM; | ||
| 161 | |||
| 162 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 159 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); |
| 163 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | 160 | ret = io_remap_pfn_range(vma, vma->vm_start, |
| 164 | vma->vm_flags &= ~VM_MAYREAD; | 161 | addr >> PAGE_SHIFT, |
| 162 | len, vma->vm_page_prot); | ||
| 163 | } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) && | ||
| 164 | (addr < (pci_resource_start(rdev->lldi.pdev, 2) + | ||
| 165 | pci_resource_len(rdev->lldi.pdev, 2)))) { | ||
| 166 | |||
| 167 | /* | ||
| 168 | * Map user DB or OCQP memory... | ||
| 169 | */ | ||
| 170 | if (addr >= rdev->oc_mw_pa) | ||
| 171 | vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot); | ||
| 172 | else | ||
| 173 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
| 165 | ret = io_remap_pfn_range(vma, vma->vm_start, | 174 | ret = io_remap_pfn_range(vma, vma->vm_start, |
| 166 | addr >> PAGE_SHIFT, | 175 | addr >> PAGE_SHIFT, |
| 167 | len, vma->vm_page_prot); | 176 | len, vma->vm_page_prot); |
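c4iw_mmap() now distinguishes three kinds of offsets handed back to user space: BAR0 addresses (the MA_SYNC register page), BAR2 addresses at or above oc_mw_pa (on-chip queue memory, mapped write-combined via t4_pgprot_wc()), and the remaining BAR2 addresses (user doorbells, mapped uncached). The classification pulled out into a standalone helper, purely as a sketch; this function does not exist in the driver, which open-codes the tests above:

    /* Illustrative helper only. */
    enum c4iw_map_kind {
            C4IW_MAP_MA_SYNC,       /* BAR0: MA_SYNC register page */
            C4IW_MAP_OCQP_WC,       /* BAR2, >= oc_mw_pa: on-chip SQ, write-combined */
            C4IW_MAP_DB_UC,         /* BAR2, below oc_mw_pa: doorbell, uncached */
            C4IW_MAP_HOST,          /* anything else: ordinary host queue memory */
    };

    static enum c4iw_map_kind classify_mmap(struct c4iw_rdev *rdev, u64 addr)
    {
            struct pci_dev *pdev = rdev->lldi.pdev;

            if (addr >= pci_resource_start(pdev, 0) &&
                addr < pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0))
                    return C4IW_MAP_MA_SYNC;
            if (addr >= pci_resource_start(pdev, 2) &&
                addr < pci_resource_start(pdev, 2) + pci_resource_len(pdev, 2))
                    return addr >= rdev->oc_mw_pa ? C4IW_MAP_OCQP_WC
                                                  : C4IW_MAP_DB_UC;
            return C4IW_MAP_HOST;
    }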
| @@ -382,7 +391,17 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr, | |||
| 382 | static int c4iw_get_mib(struct ib_device *ibdev, | 391 | static int c4iw_get_mib(struct ib_device *ibdev, |
| 383 | union rdma_protocol_stats *stats) | 392 | union rdma_protocol_stats *stats) |
| 384 | { | 393 | { |
| 385 | return -ENOSYS; | 394 | struct tp_tcp_stats v4, v6; |
| 395 | struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev); | ||
| 396 | |||
| 397 | cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6); | ||
| 398 | memset(stats, 0, sizeof *stats); | ||
| 399 | stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs; | ||
| 400 | stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs; | ||
| 401 | stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs; | ||
| 402 | stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutSegs; | ||
| 403 | |||
| 404 | return 0; | ||
| 386 | } | 405 | } |
| 387 | 406 | ||
| 388 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); | 407 | static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); |
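One nit in the new c4iw_get_mib(): the tcpOutRsts line sums v4.tcpOutRsts with v6.tcpOutSegs, which reads like a copy-and-paste slip; presumably each counter should be aggregated with its own v6 counterpart. A suggested form of the aggregation, not the committed code:

    /* Suggested aggregation; the hunk above adds v6.tcpOutSegs on the last line. */
    cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
    memset(stats, 0, sizeof *stats);
    stats->iw.tcpInSegs      = v4.tcpInSegs      + v6.tcpInSegs;
    stats->iw.tcpOutSegs     = v4.tcpOutSegs     + v6.tcpOutSegs;
    stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
    stats->iw.tcpOutRsts     = v4.tcpOutRsts     + v6.tcpOutRsts;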
| @@ -472,6 +491,7 @@ int c4iw_register_device(struct c4iw_dev *dev) | |||
| 472 | dev->ibdev.post_send = c4iw_post_send; | 491 | dev->ibdev.post_send = c4iw_post_send; |
| 473 | dev->ibdev.post_recv = c4iw_post_receive; | 492 | dev->ibdev.post_recv = c4iw_post_receive; |
| 474 | dev->ibdev.get_protocol_stats = c4iw_get_mib; | 493 | dev->ibdev.get_protocol_stats = c4iw_get_mib; |
| 494 | dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; | ||
| 475 | 495 | ||
| 476 | dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); | 496 | dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); |
| 477 | if (!dev->ibdev.iwcm) | 497 | if (!dev->ibdev.iwcm) |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 93f6e5bf0ec5..057cb2505ea1 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
| @@ -31,6 +31,63 @@ | |||
| 31 | */ | 31 | */ |
| 32 | #include "iw_cxgb4.h" | 32 | #include "iw_cxgb4.h" |
| 33 | 33 | ||
| 34 | static int ocqp_support; | ||
| 35 | module_param(ocqp_support, int, 0644); | ||
| 36 | MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=0)"); | ||
| 37 | |||
| 38 | static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) | ||
| 39 | { | ||
| 40 | unsigned long flag; | ||
| 41 | spin_lock_irqsave(&qhp->lock, flag); | ||
| 42 | qhp->attr.state = state; | ||
| 43 | spin_unlock_irqrestore(&qhp->lock, flag); | ||
| 44 | } | ||
| 45 | |||
| 46 | static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | ||
| 47 | { | ||
| 48 | c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize); | ||
| 49 | } | ||
| 50 | |||
| 51 | static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | ||
| 52 | { | ||
| 53 | dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue, | ||
| 54 | pci_unmap_addr(sq, mapping)); | ||
| 55 | } | ||
| 56 | |||
| 57 | static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | ||
| 58 | { | ||
| 59 | if (t4_sq_onchip(sq)) | ||
| 60 | dealloc_oc_sq(rdev, sq); | ||
| 61 | else | ||
| 62 | dealloc_host_sq(rdev, sq); | ||
| 63 | } | ||
| 64 | |||
| 65 | static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | ||
| 66 | { | ||
| 67 | if (!ocqp_support || !t4_ocqp_supported()) | ||
| 68 | return -ENOSYS; | ||
| 69 | sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize); | ||
| 70 | if (!sq->dma_addr) | ||
| 71 | return -ENOMEM; | ||
| 72 | sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr - | ||
| 73 | rdev->lldi.vr->ocq.start; | ||
| 74 | sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr - | ||
| 75 | rdev->lldi.vr->ocq.start); | ||
| 76 | sq->flags |= T4_SQ_ONCHIP; | ||
| 77 | return 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) | ||
| 81 | { | ||
| 82 | sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize, | ||
| 83 | &(sq->dma_addr), GFP_KERNEL); | ||
| 84 | if (!sq->queue) | ||
| 85 | return -ENOMEM; | ||
| 86 | sq->phys_addr = virt_to_phys(sq->queue); | ||
| 87 | pci_unmap_addr_set(sq, mapping, sq->dma_addr); | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | |||
| 34 | static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | 91 | static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, |
| 35 | struct c4iw_dev_ucontext *uctx) | 92 | struct c4iw_dev_ucontext *uctx) |
| 36 | { | 93 | { |
| @@ -41,9 +98,7 @@ static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
| 41 | dma_free_coherent(&(rdev->lldi.pdev->dev), | 98 | dma_free_coherent(&(rdev->lldi.pdev->dev), |
| 42 | wq->rq.memsize, wq->rq.queue, | 99 | wq->rq.memsize, wq->rq.queue, |
| 43 | dma_unmap_addr(&wq->rq, mapping)); | 100 | dma_unmap_addr(&wq->rq, mapping)); |
| 44 | dma_free_coherent(&(rdev->lldi.pdev->dev), | 101 | dealloc_sq(rdev, &wq->sq); |
| 45 | wq->sq.memsize, wq->sq.queue, | ||
| 46 | dma_unmap_addr(&wq->sq, mapping)); | ||
| 47 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); | 102 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); |
| 48 | kfree(wq->rq.sw_rq); | 103 | kfree(wq->rq.sw_rq); |
| 49 | kfree(wq->sq.sw_sq); | 104 | kfree(wq->sq.sw_sq); |
| @@ -93,11 +148,12 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
| 93 | if (!wq->rq.rqt_hwaddr) | 148 | if (!wq->rq.rqt_hwaddr) |
| 94 | goto err4; | 149 | goto err4; |
| 95 | 150 | ||
| 96 | wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), | 151 | if (user) { |
| 97 | wq->sq.memsize, &(wq->sq.dma_addr), | 152 | if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq)) |
| 98 | GFP_KERNEL); | 153 | goto err5; |
| 99 | if (!wq->sq.queue) | 154 | } else |
| 100 | goto err5; | 155 | if (alloc_host_sq(rdev, &wq->sq)) |
| 156 | goto err5; | ||
| 101 | memset(wq->sq.queue, 0, wq->sq.memsize); | 157 | memset(wq->sq.queue, 0, wq->sq.memsize); |
| 102 | dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); | 158 | dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); |
| 103 | 159 | ||
| @@ -144,7 +200,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
| 144 | V_FW_RI_RES_WR_NRES(2) | | 200 | V_FW_RI_RES_WR_NRES(2) | |
| 145 | FW_WR_COMPL(1)); | 201 | FW_WR_COMPL(1)); |
| 146 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); | 202 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
| 147 | res_wr->cookie = (u64)&wr_wait; | 203 | res_wr->cookie = (unsigned long) &wr_wait; |
| 148 | res = res_wr->res; | 204 | res = res_wr->res; |
| 149 | res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; | 205 | res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; |
| 150 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; | 206 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; |
| @@ -158,6 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
| 158 | V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ | 214 | V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ |
| 159 | V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ | 215 | V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ |
| 160 | V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ | 216 | V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ |
| 217 | t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 | | ||
| 161 | V_FW_RI_RES_WR_IQID(scq->cqid)); | 218 | V_FW_RI_RES_WR_IQID(scq->cqid)); |
| 162 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( | 219 | res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( |
| 163 | V_FW_RI_RES_WR_DCAEN(0) | | 220 | V_FW_RI_RES_WR_DCAEN(0) | |
| @@ -198,14 +255,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
| 198 | ret = c4iw_ofld_send(rdev, skb); | 255 | ret = c4iw_ofld_send(rdev, skb); |
| 199 | if (ret) | 256 | if (ret) |
| 200 | goto err7; | 257 | goto err7; |
| 201 | wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); | 258 | ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__); |
| 202 | if (!wr_wait.done) { | ||
| 203 | printk(KERN_ERR MOD "Device %s not responding!\n", | ||
| 204 | pci_name(rdev->lldi.pdev)); | ||
| 205 | rdev->flags = T4_FATAL_ERROR; | ||
| 206 | ret = -EIO; | ||
| 207 | } else | ||
| 208 | ret = wr_wait.ret; | ||
| 209 | if (ret) | 259 | if (ret) |
| 210 | goto err7; | 260 | goto err7; |
| 211 | 261 | ||
| @@ -219,9 +269,7 @@ err7: | |||
| 219 | wq->rq.memsize, wq->rq.queue, | 269 | wq->rq.memsize, wq->rq.queue, |
| 220 | dma_unmap_addr(&wq->rq, mapping)); | 270 | dma_unmap_addr(&wq->rq, mapping)); |
| 221 | err6: | 271 | err6: |
| 222 | dma_free_coherent(&(rdev->lldi.pdev->dev), | 272 | dealloc_sq(rdev, &wq->sq); |
| 223 | wq->sq.memsize, wq->sq.queue, | ||
| 224 | dma_unmap_addr(&wq->sq, mapping)); | ||
| 225 | err5: | 273 | err5: |
| 226 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); | 274 | c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); |
| 227 | err4: | 275 | err4: |
| @@ -263,6 +311,9 @@ static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp, | |||
| 263 | rem -= len; | 311 | rem -= len; |
| 264 | } | 312 | } |
| 265 | } | 313 | } |
| 314 | len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp); | ||
| 315 | if (len) | ||
| 316 | memset(dstp, 0, len); | ||
| 266 | immdp->op = FW_RI_DATA_IMMD; | 317 | immdp->op = FW_RI_DATA_IMMD; |
| 267 | immdp->r1 = 0; | 318 | immdp->r1 = 0; |
| 268 | immdp->r2 = 0; | 319 | immdp->r2 = 0; |
| @@ -292,6 +343,7 @@ static int build_isgl(__be64 *queue_start, __be64 *queue_end, | |||
| 292 | if (++flitp == queue_end) | 343 | if (++flitp == queue_end) |
| 293 | flitp = queue_start; | 344 | flitp = queue_start; |
| 294 | } | 345 | } |
| 346 | *flitp = (__force __be64)0; | ||
| 295 | isglp->op = FW_RI_DATA_ISGL; | 347 | isglp->op = FW_RI_DATA_ISGL; |
| 296 | isglp->r1 = 0; | 348 | isglp->r1 = 0; |
| 297 | isglp->nsge = cpu_to_be16(num_sge); | 349 | isglp->nsge = cpu_to_be16(num_sge); |
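The two small additions above (the memset() tail in build_immd() and the zero flit written by build_isgl()) pad each WR out to its 16-byte boundary so that stale queue contents never ride along in the request. The pad length is simply roundup(x, 16) - x; a quick user-space check of the arithmetic, with made-up sizes:

    #include <stdio.h>

    /* same semantics as the kernel's roundup(x, y) for positive y */
    #define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

    int main(void)
    {
            unsigned hdr = 8;               /* illustrative header size in bytes */
            unsigned plen = 45;             /* illustrative immediate payload length */
            unsigned used = hdr + plen;     /* 53 bytes consumed so far */
            unsigned pad = ROUNDUP(used, 16) - used;

            /* 53 rounds up to 64, so 11 trailing bytes get zeroed */
            printf("used=%u rounded=%u pad=%u\n", used, ROUNDUP(used, 16), pad);
            return 0;
    }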
| @@ -453,13 +505,15 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, | |||
| 453 | return 0; | 505 | return 0; |
| 454 | } | 506 | } |
| 455 | 507 | ||
| 456 | static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) | 508 | static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, |
| 509 | struct ib_send_wr *wr, u8 *len16) | ||
| 457 | { | 510 | { |
| 458 | 511 | ||
| 459 | struct fw_ri_immd *imdp; | 512 | struct fw_ri_immd *imdp; |
| 460 | __be64 *p; | 513 | __be64 *p; |
| 461 | int i; | 514 | int i; |
| 462 | int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); | 515 | int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32); |
| 516 | int rem; | ||
| 463 | 517 | ||
| 464 | if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH) | 518 | if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH) |
| 465 | return -EINVAL; | 519 | return -EINVAL; |
| @@ -474,32 +528,28 @@ static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) | |||
| 474 | wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); | 528 | wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); |
| 475 | wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & | 529 | wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & |
| 476 | 0xffffffff); | 530 | 0xffffffff); |
| 477 | if (pbllen > T4_MAX_FR_IMMD) { | 531 | WARN_ON(pbllen > T4_MAX_FR_IMMD); |
| 478 | struct c4iw_fr_page_list *c4pl = | 532 | imdp = (struct fw_ri_immd *)(&wqe->fr + 1); |
| 479 | to_c4iw_fr_page_list(wr->wr.fast_reg.page_list); | 533 | imdp->op = FW_RI_DATA_IMMD; |
| 480 | struct fw_ri_dsgl *sglp; | 534 | imdp->r1 = 0; |
| 481 | 535 | imdp->r2 = 0; | |
| 482 | sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1); | 536 | imdp->immdlen = cpu_to_be32(pbllen); |
| 483 | sglp->op = FW_RI_DATA_DSGL; | 537 | p = (__be64 *)(imdp + 1); |
| 484 | sglp->r1 = 0; | 538 | rem = pbllen; |
| 485 | sglp->nsge = cpu_to_be16(1); | 539 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { |
| 486 | sglp->addr0 = cpu_to_be64(c4pl->dma_addr); | 540 | *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]); |
| 487 | sglp->len0 = cpu_to_be32(pbllen); | 541 | rem -= sizeof *p; |
| 488 | 542 | if (++p == (__be64 *)&sq->queue[sq->size]) | |
| 489 | *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16); | 543 | p = (__be64 *)sq->queue; |
| 490 | } else { | ||
| 491 | imdp = (struct fw_ri_immd *)(&wqe->fr + 1); | ||
| 492 | imdp->op = FW_RI_DATA_IMMD; | ||
| 493 | imdp->r1 = 0; | ||
| 494 | imdp->r2 = 0; | ||
| 495 | imdp->immdlen = cpu_to_be32(pbllen); | ||
| 496 | p = (__be64 *)(imdp + 1); | ||
| 497 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) | ||
| 498 | *p = cpu_to_be64( | ||
| 499 | (u64)wr->wr.fast_reg.page_list->page_list[i]); | ||
| 500 | *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, | ||
| 501 | 16); | ||
| 502 | } | 544 | } |
| 545 | BUG_ON(rem < 0); | ||
| 546 | while (rem) { | ||
| 547 | *p = 0; | ||
| 548 | rem -= sizeof *p; | ||
| 549 | if (++p == (__be64 *)&sq->queue[sq->size]) | ||
| 550 | p = (__be64 *)sq->queue; | ||
| 551 | } | ||
| 552 | *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16); | ||
| 503 | return 0; | 553 | return 0; |
| 504 | } | 554 | } |
| 505 | 555 | ||
| @@ -587,7 +637,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 587 | fw_opcode = FW_RI_RDMA_READ_WR; | 637 | fw_opcode = FW_RI_RDMA_READ_WR; |
| 588 | swsqe->opcode = FW_RI_READ_REQ; | 638 | swsqe->opcode = FW_RI_READ_REQ; |
| 589 | if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) | 639 | if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) |
| 590 | fw_flags |= FW_RI_RDMA_READ_INVALIDATE; | 640 | fw_flags = FW_RI_RDMA_READ_INVALIDATE; |
| 591 | else | 641 | else |
| 592 | fw_flags = 0; | 642 | fw_flags = 0; |
| 593 | err = build_rdma_read(wqe, wr, &len16); | 643 | err = build_rdma_read(wqe, wr, &len16); |
| @@ -600,7 +650,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 600 | case IB_WR_FAST_REG_MR: | 650 | case IB_WR_FAST_REG_MR: |
| 601 | fw_opcode = FW_RI_FR_NSMR_WR; | 651 | fw_opcode = FW_RI_FR_NSMR_WR; |
| 602 | swsqe->opcode = FW_RI_FAST_REGISTER; | 652 | swsqe->opcode = FW_RI_FAST_REGISTER; |
| 603 | err = build_fastreg(wqe, wr, &len16); | 653 | err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16); |
| 604 | break; | 654 | break; |
| 605 | case IB_WR_LOCAL_INV: | 655 | case IB_WR_LOCAL_INV: |
| 606 | if (wr->send_flags & IB_SEND_FENCE) | 656 | if (wr->send_flags & IB_SEND_FENCE) |
| @@ -905,46 +955,38 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, | |||
| 905 | * Assumes qhp lock is held. | 955 | * Assumes qhp lock is held. |
| 906 | */ | 956 | */ |
| 907 | static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | 957 | static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, |
| 908 | struct c4iw_cq *schp, unsigned long *flag) | 958 | struct c4iw_cq *schp) |
| 909 | { | 959 | { |
| 910 | int count; | 960 | int count; |
| 911 | int flushed; | 961 | int flushed; |
| 962 | unsigned long flag; | ||
| 912 | 963 | ||
| 913 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); | 964 | PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp); |
| 914 | /* take a ref on the qhp since we must release the lock */ | ||
| 915 | atomic_inc(&qhp->refcnt); | ||
| 916 | spin_unlock_irqrestore(&qhp->lock, *flag); | ||
| 917 | 965 | ||
| 918 | /* locking hierarchy: cq lock first, then qp lock. */ | 966 | /* locking hierarchy: cq lock first, then qp lock. */ |
| 919 | spin_lock_irqsave(&rchp->lock, *flag); | 967 | spin_lock_irqsave(&rchp->lock, flag); |
| 920 | spin_lock(&qhp->lock); | 968 | spin_lock(&qhp->lock); |
| 921 | c4iw_flush_hw_cq(&rchp->cq); | 969 | c4iw_flush_hw_cq(&rchp->cq); |
| 922 | c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); | 970 | c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); |
| 923 | flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); | 971 | flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); |
| 924 | spin_unlock(&qhp->lock); | 972 | spin_unlock(&qhp->lock); |
| 925 | spin_unlock_irqrestore(&rchp->lock, *flag); | 973 | spin_unlock_irqrestore(&rchp->lock, flag); |
| 926 | if (flushed) | 974 | if (flushed) |
| 927 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); | 975 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); |
| 928 | 976 | ||
| 929 | /* locking hierarchy: cq lock first, then qp lock. */ | 977 | /* locking hierarchy: cq lock first, then qp lock. */ |
| 930 | spin_lock_irqsave(&schp->lock, *flag); | 978 | spin_lock_irqsave(&schp->lock, flag); |
| 931 | spin_lock(&qhp->lock); | 979 | spin_lock(&qhp->lock); |
| 932 | c4iw_flush_hw_cq(&schp->cq); | 980 | c4iw_flush_hw_cq(&schp->cq); |
| 933 | c4iw_count_scqes(&schp->cq, &qhp->wq, &count); | 981 | c4iw_count_scqes(&schp->cq, &qhp->wq, &count); |
| 934 | flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); | 982 | flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count); |
| 935 | spin_unlock(&qhp->lock); | 983 | spin_unlock(&qhp->lock); |
| 936 | spin_unlock_irqrestore(&schp->lock, *flag); | 984 | spin_unlock_irqrestore(&schp->lock, flag); |
| 937 | if (flushed) | 985 | if (flushed) |
| 938 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); | 986 | (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); |
| 939 | |||
| 940 | /* deref */ | ||
| 941 | if (atomic_dec_and_test(&qhp->refcnt)) | ||
| 942 | wake_up(&qhp->wait); | ||
| 943 | |||
| 944 | spin_lock_irqsave(&qhp->lock, *flag); | ||
| 945 | } | 987 | } |
| 946 | 988 | ||
| 947 | static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag) | 989 | static void flush_qp(struct c4iw_qp *qhp) |
| 948 | { | 990 | { |
| 949 | struct c4iw_cq *rchp, *schp; | 991 | struct c4iw_cq *rchp, *schp; |
| 950 | 992 | ||
| @@ -958,7 +1000,7 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag) | |||
| 958 | t4_set_cq_in_error(&schp->cq); | 1000 | t4_set_cq_in_error(&schp->cq); |
| 959 | return; | 1001 | return; |
| 960 | } | 1002 | } |
| 961 | __flush_qp(qhp, rchp, schp, flag); | 1003 | __flush_qp(qhp, rchp, schp); |
| 962 | } | 1004 | } |
| 963 | 1005 | ||
| 964 | static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | 1006 | static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, |
| @@ -966,7 +1008,6 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 966 | { | 1008 | { |
| 967 | struct fw_ri_wr *wqe; | 1009 | struct fw_ri_wr *wqe; |
| 968 | int ret; | 1010 | int ret; |
| 969 | struct c4iw_wr_wait wr_wait; | ||
| 970 | struct sk_buff *skb; | 1011 | struct sk_buff *skb; |
| 971 | 1012 | ||
| 972 | PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, | 1013 | PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, |
| @@ -985,28 +1026,16 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 985 | wqe->flowid_len16 = cpu_to_be32( | 1026 | wqe->flowid_len16 = cpu_to_be32( |
| 986 | FW_WR_FLOWID(ep->hwtid) | | 1027 | FW_WR_FLOWID(ep->hwtid) | |
| 987 | FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); | 1028 | FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); |
| 988 | wqe->cookie = (u64)&wr_wait; | 1029 | wqe->cookie = (unsigned long) &ep->com.wr_wait; |
| 989 | 1030 | ||
| 990 | wqe->u.fini.type = FW_RI_TYPE_FINI; | 1031 | wqe->u.fini.type = FW_RI_TYPE_FINI; |
| 991 | c4iw_init_wr_wait(&wr_wait); | 1032 | c4iw_init_wr_wait(&ep->com.wr_wait); |
| 992 | ret = c4iw_ofld_send(&rhp->rdev, skb); | 1033 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
| 993 | if (ret) | 1034 | if (ret) |
| 994 | goto out; | 1035 | goto out; |
| 995 | 1036 | ||
| 996 | wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); | 1037 | ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid, |
| 997 | if (!wr_wait.done) { | 1038 | qhp->wq.sq.qid, __func__); |
| 998 | printk(KERN_ERR MOD "Device %s not responding!\n", | ||
| 999 | pci_name(rhp->rdev.lldi.pdev)); | ||
| 1000 | rhp->rdev.flags = T4_FATAL_ERROR; | ||
| 1001 | ret = -EIO; | ||
| 1002 | } else { | ||
| 1003 | ret = wr_wait.ret; | ||
| 1004 | if (ret) | ||
| 1005 | printk(KERN_WARNING MOD | ||
| 1006 | "%s: Abnormal close qpid %d ret %u\n", | ||
| 1007 | pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid, | ||
| 1008 | ret); | ||
| 1009 | } | ||
| 1010 | out: | 1039 | out: |
| 1011 | PDBG("%s ret %d\n", __func__, ret); | 1040 | PDBG("%s ret %d\n", __func__, ret); |
| 1012 | return ret; | 1041 | return ret; |
| @@ -1040,7 +1069,6 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) | |||
| 1040 | { | 1069 | { |
| 1041 | struct fw_ri_wr *wqe; | 1070 | struct fw_ri_wr *wqe; |
| 1042 | int ret; | 1071 | int ret; |
| 1043 | struct c4iw_wr_wait wr_wait; | ||
| 1044 | struct sk_buff *skb; | 1072 | struct sk_buff *skb; |
| 1045 | 1073 | ||
| 1046 | PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, | 1074 | PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, |
| @@ -1060,7 +1088,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) | |||
| 1060 | FW_WR_FLOWID(qhp->ep->hwtid) | | 1088 | FW_WR_FLOWID(qhp->ep->hwtid) | |
| 1061 | FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); | 1089 | FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16))); |
| 1062 | 1090 | ||
| 1063 | wqe->cookie = (u64)&wr_wait; | 1091 | wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait; |
| 1064 | 1092 | ||
| 1065 | wqe->u.init.type = FW_RI_TYPE_INIT; | 1093 | wqe->u.init.type = FW_RI_TYPE_INIT; |
| 1066 | wqe->u.init.mpareqbit_p2ptype = | 1094 | wqe->u.init.mpareqbit_p2ptype = |
| @@ -1097,19 +1125,13 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) | |||
| 1097 | if (qhp->attr.mpa_attr.initiator) | 1125 | if (qhp->attr.mpa_attr.initiator) |
| 1098 | build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); | 1126 | build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init); |
| 1099 | 1127 | ||
| 1100 | c4iw_init_wr_wait(&wr_wait); | 1128 | c4iw_init_wr_wait(&qhp->ep->com.wr_wait); |
| 1101 | ret = c4iw_ofld_send(&rhp->rdev, skb); | 1129 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
| 1102 | if (ret) | 1130 | if (ret) |
| 1103 | goto out; | 1131 | goto out; |
| 1104 | 1132 | ||
| 1105 | wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO); | 1133 | ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait, |
| 1106 | if (!wr_wait.done) { | 1134 | qhp->ep->hwtid, qhp->wq.sq.qid, __func__); |
| 1107 | printk(KERN_ERR MOD "Device %s not responding!\n", | ||
| 1108 | pci_name(rhp->rdev.lldi.pdev)); | ||
| 1109 | rhp->rdev.flags = T4_FATAL_ERROR; | ||
| 1110 | ret = -EIO; | ||
| 1111 | } else | ||
| 1112 | ret = wr_wait.ret; | ||
| 1113 | out: | 1135 | out: |
| 1114 | PDBG("%s ret %d\n", __func__, ret); | 1136 | PDBG("%s ret %d\n", __func__, ret); |
| 1115 | return ret; | 1137 | return ret; |
| @@ -1122,7 +1144,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1122 | { | 1144 | { |
| 1123 | int ret = 0; | 1145 | int ret = 0; |
| 1124 | struct c4iw_qp_attributes newattr = qhp->attr; | 1146 | struct c4iw_qp_attributes newattr = qhp->attr; |
| 1125 | unsigned long flag; | ||
| 1126 | int disconnect = 0; | 1147 | int disconnect = 0; |
| 1127 | int terminate = 0; | 1148 | int terminate = 0; |
| 1128 | int abort = 0; | 1149 | int abort = 0; |
| @@ -1133,7 +1154,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1133 | qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, | 1154 | qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, |
| 1134 | (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1); | 1155 | (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1); |
| 1135 | 1156 | ||
| 1136 | spin_lock_irqsave(&qhp->lock, flag); | 1157 | mutex_lock(&qhp->mutex); |
| 1137 | 1158 | ||
| 1138 | /* Process attr changes if in IDLE */ | 1159 | /* Process attr changes if in IDLE */ |
| 1139 | if (mask & C4IW_QP_ATTR_VALID_MODIFY) { | 1160 | if (mask & C4IW_QP_ATTR_VALID_MODIFY) { |
| @@ -1184,7 +1205,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1184 | qhp->attr.mpa_attr = attrs->mpa_attr; | 1205 | qhp->attr.mpa_attr = attrs->mpa_attr; |
| 1185 | qhp->attr.llp_stream_handle = attrs->llp_stream_handle; | 1206 | qhp->attr.llp_stream_handle = attrs->llp_stream_handle; |
| 1186 | qhp->ep = qhp->attr.llp_stream_handle; | 1207 | qhp->ep = qhp->attr.llp_stream_handle; |
| 1187 | qhp->attr.state = C4IW_QP_STATE_RTS; | 1208 | set_state(qhp, C4IW_QP_STATE_RTS); |
| 1188 | 1209 | ||
| 1189 | /* | 1210 | /* |
| 1190 | * Ref the endpoint here and deref when we | 1211 | * Ref the endpoint here and deref when we |
| @@ -1193,15 +1214,13 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1193 | * transition. | 1214 | * transition. |
| 1194 | */ | 1215 | */ |
| 1195 | c4iw_get_ep(&qhp->ep->com); | 1216 | c4iw_get_ep(&qhp->ep->com); |
| 1196 | spin_unlock_irqrestore(&qhp->lock, flag); | ||
| 1197 | ret = rdma_init(rhp, qhp); | 1217 | ret = rdma_init(rhp, qhp); |
| 1198 | spin_lock_irqsave(&qhp->lock, flag); | ||
| 1199 | if (ret) | 1218 | if (ret) |
| 1200 | goto err; | 1219 | goto err; |
| 1201 | break; | 1220 | break; |
| 1202 | case C4IW_QP_STATE_ERROR: | 1221 | case C4IW_QP_STATE_ERROR: |
| 1203 | qhp->attr.state = C4IW_QP_STATE_ERROR; | 1222 | set_state(qhp, C4IW_QP_STATE_ERROR); |
| 1204 | flush_qp(qhp, &flag); | 1223 | flush_qp(qhp); |
| 1205 | break; | 1224 | break; |
| 1206 | default: | 1225 | default: |
| 1207 | ret = -EINVAL; | 1226 | ret = -EINVAL; |
| @@ -1212,38 +1231,38 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1212 | switch (attrs->next_state) { | 1231 | switch (attrs->next_state) { |
| 1213 | case C4IW_QP_STATE_CLOSING: | 1232 | case C4IW_QP_STATE_CLOSING: |
| 1214 | BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); | 1233 | BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2); |
| 1215 | qhp->attr.state = C4IW_QP_STATE_CLOSING; | 1234 | set_state(qhp, C4IW_QP_STATE_CLOSING); |
| 1216 | ep = qhp->ep; | 1235 | ep = qhp->ep; |
| 1217 | if (!internal) { | 1236 | if (!internal) { |
| 1218 | abort = 0; | 1237 | abort = 0; |
| 1219 | disconnect = 1; | 1238 | disconnect = 1; |
| 1220 | c4iw_get_ep(&ep->com); | 1239 | c4iw_get_ep(&qhp->ep->com); |
| 1221 | } | 1240 | } |
| 1222 | spin_unlock_irqrestore(&qhp->lock, flag); | ||
| 1223 | ret = rdma_fini(rhp, qhp, ep); | 1241 | ret = rdma_fini(rhp, qhp, ep); |
| 1224 | spin_lock_irqsave(&qhp->lock, flag); | ||
| 1225 | if (ret) { | 1242 | if (ret) { |
| 1226 | c4iw_get_ep(&ep->com); | 1243 | if (internal) |
| 1244 | c4iw_get_ep(&qhp->ep->com); | ||
| 1227 | disconnect = abort = 1; | 1245 | disconnect = abort = 1; |
| 1228 | goto err; | 1246 | goto err; |
| 1229 | } | 1247 | } |
| 1230 | break; | 1248 | break; |
| 1231 | case C4IW_QP_STATE_TERMINATE: | 1249 | case C4IW_QP_STATE_TERMINATE: |
| 1232 | qhp->attr.state = C4IW_QP_STATE_TERMINATE; | 1250 | set_state(qhp, C4IW_QP_STATE_TERMINATE); |
| 1233 | if (qhp->ibqp.uobject) | 1251 | if (qhp->ibqp.uobject) |
| 1234 | t4_set_wq_in_error(&qhp->wq); | 1252 | t4_set_wq_in_error(&qhp->wq); |
| 1235 | ep = qhp->ep; | 1253 | ep = qhp->ep; |
| 1236 | c4iw_get_ep(&ep->com); | 1254 | if (!internal) |
| 1237 | terminate = 1; | 1255 | terminate = 1; |
| 1238 | disconnect = 1; | 1256 | disconnect = 1; |
| 1257 | c4iw_get_ep(&qhp->ep->com); | ||
| 1239 | break; | 1258 | break; |
| 1240 | case C4IW_QP_STATE_ERROR: | 1259 | case C4IW_QP_STATE_ERROR: |
| 1241 | qhp->attr.state = C4IW_QP_STATE_ERROR; | 1260 | set_state(qhp, C4IW_QP_STATE_ERROR); |
| 1242 | if (!internal) { | 1261 | if (!internal) { |
| 1243 | abort = 1; | 1262 | abort = 1; |
| 1244 | disconnect = 1; | 1263 | disconnect = 1; |
| 1245 | ep = qhp->ep; | 1264 | ep = qhp->ep; |
| 1246 | c4iw_get_ep(&ep->com); | 1265 | c4iw_get_ep(&qhp->ep->com); |
| 1247 | } | 1266 | } |
| 1248 | goto err; | 1267 | goto err; |
| 1249 | break; | 1268 | break; |
| @@ -1259,8 +1278,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1259 | } | 1278 | } |
| 1260 | switch (attrs->next_state) { | 1279 | switch (attrs->next_state) { |
| 1261 | case C4IW_QP_STATE_IDLE: | 1280 | case C4IW_QP_STATE_IDLE: |
| 1262 | flush_qp(qhp, &flag); | 1281 | flush_qp(qhp); |
| 1263 | qhp->attr.state = C4IW_QP_STATE_IDLE; | 1282 | set_state(qhp, C4IW_QP_STATE_IDLE); |
| 1264 | qhp->attr.llp_stream_handle = NULL; | 1283 | qhp->attr.llp_stream_handle = NULL; |
| 1265 | c4iw_put_ep(&qhp->ep->com); | 1284 | c4iw_put_ep(&qhp->ep->com); |
| 1266 | qhp->ep = NULL; | 1285 | qhp->ep = NULL; |
| @@ -1282,7 +1301,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
| 1282 | ret = -EINVAL; | 1301 | ret = -EINVAL; |
| 1283 | goto out; | 1302 | goto out; |
| 1284 | } | 1303 | } |
| 1285 | qhp->attr.state = C4IW_QP_STATE_IDLE; | 1304 | set_state(qhp, C4IW_QP_STATE_IDLE); |
| 1286 | break; | 1305 | break; |
| 1287 | case C4IW_QP_STATE_TERMINATE: | 1306 | case C4IW_QP_STATE_TERMINATE: |
| 1288 | if (!internal) { | 1307 | if (!internal) { |
| @@ -1305,15 +1324,16 @@ err: | |||
| 1305 | 1324 | ||
| 1306 | /* disassociate the LLP connection */ | 1325 | /* disassociate the LLP connection */ |
| 1307 | qhp->attr.llp_stream_handle = NULL; | 1326 | qhp->attr.llp_stream_handle = NULL; |
| 1308 | ep = qhp->ep; | 1327 | if (!ep) |
| 1328 | ep = qhp->ep; | ||
| 1309 | qhp->ep = NULL; | 1329 | qhp->ep = NULL; |
| 1310 | qhp->attr.state = C4IW_QP_STATE_ERROR; | 1330 | set_state(qhp, C4IW_QP_STATE_ERROR); |
| 1311 | free = 1; | 1331 | free = 1; |
| 1312 | wake_up(&qhp->wait); | 1332 | wake_up(&qhp->wait); |
| 1313 | BUG_ON(!ep); | 1333 | BUG_ON(!ep); |
| 1314 | flush_qp(qhp, &flag); | 1334 | flush_qp(qhp); |
| 1315 | out: | 1335 | out: |
| 1316 | spin_unlock_irqrestore(&qhp->lock, flag); | 1336 | mutex_unlock(&qhp->mutex); |
| 1317 | 1337 | ||
| 1318 | if (terminate) | 1338 | if (terminate) |
| 1319 | post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); | 1339 | post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL); |
| @@ -1335,7 +1355,6 @@ out: | |||
| 1335 | */ | 1355 | */ |
| 1336 | if (free) | 1356 | if (free) |
| 1337 | c4iw_put_ep(&ep->com); | 1357 | c4iw_put_ep(&ep->com); |
| 1338 | |||
| 1339 | PDBG("%s exit state %d\n", __func__, qhp->attr.state); | 1358 | PDBG("%s exit state %d\n", __func__, qhp->attr.state); |
| 1340 | return ret; | 1359 | return ret; |
| 1341 | } | 1360 | } |
| @@ -1380,7 +1399,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1380 | int sqsize, rqsize; | 1399 | int sqsize, rqsize; |
| 1381 | struct c4iw_ucontext *ucontext; | 1400 | struct c4iw_ucontext *ucontext; |
| 1382 | int ret; | 1401 | int ret; |
| 1383 | struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4; | 1402 | struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL; |
| 1384 | 1403 | ||
| 1385 | PDBG("%s ib_pd %p\n", __func__, pd); | 1404 | PDBG("%s ib_pd %p\n", __func__, pd); |
| 1386 | 1405 | ||
| @@ -1450,6 +1469,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1450 | qhp->attr.max_ord = 1; | 1469 | qhp->attr.max_ord = 1; |
| 1451 | qhp->attr.max_ird = 1; | 1470 | qhp->attr.max_ird = 1; |
| 1452 | spin_lock_init(&qhp->lock); | 1471 | spin_lock_init(&qhp->lock); |
| 1472 | mutex_init(&qhp->mutex); | ||
| 1453 | init_waitqueue_head(&qhp->wait); | 1473 | init_waitqueue_head(&qhp->wait); |
| 1454 | atomic_set(&qhp->refcnt, 1); | 1474 | atomic_set(&qhp->refcnt, 1); |
| 1455 | 1475 | ||
| @@ -1478,7 +1498,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1478 | ret = -ENOMEM; | 1498 | ret = -ENOMEM; |
| 1479 | goto err6; | 1499 | goto err6; |
| 1480 | } | 1500 | } |
| 1481 | 1501 | if (t4_sq_onchip(&qhp->wq.sq)) { | |
| 1502 | mm5 = kmalloc(sizeof *mm5, GFP_KERNEL); | ||
| 1503 | if (!mm5) { | ||
| 1504 | ret = -ENOMEM; | ||
| 1505 | goto err7; | ||
| 1506 | } | ||
| 1507 | uresp.flags = C4IW_QPF_ONCHIP; | ||
| 1508 | } else | ||
| 1509 | uresp.flags = 0; | ||
| 1482 | uresp.qid_mask = rhp->rdev.qpmask; | 1510 | uresp.qid_mask = rhp->rdev.qpmask; |
| 1483 | uresp.sqid = qhp->wq.sq.qid; | 1511 | uresp.sqid = qhp->wq.sq.qid; |
| 1484 | uresp.sq_size = qhp->wq.sq.size; | 1512 | uresp.sq_size = qhp->wq.sq.size; |
| @@ -1487,6 +1515,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1487 | uresp.rq_size = qhp->wq.rq.size; | 1515 | uresp.rq_size = qhp->wq.rq.size; |
| 1488 | uresp.rq_memsize = qhp->wq.rq.memsize; | 1516 | uresp.rq_memsize = qhp->wq.rq.memsize; |
| 1489 | spin_lock(&ucontext->mmap_lock); | 1517 | spin_lock(&ucontext->mmap_lock); |
| 1518 | if (mm5) { | ||
| 1519 | uresp.ma_sync_key = ucontext->key; | ||
| 1520 | ucontext->key += PAGE_SIZE; | ||
| 1521 | } | ||
| 1490 | uresp.sq_key = ucontext->key; | 1522 | uresp.sq_key = ucontext->key; |
| 1491 | ucontext->key += PAGE_SIZE; | 1523 | ucontext->key += PAGE_SIZE; |
| 1492 | uresp.rq_key = ucontext->key; | 1524 | uresp.rq_key = ucontext->key; |
| @@ -1498,9 +1530,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1498 | spin_unlock(&ucontext->mmap_lock); | 1530 | spin_unlock(&ucontext->mmap_lock); |
| 1499 | ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); | 1531 | ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); |
| 1500 | if (ret) | 1532 | if (ret) |
| 1501 | goto err7; | 1533 | goto err8; |
| 1502 | mm1->key = uresp.sq_key; | 1534 | mm1->key = uresp.sq_key; |
| 1503 | mm1->addr = virt_to_phys(qhp->wq.sq.queue); | 1535 | mm1->addr = qhp->wq.sq.phys_addr; |
| 1504 | mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); | 1536 | mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); |
| 1505 | insert_mmap(ucontext, mm1); | 1537 | insert_mmap(ucontext, mm1); |
| 1506 | mm2->key = uresp.rq_key; | 1538 | mm2->key = uresp.rq_key; |
| @@ -1515,6 +1547,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1515 | mm4->addr = qhp->wq.rq.udb; | 1547 | mm4->addr = qhp->wq.rq.udb; |
| 1516 | mm4->len = PAGE_SIZE; | 1548 | mm4->len = PAGE_SIZE; |
| 1517 | insert_mmap(ucontext, mm4); | 1549 | insert_mmap(ucontext, mm4); |
| 1550 | if (mm5) { | ||
| 1551 | mm5->key = uresp.ma_sync_key; | ||
| 1552 | mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0) | ||
| 1553 | + A_PCIE_MA_SYNC) & PAGE_MASK; | ||
| 1554 | mm5->len = PAGE_SIZE; | ||
| 1555 | insert_mmap(ucontext, mm5); | ||
| 1556 | } | ||
| 1518 | } | 1557 | } |
| 1519 | qhp->ibqp.qp_num = qhp->wq.sq.qid; | 1558 | qhp->ibqp.qp_num = qhp->wq.sq.qid; |
| 1520 | init_timer(&(qhp->timer)); | 1559 | init_timer(&(qhp->timer)); |
| @@ -1522,6 +1561,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
| 1522 | __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, | 1561 | __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, |
| 1523 | qhp->wq.sq.qid); | 1562 | qhp->wq.sq.qid); |
| 1524 | return &qhp->ibqp; | 1563 | return &qhp->ibqp; |
| 1564 | err8: | ||
| 1565 | kfree(mm5); | ||
| 1525 | err7: | 1566 | err7: |
| 1526 | kfree(mm4); | 1567 | kfree(mm4); |
| 1527 | err6: | 1568 | err6: |
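A recurring pattern in the reworked build_fastreg() above is the per-flit wrap check: the PBL is now always written as immediate data directly into the SQ ring, and since a WR may cross the end of the queue, every 8-byte store tests for the wrap point and jumps back to the start. The same walk in isolation, as a small user-space sketch with a plain uint64_t array standing in for sq->queue:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define RING_SLOTS 8    /* stand-in for the number of 8-byte flits in the SQ */

    /* Copy n 64-bit words into a ring starting at index 'start', wrapping to
     * index 0 when the end is reached - the same walk build_fastreg() does
     * over the SQ when laying down the PBL. */
    static void ring_copy(uint64_t *ring, unsigned start,
                          const uint64_t *src, unsigned n)
    {
            uint64_t *p = ring + start;

            while (n--) {
                    *p = *src++;
                    if (++p == ring + RING_SLOTS)   /* hit the end of the queue */
                            p = ring;               /* wrap to the beginning */
            }
    }

    int main(void)
    {
            uint64_t ring[RING_SLOTS];
            uint64_t pbl[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
            unsigned i;

            memset(ring, 0, sizeof(ring));
            ring_copy(ring, 6, pbl, 4);     /* starts near the end, wraps around */

            for (i = 0; i < RING_SLOTS; i++)
                    printf("ring[%u] = 0x%llx\n", i, (unsigned long long)ring[i]);
            return 0;
    }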
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c index 83b23dfa250d..4fb50d58b493 100644 --- a/drivers/infiniband/hw/cxgb4/resource.c +++ b/drivers/infiniband/hw/cxgb4/resource.c | |||
| @@ -311,6 +311,9 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) | |||
| 311 | { | 311 | { |
| 312 | unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); | 312 | unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); |
| 313 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); | 313 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); |
| 314 | if (!addr && printk_ratelimit()) | ||
| 315 | printk(KERN_WARNING MOD "%s: Out of PBL memory\n", | ||
| 316 | pci_name(rdev->lldi.pdev)); | ||
| 314 | return (u32)addr; | 317 | return (u32)addr; |
| 315 | } | 318 | } |
| 316 | 319 | ||
| @@ -370,6 +373,9 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) | |||
| 370 | { | 373 | { |
| 371 | unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); | 374 | unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); |
| 372 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); | 375 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); |
| 376 | if (!addr && printk_ratelimit()) | ||
| 377 | printk(KERN_WARNING MOD "%s: Out of RQT memory\n", | ||
| 378 | pci_name(rdev->lldi.pdev)); | ||
| 373 | return (u32)addr; | 379 | return (u32)addr; |
| 374 | } | 380 | } |
| 375 | 381 | ||
| @@ -416,3 +422,59 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) | |||
| 416 | { | 422 | { |
| 417 | gen_pool_destroy(rdev->rqt_pool); | 423 | gen_pool_destroy(rdev->rqt_pool); |
| 418 | } | 424 | } |
| 425 | |||
| 426 | /* | ||
| 427 | * On-Chip QP Memory. | ||
| 428 | */ | ||
| 429 | #define MIN_OCQP_SHIFT 12 /* 4KB == min ocqp size */ | ||
| 430 | |||
| 431 | u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) | ||
| 432 | { | ||
| 433 | unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); | ||
| 434 | PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size); | ||
| 435 | return (u32)addr; | ||
| 436 | } | ||
| 437 | |||
| 438 | void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) | ||
| 439 | { | ||
| 440 | PDBG("%s addr 0x%x size %d\n", __func__, addr, size); | ||
| 441 | gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size); | ||
| 442 | } | ||
| 443 | |||
| 444 | int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) | ||
| 445 | { | ||
| 446 | unsigned start, chunk, top; | ||
| 447 | |||
| 448 | rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1); | ||
| 449 | if (!rdev->ocqp_pool) | ||
| 450 | return -ENOMEM; | ||
| 451 | |||
| 452 | start = rdev->lldi.vr->ocq.start; | ||
| 453 | chunk = rdev->lldi.vr->ocq.size; | ||
| 454 | top = start + chunk; | ||
| 455 | |||
| 456 | while (start < top) { | ||
| 457 | chunk = min(top - start + 1, chunk); | ||
| 458 | if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { | ||
| 459 | PDBG("%s failed to add OCQP chunk (%x/%x)\n", | ||
| 460 | __func__, start, chunk); | ||
| 461 | if (chunk <= 1024 << MIN_OCQP_SHIFT) { | ||
| 462 | printk(KERN_WARNING MOD | ||
| 463 | "Failed to add all OCQP chunks (%x/%x)\n", | ||
| 464 | start, top - start); | ||
| 465 | return 0; | ||
| 466 | } | ||
| 467 | chunk >>= 1; | ||
| 468 | } else { | ||
| 469 | PDBG("%s added OCQP chunk (%x/%x)\n", | ||
| 470 | __func__, start, chunk); | ||
| 471 | start += chunk; | ||
| 472 | } | ||
| 473 | } | ||
| 474 | return 0; | ||
| 475 | } | ||
| 476 | |||
| 477 | void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev) | ||
| 478 | { | ||
| 479 | gen_pool_destroy(rdev->ocqp_pool); | ||
| 480 | } | ||
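Worth keeping in mind when reading c4iw_ocqp_pool_alloc(): the addresses it returns live in the adapter's on-chip queue address space ([ocq.start, ocq.start + ocq.size)), not in host memory. alloc_oc_sq() in qp.c rebases them into the BAR2 window that device.c mapped: phys = oc_mw_pa + (addr - ocq.start) for user mmap(), and kva = oc_mw_kva + (addr - ocq.start) for the driver. A tiny numeric illustration of that rebasing, with all values invented:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical layout, for illustration only. */
            uint32_t ocq_start = 0x00300000;        /* adapter-side start of the OCQ region */
            uint64_t oc_mw_pa  = 0xd0200000ULL;     /* host physical base of the BAR2 window */

            uint32_t ocqp_addr = 0x00304000;        /* what c4iw_ocqp_pool_alloc() handed out */

            /* the same rebasing alloc_oc_sq() performs */
            uint64_t phys_addr = oc_mw_pa + (ocqp_addr - ocq_start);

            printf("on-chip 0x%x -> host physical 0x%llx\n",
                   ocqp_addr, (unsigned long long)phys_addr);
            return 0;
    }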
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 24f369046ef3..70004425d695 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #define T4_STAG_UNSET 0xffffffff | 52 | #define T4_STAG_UNSET 0xffffffff |
| 53 | #define T4_FW_MAJ 0 | 53 | #define T4_FW_MAJ 0 |
| 54 | #define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1) | 54 | #define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1) |
| 55 | #define A_PCIE_MA_SYNC 0x30b4 | ||
| 55 | 56 | ||
| 56 | struct t4_status_page { | 57 | struct t4_status_page { |
| 57 | __be32 rsvd1; /* flit 0 - hw owns */ | 58 | __be32 rsvd1; /* flit 0 - hw owns */ |
| @@ -65,7 +66,7 @@ struct t4_status_page { | |||
| 65 | 66 | ||
| 66 | #define T4_EQ_ENTRY_SIZE 64 | 67 | #define T4_EQ_ENTRY_SIZE 64 |
| 67 | 68 | ||
| 68 | #define T4_SQ_NUM_SLOTS 4 | 69 | #define T4_SQ_NUM_SLOTS 5 |
| 69 | #define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS) | 70 | #define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS) |
| 70 | #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \ | 71 | #define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \ |
| 71 | sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) | 72 | sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) |
| @@ -78,7 +79,7 @@ struct t4_status_page { | |||
| 78 | sizeof(struct fw_ri_rdma_write_wr) - \ | 79 | sizeof(struct fw_ri_rdma_write_wr) - \ |
| 79 | sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) | 80 | sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) |
| 80 | #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ | 81 | #define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ |
| 81 | sizeof(struct fw_ri_immd))) | 82 | sizeof(struct fw_ri_immd)) & ~31UL) |
| 82 | #define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64)) | 83 | #define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64)) |
| 83 | 84 | ||
| 84 | #define T4_RQ_NUM_SLOTS 2 | 85 | #define T4_RQ_NUM_SLOTS 2 |
| @@ -266,10 +267,36 @@ struct t4_swsqe { | |||
| 266 | u16 idx; | 267 | u16 idx; |
| 267 | }; | 268 | }; |
| 268 | 269 | ||
| 270 | static inline pgprot_t t4_pgprot_wc(pgprot_t prot) | ||
| 271 | { | ||
| 272 | #if defined(__i386__) || defined(__x86_64__) | ||
| 273 | return pgprot_writecombine(prot); | ||
| 274 | #elif defined(CONFIG_PPC64) | ||
| 275 | return __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE) & | ||
| 276 | ~(pgprot_t)_PAGE_GUARDED); | ||
| 277 | #else | ||
| 278 | return pgprot_noncached(prot); | ||
| 279 | #endif | ||
| 280 | } | ||
| 281 | |||
| 282 | static inline int t4_ocqp_supported(void) | ||
| 283 | { | ||
| 284 | #if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64) | ||
| 285 | return 1; | ||
| 286 | #else | ||
| 287 | return 0; | ||
| 288 | #endif | ||
| 289 | } | ||
| 290 | |||
| 291 | enum { | ||
| 292 | T4_SQ_ONCHIP = (1<<0), | ||
| 293 | }; | ||
| 294 | |||
| 269 | struct t4_sq { | 295 | struct t4_sq { |
| 270 | union t4_wr *queue; | 296 | union t4_wr *queue; |
| 271 | dma_addr_t dma_addr; | 297 | dma_addr_t dma_addr; |
| 272 | DEFINE_DMA_UNMAP_ADDR(mapping); | 298 | DEFINE_DMA_UNMAP_ADDR(mapping); |
| 299 | unsigned long phys_addr; | ||
| 273 | struct t4_swsqe *sw_sq; | 300 | struct t4_swsqe *sw_sq; |
| 274 | struct t4_swsqe *oldest_read; | 301 | struct t4_swsqe *oldest_read; |
| 275 | u64 udb; | 302 | u64 udb; |
| @@ -280,6 +307,7 @@ struct t4_sq { | |||
| 280 | u16 cidx; | 307 | u16 cidx; |
| 281 | u16 pidx; | 308 | u16 pidx; |
| 282 | u16 wq_pidx; | 309 | u16 wq_pidx; |
| 310 | u16 flags; | ||
| 283 | }; | 311 | }; |
| 284 | 312 | ||
| 285 | struct t4_swrqe { | 313 | struct t4_swrqe { |
| @@ -350,6 +378,11 @@ static inline void t4_rq_consume(struct t4_wq *wq) | |||
| 350 | wq->rq.cidx = 0; | 378 | wq->rq.cidx = 0; |
| 351 | } | 379 | } |
| 352 | 380 | ||
| 381 | static inline int t4_sq_onchip(struct t4_sq *sq) | ||
| 382 | { | ||
| 383 | return sq->flags & T4_SQ_ONCHIP; | ||
| 384 | } | ||
| 385 | |||
| 353 | static inline int t4_sq_empty(struct t4_wq *wq) | 386 | static inline int t4_sq_empty(struct t4_wq *wq) |
| 354 | { | 387 | { |
| 355 | return wq->sq.in_use == 0; | 388 | return wq->sq.in_use == 0; |
| @@ -396,30 +429,27 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc) | |||
| 396 | 429 | ||
| 397 | static inline int t4_wq_in_error(struct t4_wq *wq) | 430 | static inline int t4_wq_in_error(struct t4_wq *wq) |
| 398 | { | 431 | { |
| 399 | return wq->sq.queue[wq->sq.size].status.qp_err; | 432 | return wq->rq.queue[wq->rq.size].status.qp_err; |
| 400 | } | 433 | } |
| 401 | 434 | ||
| 402 | static inline void t4_set_wq_in_error(struct t4_wq *wq) | 435 | static inline void t4_set_wq_in_error(struct t4_wq *wq) |
| 403 | { | 436 | { |
| 404 | wq->sq.queue[wq->sq.size].status.qp_err = 1; | ||
| 405 | wq->rq.queue[wq->rq.size].status.qp_err = 1; | 437 | wq->rq.queue[wq->rq.size].status.qp_err = 1; |
| 406 | } | 438 | } |
| 407 | 439 | ||
| 408 | static inline void t4_disable_wq_db(struct t4_wq *wq) | 440 | static inline void t4_disable_wq_db(struct t4_wq *wq) |
| 409 | { | 441 | { |
| 410 | wq->sq.queue[wq->sq.size].status.db_off = 1; | ||
| 411 | wq->rq.queue[wq->rq.size].status.db_off = 1; | 442 | wq->rq.queue[wq->rq.size].status.db_off = 1; |
| 412 | } | 443 | } |
| 413 | 444 | ||
| 414 | static inline void t4_enable_wq_db(struct t4_wq *wq) | 445 | static inline void t4_enable_wq_db(struct t4_wq *wq) |
| 415 | { | 446 | { |
| 416 | wq->sq.queue[wq->sq.size].status.db_off = 0; | ||
| 417 | wq->rq.queue[wq->rq.size].status.db_off = 0; | 447 | wq->rq.queue[wq->rq.size].status.db_off = 0; |
| 418 | } | 448 | } |
| 419 | 449 | ||
| 420 | static inline int t4_wq_db_enabled(struct t4_wq *wq) | 450 | static inline int t4_wq_db_enabled(struct t4_wq *wq) |
| 421 | { | 451 | { |
| 422 | return !wq->sq.queue[wq->sq.size].status.db_off; | 452 | return !wq->rq.queue[wq->rq.size].status.db_off; |
| 423 | } | 453 | } |
| 424 | 454 | ||
| 425 | struct t4_cq { | 455 | struct t4_cq { |
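
The two new helpers above decide how on-chip queue memory may be exposed to userspace: t4_pgprot_wc() picks a write-combining protection on x86, a non-cached non-guarded mapping on PPC64, and plain non-cached elsewhere, while t4_ocqp_supported() gates the feature to the architectures where such a mapping is known to work. A hedged sketch of how an mmap handler might apply them follows; my_mmap_onchip_sq() and the ocq_busaddr parameter are illustrative, the real driver selects the region from the mmap offset key.

#include <linux/mm.h>

static int my_mmap_onchip_sq(struct vm_area_struct *vma, unsigned long ocq_busaddr)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if (!t4_ocqp_supported())
		return -EINVAL;			/* no usable WC mapping on this arch */

	vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
	return io_remap_pfn_range(vma, vma->vm_start, ocq_busaddr >> PAGE_SHIFT,
				  len, vma->vm_page_prot);
}
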
diff --git a/drivers/infiniband/hw/cxgb4/user.h b/drivers/infiniband/hw/cxgb4/user.h index ed6414abde02..e6669d54770e 100644 --- a/drivers/infiniband/hw/cxgb4/user.h +++ b/drivers/infiniband/hw/cxgb4/user.h | |||
| @@ -50,7 +50,13 @@ struct c4iw_create_cq_resp { | |||
| 50 | __u32 qid_mask; | 50 | __u32 qid_mask; |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | |||
| 54 | enum { | ||
| 55 | C4IW_QPF_ONCHIP = (1<<0) | ||
| 56 | }; | ||
| 57 | |||
| 53 | struct c4iw_create_qp_resp { | 58 | struct c4iw_create_qp_resp { |
| 59 | __u64 ma_sync_key; | ||
| 54 | __u64 sq_key; | 60 | __u64 sq_key; |
| 55 | __u64 rq_key; | 61 | __u64 rq_key; |
| 56 | __u64 sq_db_gts_key; | 62 | __u64 sq_db_gts_key; |
| @@ -62,5 +68,6 @@ struct c4iw_create_qp_resp { | |||
| 62 | __u32 sq_size; | 68 | __u32 sq_size; |
| 63 | __u32 rq_size; | 69 | __u32 rq_size; |
| 64 | __u32 qid_mask; | 70 | __u32 qid_mask; |
| 71 | __u32 flags; | ||
| 65 | }; | 72 | }; |
| 66 | #endif | 73 | #endif |
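
The new flags word lets the kernel tell the userspace provider whether its SQ ended up in on-chip memory, and ma_sync_key gives it an extra mmap handle (the PCIE_MA_SYNC register added in t4.h above is the likely target) for flushing writes to that memory. A hedged sketch of the consumer side; the helper name and the surrounding logic are illustrative, not libcxgb4 code.

static int qp_has_onchip_sq(const struct c4iw_create_qp_resp *resp)
{
	return !!(resp->flags & C4IW_QPF_ONCHIP);
}

/* After the create-QP command fills in "resp":
 * if (qp_has_onchip_sq(&resp)) map resp.ma_sync_key so writes to the
 * on-chip queue can be flushed before ringing the doorbell. */
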
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 53f4cd4fc19a..43cae84005f0 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
| @@ -171,7 +171,7 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags) | |||
| 171 | } | 171 | } |
| 172 | 172 | ||
| 173 | ret = ehca_reg_maxmr(shca, e_maxmr, | 173 | ret = ehca_reg_maxmr(shca, e_maxmr, |
| 174 | (void *)ehca_map_vaddr((void *)KERNELBASE), | 174 | (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)), |
| 175 | mr_access_flags, e_pd, | 175 | mr_access_flags, e_pd, |
| 176 | &e_maxmr->ib.ib_mr.lkey, | 176 | &e_maxmr->ib.ib_mr.lkey, |
| 177 | &e_maxmr->ib.ib_mr.rkey); | 177 | &e_maxmr->ib.ib_mr.rkey); |
| @@ -1636,7 +1636,7 @@ int ehca_reg_internal_maxmr( | |||
| 1636 | 1636 | ||
| 1637 | /* register internal max-MR on HCA */ | 1637 | /* register internal max-MR on HCA */ |
| 1638 | size_maxmr = ehca_mr_len; | 1638 | size_maxmr = ehca_mr_len; |
| 1639 | iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE); | 1639 | iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)); |
| 1640 | ib_pbuf.addr = 0; | 1640 | ib_pbuf.addr = 0; |
| 1641 | ib_pbuf.size = size_maxmr; | 1641 | ib_pbuf.size = size_maxmr; |
| 1642 | num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, | 1642 | num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, |
| @@ -2209,7 +2209,7 @@ int ehca_mr_is_maxmr(u64 size, | |||
| 2209 | { | 2209 | { |
| 2210 | /* a MR is treated as max-MR only if it fits following: */ | 2210 | /* a MR is treated as max-MR only if it fits following: */ |
| 2211 | if ((size == ehca_mr_len) && | 2211 | if ((size == ehca_mr_len) && |
| 2212 | (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) { | 2212 | (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) { |
| 2213 | ehca_gen_dbg("this is a max-MR"); | 2213 | ehca_gen_dbg("this is a max-MR"); |
| 2214 | return 1; | 2214 | return 1; |
| 2215 | } else | 2215 | } else |
diff --git a/drivers/infiniband/hw/ipath/Makefile b/drivers/infiniband/hw/ipath/Makefile index fa3df82681df..4496f2820c92 100644 --- a/drivers/infiniband/hw/ipath/Makefile +++ b/drivers/infiniband/hw/ipath/Makefile | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | EXTRA_CFLAGS += -DIPATH_IDSTR='"QLogic kernel.org driver"' \ | 1 | ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \ |
| 2 | -DIPATH_KERN_TYPE=0 | 2 | -DIPATH_KERN_TYPE=0 |
| 3 | 3 | ||
| 4 | obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o | 4 | obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o |
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 11a236f8d884..4b8f9c49397e 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c | |||
| @@ -30,66 +30,163 @@ | |||
| 30 | * SOFTWARE. | 30 | * SOFTWARE. |
| 31 | */ | 31 | */ |
| 32 | 32 | ||
| 33 | #include <rdma/ib_addr.h> | ||
| 34 | #include <rdma/ib_cache.h> | ||
| 35 | |||
| 33 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
| 37 | #include <linux/inet.h> | ||
| 38 | #include <linux/string.h> | ||
| 34 | 39 | ||
| 35 | #include "mlx4_ib.h" | 40 | #include "mlx4_ib.h" |
| 36 | 41 | ||
| 37 | struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | 42 | int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr, |
| 43 | u8 *mac, int *is_mcast, u8 port) | ||
| 38 | { | 44 | { |
| 39 | struct mlx4_dev *dev = to_mdev(pd->device)->dev; | 45 | struct in6_addr in6; |
| 40 | struct mlx4_ib_ah *ah; | ||
| 41 | 46 | ||
| 42 | ah = kmalloc(sizeof *ah, GFP_ATOMIC); | 47 | *is_mcast = 0; |
| 43 | if (!ah) | ||
| 44 | return ERR_PTR(-ENOMEM); | ||
| 45 | 48 | ||
| 46 | memset(&ah->av, 0, sizeof ah->av); | 49 | memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6); |
| 50 | if (rdma_link_local_addr(&in6)) | ||
| 51 | rdma_get_ll_mac(&in6, mac); | ||
| 52 | else if (rdma_is_multicast_addr(&in6)) { | ||
| 53 | rdma_get_mcast_mac(&in6, mac); | ||
| 54 | *is_mcast = 1; | ||
| 55 | } else | ||
| 56 | return -EINVAL; | ||
| 47 | 57 | ||
| 48 | ah->av.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); | 58 | return 0; |
| 49 | ah->av.g_slid = ah_attr->src_path_bits; | 59 | } |
| 50 | ah->av.dlid = cpu_to_be16(ah_attr->dlid); | 60 | |
| 51 | if (ah_attr->static_rate) { | 61 | static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, |
| 52 | ah->av.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET; | 62 | struct mlx4_ib_ah *ah) |
| 53 | while (ah->av.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && | 63 | { |
| 54 | !(1 << ah->av.stat_rate & dev->caps.stat_rate_support)) | 64 | struct mlx4_dev *dev = to_mdev(pd->device)->dev; |
| 55 | --ah->av.stat_rate; | 65 | |
| 56 | } | 66 | ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); |
| 57 | ah->av.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); | 67 | ah->av.ib.g_slid = ah_attr->src_path_bits; |
| 58 | if (ah_attr->ah_flags & IB_AH_GRH) { | 68 | if (ah_attr->ah_flags & IB_AH_GRH) { |
| 59 | ah->av.g_slid |= 0x80; | 69 | ah->av.ib.g_slid |= 0x80; |
| 60 | ah->av.gid_index = ah_attr->grh.sgid_index; | 70 | ah->av.ib.gid_index = ah_attr->grh.sgid_index; |
| 61 | ah->av.hop_limit = ah_attr->grh.hop_limit; | 71 | ah->av.ib.hop_limit = ah_attr->grh.hop_limit; |
| 62 | ah->av.sl_tclass_flowlabel |= | 72 | ah->av.ib.sl_tclass_flowlabel |= |
| 63 | cpu_to_be32((ah_attr->grh.traffic_class << 20) | | 73 | cpu_to_be32((ah_attr->grh.traffic_class << 20) | |
| 64 | ah_attr->grh.flow_label); | 74 | ah_attr->grh.flow_label); |
| 65 | memcpy(ah->av.dgid, ah_attr->grh.dgid.raw, 16); | 75 | memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16); |
| 76 | } | ||
| 77 | |||
| 78 | ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid); | ||
| 79 | if (ah_attr->static_rate) { | ||
| 80 | ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET; | ||
| 81 | while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && | ||
| 82 | !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support)) | ||
| 83 | --ah->av.ib.stat_rate; | ||
| 66 | } | 84 | } |
| 85 | ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); | ||
| 67 | 86 | ||
| 68 | return &ah->ibah; | 87 | return &ah->ibah; |
| 69 | } | 88 | } |
| 70 | 89 | ||
| 90 | static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, | ||
| 91 | struct mlx4_ib_ah *ah) | ||
| 92 | { | ||
| 93 | struct mlx4_ib_dev *ibdev = to_mdev(pd->device); | ||
| 94 | struct mlx4_dev *dev = ibdev->dev; | ||
| 95 | union ib_gid sgid; | ||
| 96 | u8 mac[6]; | ||
| 97 | int err; | ||
| 98 | int is_mcast; | ||
| 99 | u16 vlan_tag; | ||
| 100 | |||
| 101 | err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num); | ||
| 102 | if (err) | ||
| 103 | return ERR_PTR(err); | ||
| 104 | |||
| 105 | memcpy(ah->av.eth.mac, mac, 6); | ||
| 106 | err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid); | ||
| 107 | if (err) | ||
| 108 | return ERR_PTR(err); | ||
| 109 | vlan_tag = rdma_get_vlan_id(&sgid); | ||
| 110 | if (vlan_tag < 0x1000) | ||
| 111 | vlan_tag |= (ah_attr->sl & 7) << 13; | ||
| 112 | ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); | ||
| 113 | ah->av.eth.gid_index = ah_attr->grh.sgid_index; | ||
| 114 | ah->av.eth.vlan = cpu_to_be16(vlan_tag); | ||
| 115 | if (ah_attr->static_rate) { | ||
| 116 | ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET; | ||
| 117 | while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET && | ||
| 118 | !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support)) | ||
| 119 | --ah->av.eth.stat_rate; | ||
| 120 | } | ||
| 121 | |||
| 122 | /* | ||
| 123 | * HW requires multicast LID so we just choose one. | ||
| 124 | */ | ||
| 125 | if (is_mcast) | ||
| 126 | ah->av.ib.dlid = cpu_to_be16(0xc000); | ||
| 127 | |||
| 128 | memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16); | ||
| 129 | ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); | ||
| 130 | |||
| 131 | return &ah->ibah; | ||
| 132 | } | ||
| 133 | |||
| 134 | struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) | ||
| 135 | { | ||
| 136 | struct mlx4_ib_ah *ah; | ||
| 137 | struct ib_ah *ret; | ||
| 138 | |||
| 139 | ah = kzalloc(sizeof *ah, GFP_ATOMIC); | ||
| 140 | if (!ah) | ||
| 141 | return ERR_PTR(-ENOMEM); | ||
| 142 | |||
| 143 | if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) { | ||
| 144 | if (!(ah_attr->ah_flags & IB_AH_GRH)) { | ||
| 145 | ret = ERR_PTR(-EINVAL); | ||
| 146 | } else { | ||
| 147 | /* | ||
| 148 | * TBD: need to handle the case when we get | ||
| 149 | * called in an atomic context and there we | ||
| 150 | * might sleep. We don't expect this | ||
| 151 | * currently since we're working with link | ||
| 152 | * local addresses which we can translate | ||
| 153 | * without going to sleep. | ||
| 154 | */ | ||
| 155 | ret = create_iboe_ah(pd, ah_attr, ah); | ||
| 156 | } | ||
| 157 | |||
| 158 | if (IS_ERR(ret)) | ||
| 159 | kfree(ah); | ||
| 160 | |||
| 161 | return ret; | ||
| 162 | } else | ||
| 163 | return create_ib_ah(pd, ah_attr, ah); /* never fails */ | ||
| 164 | } | ||
| 165 | |||
| 71 | int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) | 166 | int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) |
| 72 | { | 167 | { |
| 73 | struct mlx4_ib_ah *ah = to_mah(ibah); | 168 | struct mlx4_ib_ah *ah = to_mah(ibah); |
| 169 | enum rdma_link_layer ll; | ||
| 74 | 170 | ||
| 75 | memset(ah_attr, 0, sizeof *ah_attr); | 171 | memset(ah_attr, 0, sizeof *ah_attr); |
| 76 | ah_attr->dlid = be16_to_cpu(ah->av.dlid); | 172 | ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; |
| 77 | ah_attr->sl = be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28; | 173 | ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24; |
| 78 | ah_attr->port_num = be32_to_cpu(ah->av.port_pd) >> 24; | 174 | ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num); |
| 79 | if (ah->av.stat_rate) | 175 | ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0; |
| 80 | ah_attr->static_rate = ah->av.stat_rate - MLX4_STAT_RATE_OFFSET; | 176 | if (ah->av.ib.stat_rate) |
| 81 | ah_attr->src_path_bits = ah->av.g_slid & 0x7F; | 177 | ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET; |
| 178 | ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F; | ||
| 82 | 179 | ||
| 83 | if (mlx4_ib_ah_grh_present(ah)) { | 180 | if (mlx4_ib_ah_grh_present(ah)) { |
| 84 | ah_attr->ah_flags = IB_AH_GRH; | 181 | ah_attr->ah_flags = IB_AH_GRH; |
| 85 | 182 | ||
| 86 | ah_attr->grh.traffic_class = | 183 | ah_attr->grh.traffic_class = |
| 87 | be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20; | 184 | be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20; |
| 88 | ah_attr->grh.flow_label = | 185 | ah_attr->grh.flow_label = |
| 89 | be32_to_cpu(ah->av.sl_tclass_flowlabel) & 0xfffff; | 186 | be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff; |
| 90 | ah_attr->grh.hop_limit = ah->av.hop_limit; | 187 | ah_attr->grh.hop_limit = ah->av.ib.hop_limit; |
| 91 | ah_attr->grh.sgid_index = ah->av.gid_index; | 188 | ah_attr->grh.sgid_index = ah->av.ib.gid_index; |
| 92 | memcpy(ah_attr->grh.dgid.raw, ah->av.dgid, 16); | 189 | memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16); |
| 93 | } | 190 | } |
| 94 | 191 | ||
| 95 | return 0; | 192 | return 0; |
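
For the Ethernet (RoCE) case, create_iboe_ah() above never does an ARP-style lookup: mlx4_ib_resolve_grh() only accepts link-local or multicast destination GIDs, because for those the MAC is recoverable from the GID itself. The sketch below restates the unicast relationship it relies on, the modified EUI-64 mapping; mac_to_ll_gid() is illustrative, not a driver or core helper. For example, MAC 00:02:c9:12:34:56 corresponds to fe80::0202:c9ff:fe12:3456.

#include <linux/string.h>
#include <linux/types.h>

static void mac_to_ll_gid(const u8 *mac, u8 gid[16])
{
	memset(gid, 0, 16);
	gid[0] = 0xfe;
	gid[1] = 0x80;				/* fe80::/64 link-local prefix */
	memcpy(gid + 8, mac, 3);
	gid[11] = 0xff;				/* ff:fe filler of EUI-64 */
	gid[12] = 0xfe;
	memcpy(gid + 13, mac + 3, 3);
	gid[8] ^= 2;				/* flip the universal/local bit */
}

rdma_get_ll_mac(), used above, is simply the inverse of this mapping.
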
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index f38d5b118927..c9a8dd63b9e2 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
| @@ -311,19 +311,25 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) | |||
| 311 | struct ib_mad_agent *agent; | 311 | struct ib_mad_agent *agent; |
| 312 | int p, q; | 312 | int p, q; |
| 313 | int ret; | 313 | int ret; |
| 314 | enum rdma_link_layer ll; | ||
| 314 | 315 | ||
| 315 | for (p = 0; p < dev->num_ports; ++p) | 316 | for (p = 0; p < dev->num_ports; ++p) { |
| 317 | ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1); | ||
| 316 | for (q = 0; q <= 1; ++q) { | 318 | for (q = 0; q <= 1; ++q) { |
| 317 | agent = ib_register_mad_agent(&dev->ib_dev, p + 1, | 319 | if (ll == IB_LINK_LAYER_INFINIBAND) { |
| 318 | q ? IB_QPT_GSI : IB_QPT_SMI, | 320 | agent = ib_register_mad_agent(&dev->ib_dev, p + 1, |
| 319 | NULL, 0, send_handler, | 321 | q ? IB_QPT_GSI : IB_QPT_SMI, |
| 320 | NULL, NULL); | 322 | NULL, 0, send_handler, |
| 321 | if (IS_ERR(agent)) { | 323 | NULL, NULL); |
| 322 | ret = PTR_ERR(agent); | 324 | if (IS_ERR(agent)) { |
| 323 | goto err; | 325 | ret = PTR_ERR(agent); |
| 324 | } | 326 | goto err; |
| 325 | dev->send_agent[p][q] = agent; | 327 | } |
| 328 | dev->send_agent[p][q] = agent; | ||
| 329 | } else | ||
| 330 | dev->send_agent[p][q] = NULL; | ||
| 326 | } | 331 | } |
| 332 | } | ||
| 327 | 333 | ||
| 328 | return 0; | 334 | return 0; |
| 329 | 335 | ||
| @@ -344,8 +350,10 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) | |||
| 344 | for (p = 0; p < dev->num_ports; ++p) { | 350 | for (p = 0; p < dev->num_ports; ++p) { |
| 345 | for (q = 0; q <= 1; ++q) { | 351 | for (q = 0; q <= 1; ++q) { |
| 346 | agent = dev->send_agent[p][q]; | 352 | agent = dev->send_agent[p][q]; |
| 347 | dev->send_agent[p][q] = NULL; | 353 | if (agent) { |
| 348 | ib_unregister_mad_agent(agent); | 354 | dev->send_agent[p][q] = NULL; |
| 355 | ib_unregister_mad_agent(agent); | ||
| 356 | } | ||
| 349 | } | 357 | } |
| 350 | 358 | ||
| 351 | if (dev->sm_ah[p]) | 359 | if (dev->sm_ah[p]) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ac6951d99336..bf3e20cd0298 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -35,9 +35,14 @@ | |||
| 35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
| 36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
| 37 | #include <linux/errno.h> | 37 | #include <linux/errno.h> |
| 38 | #include <linux/netdevice.h> | ||
| 39 | #include <linux/inetdevice.h> | ||
| 40 | #include <linux/rtnetlink.h> | ||
| 41 | #include <linux/if_vlan.h> | ||
| 38 | 42 | ||
| 39 | #include <rdma/ib_smi.h> | 43 | #include <rdma/ib_smi.h> |
| 40 | #include <rdma/ib_user_verbs.h> | 44 | #include <rdma/ib_user_verbs.h> |
| 45 | #include <rdma/ib_addr.h> | ||
| 41 | 46 | ||
| 42 | #include <linux/mlx4/driver.h> | 47 | #include <linux/mlx4/driver.h> |
| 43 | #include <linux/mlx4/cmd.h> | 48 | #include <linux/mlx4/cmd.h> |
| @@ -58,6 +63,15 @@ static const char mlx4_ib_version[] = | |||
| 58 | DRV_NAME ": Mellanox ConnectX InfiniBand driver v" | 63 | DRV_NAME ": Mellanox ConnectX InfiniBand driver v" |
| 59 | DRV_VERSION " (" DRV_RELDATE ")\n"; | 64 | DRV_VERSION " (" DRV_RELDATE ")\n"; |
| 60 | 65 | ||
| 66 | struct update_gid_work { | ||
| 67 | struct work_struct work; | ||
| 68 | union ib_gid gids[128]; | ||
| 69 | struct mlx4_ib_dev *dev; | ||
| 70 | int port; | ||
| 71 | }; | ||
| 72 | |||
| 73 | static struct workqueue_struct *wq; | ||
| 74 | |||
| 61 | static void init_query_mad(struct ib_smp *mad) | 75 | static void init_query_mad(struct ib_smp *mad) |
| 62 | { | 76 | { |
| 63 | mad->base_version = 1; | 77 | mad->base_version = 1; |
| @@ -66,6 +80,8 @@ static void init_query_mad(struct ib_smp *mad) | |||
| 66 | mad->method = IB_MGMT_METHOD_GET; | 80 | mad->method = IB_MGMT_METHOD_GET; |
| 67 | } | 81 | } |
| 68 | 82 | ||
| 83 | static union ib_gid zgid; | ||
| 84 | |||
| 69 | static int mlx4_ib_query_device(struct ib_device *ibdev, | 85 | static int mlx4_ib_query_device(struct ib_device *ibdev, |
| 70 | struct ib_device_attr *props) | 86 | struct ib_device_attr *props) |
| 71 | { | 87 | { |
| @@ -154,28 +170,19 @@ out: | |||
| 154 | return err; | 170 | return err; |
| 155 | } | 171 | } |
| 156 | 172 | ||
| 157 | static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, | 173 | static enum rdma_link_layer |
| 158 | struct ib_port_attr *props) | 174 | mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num) |
| 159 | { | 175 | { |
| 160 | struct ib_smp *in_mad = NULL; | 176 | struct mlx4_dev *dev = to_mdev(device)->dev; |
| 161 | struct ib_smp *out_mad = NULL; | ||
| 162 | int err = -ENOMEM; | ||
| 163 | 177 | ||
| 164 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); | 178 | return dev->caps.port_mask & (1 << (port_num - 1)) ? |
| 165 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); | 179 | IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET; |
| 166 | if (!in_mad || !out_mad) | 180 | } |
| 167 | goto out; | ||
| 168 | |||
| 169 | memset(props, 0, sizeof *props); | ||
| 170 | |||
| 171 | init_query_mad(in_mad); | ||
| 172 | in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; | ||
| 173 | in_mad->attr_mod = cpu_to_be32(port); | ||
| 174 | |||
| 175 | err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); | ||
| 176 | if (err) | ||
| 177 | goto out; | ||
| 178 | 181 | ||
| 182 | static int ib_link_query_port(struct ib_device *ibdev, u8 port, | ||
| 183 | struct ib_port_attr *props, | ||
| 184 | struct ib_smp *out_mad) | ||
| 185 | { | ||
| 179 | props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); | 186 | props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); |
| 180 | props->lmc = out_mad->data[34] & 0x7; | 187 | props->lmc = out_mad->data[34] & 0x7; |
| 181 | props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); | 188 | props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18)); |
| @@ -196,6 +203,80 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, | |||
| 196 | props->max_vl_num = out_mad->data[37] >> 4; | 203 | props->max_vl_num = out_mad->data[37] >> 4; |
| 197 | props->init_type_reply = out_mad->data[41] >> 4; | 204 | props->init_type_reply = out_mad->data[41] >> 4; |
| 198 | 205 | ||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | |||
| 209 | static u8 state_to_phys_state(enum ib_port_state state) | ||
| 210 | { | ||
| 211 | return state == IB_PORT_ACTIVE ? 5 : 3; | ||
| 212 | } | ||
| 213 | |||
| 214 | static int eth_link_query_port(struct ib_device *ibdev, u8 port, | ||
| 215 | struct ib_port_attr *props, | ||
| 216 | struct ib_smp *out_mad) | ||
| 217 | { | ||
| 218 | struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe; | ||
| 219 | struct net_device *ndev; | ||
| 220 | enum ib_mtu tmp; | ||
| 221 | |||
| 222 | props->active_width = IB_WIDTH_4X; | ||
| 223 | props->active_speed = 4; | ||
| 224 | props->port_cap_flags = IB_PORT_CM_SUP; | ||
| 225 | props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; | ||
| 226 | props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz; | ||
| 227 | props->pkey_tbl_len = 1; | ||
| 228 | props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46)); | ||
| 229 | props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48)); | ||
| 230 | props->max_mtu = IB_MTU_2048; | ||
| 231 | props->subnet_timeout = 0; | ||
| 232 | props->max_vl_num = out_mad->data[37] >> 4; | ||
| 233 | props->init_type_reply = 0; | ||
| 234 | props->state = IB_PORT_DOWN; | ||
| 235 | props->phys_state = state_to_phys_state(props->state); | ||
| 236 | props->active_mtu = IB_MTU_256; | ||
| 237 | spin_lock(&iboe->lock); | ||
| 238 | ndev = iboe->netdevs[port - 1]; | ||
| 239 | if (!ndev) | ||
| 240 | goto out; | ||
| 241 | |||
| 242 | tmp = iboe_get_mtu(ndev->mtu); | ||
| 243 | props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256; | ||
| 244 | |||
| 245 | props->state = netif_running(ndev) && netif_oper_up(ndev) ? | ||
| 246 | IB_PORT_ACTIVE : IB_PORT_DOWN; | ||
| 247 | props->phys_state = state_to_phys_state(props->state); | ||
| 248 | |||
| 249 | out: | ||
| 250 | spin_unlock(&iboe->lock); | ||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | |||
| 254 | static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, | ||
| 255 | struct ib_port_attr *props) | ||
| 256 | { | ||
| 257 | struct ib_smp *in_mad = NULL; | ||
| 258 | struct ib_smp *out_mad = NULL; | ||
| 259 | int err = -ENOMEM; | ||
| 260 | |||
| 261 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); | ||
| 262 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); | ||
| 263 | if (!in_mad || !out_mad) | ||
| 264 | goto out; | ||
| 265 | |||
| 266 | memset(props, 0, sizeof *props); | ||
| 267 | |||
| 268 | init_query_mad(in_mad); | ||
| 269 | in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; | ||
| 270 | in_mad->attr_mod = cpu_to_be32(port); | ||
| 271 | |||
| 272 | err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); | ||
| 273 | if (err) | ||
| 274 | goto out; | ||
| 275 | |||
| 276 | err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ? | ||
| 277 | ib_link_query_port(ibdev, port, props, out_mad) : | ||
| 278 | eth_link_query_port(ibdev, port, props, out_mad); | ||
| 279 | |||
| 199 | out: | 280 | out: |
| 200 | kfree(in_mad); | 281 | kfree(in_mad); |
| 201 | kfree(out_mad); | 282 | kfree(out_mad); |
| @@ -203,8 +284,8 @@ out: | |||
| 203 | return err; | 284 | return err; |
| 204 | } | 285 | } |
| 205 | 286 | ||
| 206 | static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, | 287 | static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, |
| 207 | union ib_gid *gid) | 288 | union ib_gid *gid) |
| 208 | { | 289 | { |
| 209 | struct ib_smp *in_mad = NULL; | 290 | struct ib_smp *in_mad = NULL; |
| 210 | struct ib_smp *out_mad = NULL; | 291 | struct ib_smp *out_mad = NULL; |
| @@ -241,6 +322,25 @@ out: | |||
| 241 | return err; | 322 | return err; |
| 242 | } | 323 | } |
| 243 | 324 | ||
| 325 | static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index, | ||
| 326 | union ib_gid *gid) | ||
| 327 | { | ||
| 328 | struct mlx4_ib_dev *dev = to_mdev(ibdev); | ||
| 329 | |||
| 330 | *gid = dev->iboe.gid_table[port - 1][index]; | ||
| 331 | |||
| 332 | return 0; | ||
| 333 | } | ||
| 334 | |||
| 335 | static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, | ||
| 336 | union ib_gid *gid) | ||
| 337 | { | ||
| 338 | if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND) | ||
| 339 | return __mlx4_ib_query_gid(ibdev, port, index, gid); | ||
| 340 | else | ||
| 341 | return iboe_query_gid(ibdev, port, index, gid); | ||
| 342 | } | ||
| 343 | |||
| 244 | static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, | 344 | static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, |
| 245 | u16 *pkey) | 345 | u16 *pkey) |
| 246 | { | 346 | { |
| @@ -307,6 +407,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, | |||
| 307 | { | 407 | { |
| 308 | struct mlx4_cmd_mailbox *mailbox; | 408 | struct mlx4_cmd_mailbox *mailbox; |
| 309 | int err; | 409 | int err; |
| 410 | u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH; | ||
| 310 | 411 | ||
| 311 | mailbox = mlx4_alloc_cmd_mailbox(dev->dev); | 412 | mailbox = mlx4_alloc_cmd_mailbox(dev->dev); |
| 312 | if (IS_ERR(mailbox)) | 413 | if (IS_ERR(mailbox)) |
| @@ -322,7 +423,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols, | |||
| 322 | ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); | 423 | ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask); |
| 323 | } | 424 | } |
| 324 | 425 | ||
| 325 | err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, | 426 | err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT, |
| 326 | MLX4_CMD_TIME_CLASS_B); | 427 | MLX4_CMD_TIME_CLASS_B); |
| 327 | 428 | ||
| 328 | mlx4_free_cmd_mailbox(dev->dev, mailbox); | 429 | mlx4_free_cmd_mailbox(dev->dev, mailbox); |
| @@ -465,18 +566,132 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd) | |||
| 465 | return 0; | 566 | return 0; |
| 466 | } | 567 | } |
| 467 | 568 | ||
| 569 | static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid) | ||
| 570 | { | ||
| 571 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); | ||
| 572 | struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); | ||
| 573 | struct mlx4_ib_gid_entry *ge; | ||
| 574 | |||
| 575 | ge = kzalloc(sizeof *ge, GFP_KERNEL); | ||
| 576 | if (!ge) | ||
| 577 | return -ENOMEM; | ||
| 578 | |||
| 579 | ge->gid = *gid; | ||
| 580 | if (mlx4_ib_add_mc(mdev, mqp, gid)) { | ||
| 581 | ge->port = mqp->port; | ||
| 582 | ge->added = 1; | ||
| 583 | } | ||
| 584 | |||
| 585 | mutex_lock(&mqp->mutex); | ||
| 586 | list_add_tail(&ge->list, &mqp->gid_list); | ||
| 587 | mutex_unlock(&mqp->mutex); | ||
| 588 | |||
| 589 | return 0; | ||
| 590 | } | ||
| 591 | |||
| 592 | int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, | ||
| 593 | union ib_gid *gid) | ||
| 594 | { | ||
| 595 | u8 mac[6]; | ||
| 596 | struct net_device *ndev; | ||
| 597 | int ret = 0; | ||
| 598 | |||
| 599 | if (!mqp->port) | ||
| 600 | return 0; | ||
| 601 | |||
| 602 | spin_lock(&mdev->iboe.lock); | ||
| 603 | ndev = mdev->iboe.netdevs[mqp->port - 1]; | ||
| 604 | if (ndev) | ||
| 605 | dev_hold(ndev); | ||
| 606 | spin_unlock(&mdev->iboe.lock); | ||
| 607 | |||
| 608 | if (ndev) { | ||
| 609 | rdma_get_mcast_mac((struct in6_addr *)gid, mac); | ||
| 610 | rtnl_lock(); | ||
| 611 | dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac); | ||
| 612 | ret = 1; | ||
| 613 | rtnl_unlock(); | ||
| 614 | dev_put(ndev); | ||
| 615 | } | ||
| 616 | |||
| 617 | return ret; | ||
| 618 | } | ||
| 619 | |||
| 468 | static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | 620 | static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) |
| 469 | { | 621 | { |
| 470 | return mlx4_multicast_attach(to_mdev(ibqp->device)->dev, | 622 | int err; |
| 471 | &to_mqp(ibqp)->mqp, gid->raw, | 623 | struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); |
| 472 | !!(to_mqp(ibqp)->flags & | 624 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); |
| 473 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)); | 625 | |
| 626 | err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags & | ||
| 627 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK)); | ||
| 628 | if (err) | ||
| 629 | return err; | ||
| 630 | |||
| 631 | err = add_gid_entry(ibqp, gid); | ||
| 632 | if (err) | ||
| 633 | goto err_add; | ||
| 634 | |||
| 635 | return 0; | ||
| 636 | |||
| 637 | err_add: | ||
| 638 | mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw); | ||
| 639 | return err; | ||
| 640 | } | ||
| 641 | |||
| 642 | static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw) | ||
| 643 | { | ||
| 644 | struct mlx4_ib_gid_entry *ge; | ||
| 645 | struct mlx4_ib_gid_entry *tmp; | ||
| 646 | struct mlx4_ib_gid_entry *ret = NULL; | ||
| 647 | |||
| 648 | list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { | ||
| 649 | if (!memcmp(raw, ge->gid.raw, 16)) { | ||
| 650 | ret = ge; | ||
| 651 | break; | ||
| 652 | } | ||
| 653 | } | ||
| 654 | |||
| 655 | return ret; | ||
| 474 | } | 656 | } |
| 475 | 657 | ||
| 476 | static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | 658 | static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) |
| 477 | { | 659 | { |
| 478 | return mlx4_multicast_detach(to_mdev(ibqp->device)->dev, | 660 | int err; |
| 479 | &to_mqp(ibqp)->mqp, gid->raw); | 661 | struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); |
| 662 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); | ||
| 663 | u8 mac[6]; | ||
| 664 | struct net_device *ndev; | ||
| 665 | struct mlx4_ib_gid_entry *ge; | ||
| 666 | |||
| 667 | err = mlx4_multicast_detach(mdev->dev, | ||
| 668 | &mqp->mqp, gid->raw); | ||
| 669 | if (err) | ||
| 670 | return err; | ||
| 671 | |||
| 672 | mutex_lock(&mqp->mutex); | ||
| 673 | ge = find_gid_entry(mqp, gid->raw); | ||
| 674 | if (ge) { | ||
| 675 | spin_lock(&mdev->iboe.lock); | ||
| 676 | ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL; | ||
| 677 | if (ndev) | ||
| 678 | dev_hold(ndev); | ||
| 679 | spin_unlock(&mdev->iboe.lock); | ||
| 680 | rdma_get_mcast_mac((struct in6_addr *)gid, mac); | ||
| 681 | if (ndev) { | ||
| 682 | rtnl_lock(); | ||
| 683 | dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac); | ||
| 684 | rtnl_unlock(); | ||
| 685 | dev_put(ndev); | ||
| 686 | } | ||
| 687 | list_del(&ge->list); | ||
| 688 | kfree(ge); | ||
| 689 | } else | ||
| 690 | printk(KERN_WARNING "could not find mgid entry\n"); | ||
| 691 | |||
| 692 | mutex_unlock(&mqp->mutex); | ||
| 693 | |||
| 694 | return 0; | ||
| 480 | } | 695 | } |
| 481 | 696 | ||
| 482 | static int init_node_data(struct mlx4_ib_dev *dev) | 697 | static int init_node_data(struct mlx4_ib_dev *dev) |
| @@ -561,15 +776,215 @@ static struct device_attribute *mlx4_class_attributes[] = { | |||
| 561 | &dev_attr_board_id | 776 | &dev_attr_board_id |
| 562 | }; | 777 | }; |
| 563 | 778 | ||
| 779 | static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev) | ||
| 780 | { | ||
| 781 | memcpy(eui, dev->dev_addr, 3); | ||
| 782 | memcpy(eui + 5, dev->dev_addr + 3, 3); | ||
| 783 | if (vlan_id < 0x1000) { | ||
| 784 | eui[3] = vlan_id >> 8; | ||
| 785 | eui[4] = vlan_id & 0xff; | ||
| 786 | } else { | ||
| 787 | eui[3] = 0xff; | ||
| 788 | eui[4] = 0xfe; | ||
| 789 | } | ||
| 790 | eui[0] ^= 2; | ||
| 791 | } | ||
| 792 | |||
| 793 | static void update_gids_task(struct work_struct *work) | ||
| 794 | { | ||
| 795 | struct update_gid_work *gw = container_of(work, struct update_gid_work, work); | ||
| 796 | struct mlx4_cmd_mailbox *mailbox; | ||
| 797 | union ib_gid *gids; | ||
| 798 | int err; | ||
| 799 | struct mlx4_dev *dev = gw->dev->dev; | ||
| 800 | struct ib_event event; | ||
| 801 | |||
| 802 | mailbox = mlx4_alloc_cmd_mailbox(dev); | ||
| 803 | if (IS_ERR(mailbox)) { | ||
| 804 | printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox)); | ||
| 805 | return; | ||
| 806 | } | ||
| 807 | |||
| 808 | gids = mailbox->buf; | ||
| 809 | memcpy(gids, gw->gids, sizeof gw->gids); | ||
| 810 | |||
| 811 | err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port, | ||
| 812 | 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B); | ||
| 813 | if (err) | ||
| 814 | printk(KERN_WARNING "set port command failed\n"); | ||
| 815 | else { | ||
| 816 | memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids); | ||
| 817 | event.device = &gw->dev->ib_dev; | ||
| 818 | event.element.port_num = gw->port; | ||
| 819 | event.event = IB_EVENT_LID_CHANGE; | ||
| 820 | ib_dispatch_event(&event); | ||
| 821 | } | ||
| 822 | |||
| 823 | mlx4_free_cmd_mailbox(dev, mailbox); | ||
| 824 | kfree(gw); | ||
| 825 | } | ||
| 826 | |||
| 827 | static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear) | ||
| 828 | { | ||
| 829 | struct net_device *ndev = dev->iboe.netdevs[port - 1]; | ||
| 830 | struct update_gid_work *work; | ||
| 831 | struct net_device *tmp; | ||
| 832 | int i; | ||
| 833 | u8 *hits; | ||
| 834 | int ret; | ||
| 835 | union ib_gid gid; | ||
| 836 | int free; | ||
| 837 | int found; | ||
| 838 | int need_update = 0; | ||
| 839 | u16 vid; | ||
| 840 | |||
| 841 | work = kzalloc(sizeof *work, GFP_ATOMIC); | ||
| 842 | if (!work) | ||
| 843 | return -ENOMEM; | ||
| 844 | |||
| 845 | hits = kzalloc(128, GFP_ATOMIC); | ||
| 846 | if (!hits) { | ||
| 847 | ret = -ENOMEM; | ||
| 848 | goto out; | ||
| 849 | } | ||
| 850 | |||
| 851 | read_lock(&dev_base_lock); | ||
| 852 | for_each_netdev(&init_net, tmp) { | ||
| 853 | if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) { | ||
| 854 | gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL); | ||
| 855 | vid = rdma_vlan_dev_vlan_id(tmp); | ||
| 856 | mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev); | ||
| 857 | found = 0; | ||
| 858 | free = -1; | ||
| 859 | for (i = 0; i < 128; ++i) { | ||
| 860 | if (free < 0 && | ||
| 861 | !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid)) | ||
| 862 | free = i; | ||
| 863 | if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) { | ||
| 864 | hits[i] = 1; | ||
| 865 | found = 1; | ||
| 866 | break; | ||
| 867 | } | ||
| 868 | } | ||
| 869 | |||
| 870 | if (!found) { | ||
| 871 | if (tmp == ndev && | ||
| 872 | (memcmp(&dev->iboe.gid_table[port - 1][0], | ||
| 873 | &gid, sizeof gid) || | ||
| 874 | !memcmp(&dev->iboe.gid_table[port - 1][0], | ||
| 875 | &zgid, sizeof gid))) { | ||
| 876 | dev->iboe.gid_table[port - 1][0] = gid; | ||
| 877 | ++need_update; | ||
| 878 | hits[0] = 1; | ||
| 879 | } else if (free >= 0) { | ||
| 880 | dev->iboe.gid_table[port - 1][free] = gid; | ||
| 881 | hits[free] = 1; | ||
| 882 | ++need_update; | ||
| 883 | } | ||
| 884 | } | ||
| 885 | } | ||
| 886 | } | ||
| 887 | read_unlock(&dev_base_lock); | ||
| 888 | |||
| 889 | for (i = 0; i < 128; ++i) | ||
| 890 | if (!hits[i]) { | ||
| 891 | if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid)) | ||
| 892 | ++need_update; | ||
| 893 | dev->iboe.gid_table[port - 1][i] = zgid; | ||
| 894 | } | ||
| 895 | |||
| 896 | if (need_update) { | ||
| 897 | memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids); | ||
| 898 | INIT_WORK(&work->work, update_gids_task); | ||
| 899 | work->port = port; | ||
| 900 | work->dev = dev; | ||
| 901 | queue_work(wq, &work->work); | ||
| 902 | } else | ||
| 903 | kfree(work); | ||
| 904 | |||
| 905 | kfree(hits); | ||
| 906 | return 0; | ||
| 907 | |||
| 908 | out: | ||
| 909 | kfree(work); | ||
| 910 | return ret; | ||
| 911 | } | ||
| 912 | |||
| 913 | static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event) | ||
| 914 | { | ||
| 915 | switch (event) { | ||
| 916 | case NETDEV_UP: | ||
| 917 | case NETDEV_CHANGEADDR: | ||
| 918 | update_ipv6_gids(dev, port, 0); | ||
| 919 | break; | ||
| 920 | |||
| 921 | case NETDEV_DOWN: | ||
| 922 | update_ipv6_gids(dev, port, 1); | ||
| 923 | dev->iboe.netdevs[port - 1] = NULL; | ||
| 924 | } | ||
| 925 | } | ||
| 926 | |||
| 927 | static void netdev_added(struct mlx4_ib_dev *dev, int port) | ||
| 928 | { | ||
| 929 | update_ipv6_gids(dev, port, 0); | ||
| 930 | } | ||
| 931 | |||
| 932 | static void netdev_removed(struct mlx4_ib_dev *dev, int port) | ||
| 933 | { | ||
| 934 | update_ipv6_gids(dev, port, 1); | ||
| 935 | } | ||
| 936 | |||
| 937 | static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event, | ||
| 938 | void *ptr) | ||
| 939 | { | ||
| 940 | struct net_device *dev = ptr; | ||
| 941 | struct mlx4_ib_dev *ibdev; | ||
| 942 | struct net_device *oldnd; | ||
| 943 | struct mlx4_ib_iboe *iboe; | ||
| 944 | int port; | ||
| 945 | |||
| 946 | if (!net_eq(dev_net(dev), &init_net)) | ||
| 947 | return NOTIFY_DONE; | ||
| 948 | |||
| 949 | ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); | ||
| 950 | iboe = &ibdev->iboe; | ||
| 951 | |||
| 952 | spin_lock(&iboe->lock); | ||
| 953 | mlx4_foreach_ib_transport_port(port, ibdev->dev) { | ||
| 954 | oldnd = iboe->netdevs[port - 1]; | ||
| 955 | iboe->netdevs[port - 1] = | ||
| 956 | mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port); | ||
| 957 | if (oldnd != iboe->netdevs[port - 1]) { | ||
| 958 | if (iboe->netdevs[port - 1]) | ||
| 959 | netdev_added(ibdev, port); | ||
| 960 | else | ||
| 961 | netdev_removed(ibdev, port); | ||
| 962 | } | ||
| 963 | } | ||
| 964 | |||
| 965 | if (dev == iboe->netdevs[0] || | ||
| 966 | (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0])) | ||
| 967 | handle_en_event(ibdev, 1, event); | ||
| 968 | else if (dev == iboe->netdevs[1] | ||
| 969 | || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1])) | ||
| 970 | handle_en_event(ibdev, 2, event); | ||
| 971 | |||
| 972 | spin_unlock(&iboe->lock); | ||
| 973 | |||
| 974 | return NOTIFY_DONE; | ||
| 975 | } | ||
| 976 | |||
| 564 | static void *mlx4_ib_add(struct mlx4_dev *dev) | 977 | static void *mlx4_ib_add(struct mlx4_dev *dev) |
| 565 | { | 978 | { |
| 566 | struct mlx4_ib_dev *ibdev; | 979 | struct mlx4_ib_dev *ibdev; |
| 567 | int num_ports = 0; | 980 | int num_ports = 0; |
| 568 | int i; | 981 | int i; |
| 982 | int err; | ||
| 983 | struct mlx4_ib_iboe *iboe; | ||
| 569 | 984 | ||
| 570 | printk_once(KERN_INFO "%s", mlx4_ib_version); | 985 | printk_once(KERN_INFO "%s", mlx4_ib_version); |
| 571 | 986 | ||
| 572 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) | 987 | mlx4_foreach_ib_transport_port(i, dev) |
| 573 | num_ports++; | 988 | num_ports++; |
| 574 | 989 | ||
| 575 | /* No point in registering a device with no ports... */ | 990 | /* No point in registering a device with no ports... */ |
| @@ -582,6 +997,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 582 | return NULL; | 997 | return NULL; |
| 583 | } | 998 | } |
| 584 | 999 | ||
| 1000 | iboe = &ibdev->iboe; | ||
| 1001 | |||
| 585 | if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) | 1002 | if (mlx4_pd_alloc(dev, &ibdev->priv_pdn)) |
| 586 | goto err_dealloc; | 1003 | goto err_dealloc; |
| 587 | 1004 | ||
| @@ -630,6 +1047,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 630 | 1047 | ||
| 631 | ibdev->ib_dev.query_device = mlx4_ib_query_device; | 1048 | ibdev->ib_dev.query_device = mlx4_ib_query_device; |
| 632 | ibdev->ib_dev.query_port = mlx4_ib_query_port; | 1049 | ibdev->ib_dev.query_port = mlx4_ib_query_port; |
| 1050 | ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer; | ||
| 633 | ibdev->ib_dev.query_gid = mlx4_ib_query_gid; | 1051 | ibdev->ib_dev.query_gid = mlx4_ib_query_gid; |
| 634 | ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; | 1052 | ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; |
| 635 | ibdev->ib_dev.modify_device = mlx4_ib_modify_device; | 1053 | ibdev->ib_dev.modify_device = mlx4_ib_modify_device; |
| @@ -674,6 +1092,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 674 | ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; | 1092 | ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr; |
| 675 | ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; | 1093 | ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc; |
| 676 | 1094 | ||
| 1095 | spin_lock_init(&iboe->lock); | ||
| 1096 | |||
| 677 | if (init_node_data(ibdev)) | 1097 | if (init_node_data(ibdev)) |
| 678 | goto err_map; | 1098 | goto err_map; |
| 679 | 1099 | ||
| @@ -686,16 +1106,28 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) | |||
| 686 | if (mlx4_ib_mad_init(ibdev)) | 1106 | if (mlx4_ib_mad_init(ibdev)) |
| 687 | goto err_reg; | 1107 | goto err_reg; |
| 688 | 1108 | ||
| 1109 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) { | ||
| 1110 | iboe->nb.notifier_call = mlx4_ib_netdev_event; | ||
| 1111 | err = register_netdevice_notifier(&iboe->nb); | ||
| 1112 | if (err) | ||
| 1113 | goto err_reg; | ||
| 1114 | } | ||
| 1115 | |||
| 689 | for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) { | 1116 | for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) { |
| 690 | if (device_create_file(&ibdev->ib_dev.dev, | 1117 | if (device_create_file(&ibdev->ib_dev.dev, |
| 691 | mlx4_class_attributes[i])) | 1118 | mlx4_class_attributes[i])) |
| 692 | goto err_reg; | 1119 | goto err_notif; |
| 693 | } | 1120 | } |
| 694 | 1121 | ||
| 695 | ibdev->ib_active = true; | 1122 | ibdev->ib_active = true; |
| 696 | 1123 | ||
| 697 | return ibdev; | 1124 | return ibdev; |
| 698 | 1125 | ||
| 1126 | err_notif: | ||
| 1127 | if (unregister_netdevice_notifier(&ibdev->iboe.nb)) | ||
| 1128 | printk(KERN_WARNING "failure unregistering notifier\n"); | ||
| 1129 | flush_workqueue(wq); | ||
| 1130 | |||
| 699 | err_reg: | 1131 | err_reg: |
| 700 | ib_unregister_device(&ibdev->ib_dev); | 1132 | ib_unregister_device(&ibdev->ib_dev); |
| 701 | 1133 | ||
| @@ -721,11 +1153,16 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) | |||
| 721 | 1153 | ||
| 722 | mlx4_ib_mad_cleanup(ibdev); | 1154 | mlx4_ib_mad_cleanup(ibdev); |
| 723 | ib_unregister_device(&ibdev->ib_dev); | 1155 | ib_unregister_device(&ibdev->ib_dev); |
| 1156 | if (ibdev->iboe.nb.notifier_call) { | ||
| 1157 | if (unregister_netdevice_notifier(&ibdev->iboe.nb)) | ||
| 1158 | printk(KERN_WARNING "failure unregistering notifier\n"); | ||
| 1159 | ibdev->iboe.nb.notifier_call = NULL; | ||
| 1160 | } | ||
| 1161 | iounmap(ibdev->uar_map); | ||
| 724 | 1162 | ||
| 725 | for (p = 1; p <= ibdev->num_ports; ++p) | 1163 | mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB) |
| 726 | mlx4_CLOSE_PORT(dev, p); | 1164 | mlx4_CLOSE_PORT(dev, p); |
| 727 | 1165 | ||
| 728 | iounmap(ibdev->uar_map); | ||
| 729 | mlx4_uar_free(dev, &ibdev->priv_uar); | 1166 | mlx4_uar_free(dev, &ibdev->priv_uar); |
| 730 | mlx4_pd_free(dev, ibdev->priv_pdn); | 1167 | mlx4_pd_free(dev, ibdev->priv_pdn); |
| 731 | ib_dealloc_device(&ibdev->ib_dev); | 1168 | ib_dealloc_device(&ibdev->ib_dev); |
| @@ -765,19 +1202,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, | |||
| 765 | } | 1202 | } |
| 766 | 1203 | ||
| 767 | static struct mlx4_interface mlx4_ib_interface = { | 1204 | static struct mlx4_interface mlx4_ib_interface = { |
| 768 | .add = mlx4_ib_add, | 1205 | .add = mlx4_ib_add, |
| 769 | .remove = mlx4_ib_remove, | 1206 | .remove = mlx4_ib_remove, |
| 770 | .event = mlx4_ib_event | 1207 | .event = mlx4_ib_event, |
| 1208 | .protocol = MLX4_PROTOCOL_IB | ||
| 771 | }; | 1209 | }; |
| 772 | 1210 | ||
| 773 | static int __init mlx4_ib_init(void) | 1211 | static int __init mlx4_ib_init(void) |
| 774 | { | 1212 | { |
| 775 | return mlx4_register_interface(&mlx4_ib_interface); | 1213 | int err; |
| 1214 | |||
| 1215 | wq = create_singlethread_workqueue("mlx4_ib"); | ||
| 1216 | if (!wq) | ||
| 1217 | return -ENOMEM; | ||
| 1218 | |||
| 1219 | err = mlx4_register_interface(&mlx4_ib_interface); | ||
| 1220 | if (err) { | ||
| 1221 | destroy_workqueue(wq); | ||
| 1222 | return err; | ||
| 1223 | } | ||
| 1224 | |||
| 1225 | return 0; | ||
| 776 | } | 1226 | } |
| 777 | 1227 | ||
| 778 | static void __exit mlx4_ib_cleanup(void) | 1228 | static void __exit mlx4_ib_cleanup(void) |
| 779 | { | 1229 | { |
| 780 | mlx4_unregister_interface(&mlx4_ib_interface); | 1230 | mlx4_unregister_interface(&mlx4_ib_interface); |
| 1231 | destroy_workqueue(wq); | ||
| 781 | } | 1232 | } |
| 782 | 1233 | ||
| 783 | module_init(mlx4_ib_init); | 1234 | module_init(mlx4_ib_init); |
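
update_ipv6_gids() above keeps a per-port software GID table in sync with the net devices (and their VLANs) backing each Ethernet port, then pushes it to firmware with MLX4_CMD_SET_PORT from the workqueue. The sketch below only restates how one table entry is formed, as a worked example; build_iboe_gid() is illustrative, the driver does the same inline. For MAC 00:02:c9:12:34:56 the result is fe80::0202:c9ff:fe12:3456 when untagged and fe80::0202:c900:0512:3456 on VLAN 5 (the VLAN ID replaces the ff:fe filler bytes).

static void build_iboe_gid(union ib_gid *gid, u16 vlan_id, struct net_device *dev)
{
	memset(gid, 0, sizeof *gid);
	gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000ULL);
	mlx4_addrconf_ifid_eui48(&gid->raw[8], vlan_id, dev);
}
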
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 3486d7675e56..2a322f21049f 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
| @@ -112,6 +112,13 @@ enum mlx4_ib_qp_flags { | |||
| 112 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 1, | 112 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 1, |
| 113 | }; | 113 | }; |
| 114 | 114 | ||
| 115 | struct mlx4_ib_gid_entry { | ||
| 116 | struct list_head list; | ||
| 117 | union ib_gid gid; | ||
| 118 | int added; | ||
| 119 | u8 port; | ||
| 120 | }; | ||
| 121 | |||
| 115 | struct mlx4_ib_qp { | 122 | struct mlx4_ib_qp { |
| 116 | struct ib_qp ibqp; | 123 | struct ib_qp ibqp; |
| 117 | struct mlx4_qp mqp; | 124 | struct mlx4_qp mqp; |
| @@ -138,6 +145,8 @@ struct mlx4_ib_qp { | |||
| 138 | u8 resp_depth; | 145 | u8 resp_depth; |
| 139 | u8 sq_no_prefetch; | 146 | u8 sq_no_prefetch; |
| 140 | u8 state; | 147 | u8 state; |
| 148 | int mlx_type; | ||
| 149 | struct list_head gid_list; | ||
| 141 | }; | 150 | }; |
| 142 | 151 | ||
| 143 | struct mlx4_ib_srq { | 152 | struct mlx4_ib_srq { |
| @@ -157,7 +166,14 @@ struct mlx4_ib_srq { | |||
| 157 | 166 | ||
| 158 | struct mlx4_ib_ah { | 167 | struct mlx4_ib_ah { |
| 159 | struct ib_ah ibah; | 168 | struct ib_ah ibah; |
| 160 | struct mlx4_av av; | 169 | union mlx4_ext_av av; |
| 170 | }; | ||
| 171 | |||
| 172 | struct mlx4_ib_iboe { | ||
| 173 | spinlock_t lock; | ||
| 174 | struct net_device *netdevs[MLX4_MAX_PORTS]; | ||
| 175 | struct notifier_block nb; | ||
| 176 | union ib_gid gid_table[MLX4_MAX_PORTS][128]; | ||
| 161 | }; | 177 | }; |
| 162 | 178 | ||
| 163 | struct mlx4_ib_dev { | 179 | struct mlx4_ib_dev { |
| @@ -176,6 +192,7 @@ struct mlx4_ib_dev { | |||
| 176 | 192 | ||
| 177 | struct mutex cap_mask_mutex; | 193 | struct mutex cap_mask_mutex; |
| 178 | bool ib_active; | 194 | bool ib_active; |
| 195 | struct mlx4_ib_iboe iboe; | ||
| 179 | }; | 196 | }; |
| 180 | 197 | ||
| 181 | static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) | 198 | static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev) |
| @@ -314,9 +331,20 @@ int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, int npages, | |||
| 314 | int mlx4_ib_unmap_fmr(struct list_head *fmr_list); | 331 | int mlx4_ib_unmap_fmr(struct list_head *fmr_list); |
| 315 | int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr); | 332 | int mlx4_ib_fmr_dealloc(struct ib_fmr *fmr); |
| 316 | 333 | ||
| 334 | int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr, | ||
| 335 | u8 *mac, int *is_mcast, u8 port); | ||
| 336 | |||
| 317 | static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) | 337 | static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) |
| 318 | { | 338 | { |
| 319 | return !!(ah->av.g_slid & 0x80); | 339 | u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3; |
| 340 | |||
| 341 | if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET) | ||
| 342 | return 1; | ||
| 343 | |||
| 344 | return !!(ah->av.ib.g_slid & 0x80); | ||
| 320 | } | 345 | } |
| 321 | 346 | ||
| 347 | int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, | ||
| 348 | union ib_gid *gid); | ||
| 349 | |||
| 322 | #endif /* MLX4_IB_H */ | 350 | #endif /* MLX4_IB_H */ |
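
Because mlx4_ib_ah_grh_present() now reports a GRH for every Ethernet link-layer port, and mlx4_ib_create_ah() rejects GRH-less attributes there, anything addressing a RoCE port has to fill in the global route. A hedged sketch of such address-handle attributes; the port number, SGID index and hop limit are illustrative values, not requirements of the driver.

#include <rdma/ib_verbs.h>

static struct ib_ah_attr roce_ah_attr = {
	.port_num  = 1,
	.sl        = 0,
	.ah_flags  = IB_AH_GRH,		/* mandatory on an Ethernet link layer */
	.grh = {
		.sgid_index = 0,
		.hop_limit  = 1,
		/* .dgid: the peer's GID, derived from its MAC and VLAN */
	},
};
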
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 6a60827b2301..9a7794ac34c1 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
| @@ -33,9 +33,11 @@ | |||
| 33 | 33 | ||
| 34 | #include <linux/log2.h> | 34 | #include <linux/log2.h> |
| 35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
| 36 | #include <linux/netdevice.h> | ||
| 36 | 37 | ||
| 37 | #include <rdma/ib_cache.h> | 38 | #include <rdma/ib_cache.h> |
| 38 | #include <rdma/ib_pack.h> | 39 | #include <rdma/ib_pack.h> |
| 40 | #include <rdma/ib_addr.h> | ||
| 39 | 41 | ||
| 40 | #include <linux/mlx4/qp.h> | 42 | #include <linux/mlx4/qp.h> |
| 41 | 43 | ||
| @@ -48,17 +50,26 @@ enum { | |||
| 48 | 50 | ||
| 49 | enum { | 51 | enum { |
| 50 | MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83, | 52 | MLX4_IB_DEFAULT_SCHED_QUEUE = 0x83, |
| 51 | MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f | 53 | MLX4_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f, |
| 54 | MLX4_IB_LINK_TYPE_IB = 0, | ||
| 55 | MLX4_IB_LINK_TYPE_ETH = 1 | ||
| 52 | }; | 56 | }; |
| 53 | 57 | ||
| 54 | enum { | 58 | enum { |
| 55 | /* | 59 | /* |
| 56 | * Largest possible UD header: send with GRH and immediate data. | 60 | * Largest possible UD header: send with GRH and immediate |
| 61 | * data plus 18 bytes for an Ethernet header with VLAN/802.1Q | ||
| 62 | * tag. (LRH would only use 8 bytes, so Ethernet is the | ||
| 63 | * biggest case) | ||
| 57 | */ | 64 | */ |
| 58 | MLX4_IB_UD_HEADER_SIZE = 72, | 65 | MLX4_IB_UD_HEADER_SIZE = 82, |
| 59 | MLX4_IB_LSO_HEADER_SPARE = 128, | 66 | MLX4_IB_LSO_HEADER_SPARE = 128, |
| 60 | }; | 67 | }; |
| 61 | 68 | ||
| 69 | enum { | ||
| 70 | MLX4_IB_IBOE_ETHERTYPE = 0x8915 | ||
| 71 | }; | ||
| 72 | |||
| 62 | struct mlx4_ib_sqp { | 73 | struct mlx4_ib_sqp { |
| 63 | struct mlx4_ib_qp qp; | 74 | struct mlx4_ib_qp qp; |
| 64 | int pkey_index; | 75 | int pkey_index; |
| @@ -462,6 +473,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
| 462 | mutex_init(&qp->mutex); | 473 | mutex_init(&qp->mutex); |
| 463 | spin_lock_init(&qp->sq.lock); | 474 | spin_lock_init(&qp->sq.lock); |
| 464 | spin_lock_init(&qp->rq.lock); | 475 | spin_lock_init(&qp->rq.lock); |
| 476 | INIT_LIST_HEAD(&qp->gid_list); | ||
| 465 | 477 | ||
| 466 | qp->state = IB_QPS_RESET; | 478 | qp->state = IB_QPS_RESET; |
| 467 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | 479 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) |
| @@ -649,6 +661,16 @@ static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *re | |||
| 649 | } | 661 | } |
| 650 | } | 662 | } |
| 651 | 663 | ||
| 664 | static void del_gid_entries(struct mlx4_ib_qp *qp) | ||
| 665 | { | ||
| 666 | struct mlx4_ib_gid_entry *ge, *tmp; | ||
| 667 | |||
| 668 | list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { | ||
| 669 | list_del(&ge->list); | ||
| 670 | kfree(ge); | ||
| 671 | } | ||
| 672 | } | ||
| 673 | |||
| 652 | static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | 674 | static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, |
| 653 | int is_user) | 675 | int is_user) |
| 654 | { | 676 | { |
| @@ -695,6 +717,8 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, | |||
| 695 | if (!qp->ibqp.srq) | 717 | if (!qp->ibqp.srq) |
| 696 | mlx4_db_free(dev->dev, &qp->db); | 718 | mlx4_db_free(dev->dev, &qp->db); |
| 697 | } | 719 | } |
| 720 | |||
| 721 | del_gid_entries(qp); | ||
| 698 | } | 722 | } |
| 699 | 723 | ||
| 700 | struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, | 724 | struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, |
| @@ -852,6 +876,14 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port) | |||
| 852 | static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, | 876 | static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, |
| 853 | struct mlx4_qp_path *path, u8 port) | 877 | struct mlx4_qp_path *path, u8 port) |
| 854 | { | 878 | { |
| 879 | int err; | ||
| 880 | int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) == | ||
| 881 | IB_LINK_LAYER_ETHERNET; | ||
| 882 | u8 mac[6]; | ||
| 883 | int is_mcast; | ||
| 884 | u16 vlan_tag; | ||
| 885 | int vidx; | ||
| 886 | |||
| 855 | path->grh_mylmc = ah->src_path_bits & 0x7f; | 887 | path->grh_mylmc = ah->src_path_bits & 0x7f; |
| 856 | path->rlid = cpu_to_be16(ah->dlid); | 888 | path->rlid = cpu_to_be16(ah->dlid); |
| 857 | if (ah->static_rate) { | 889 | if (ah->static_rate) { |
| @@ -879,12 +911,49 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah, | |||
| 879 | memcpy(path->rgid, ah->grh.dgid.raw, 16); | 911 | memcpy(path->rgid, ah->grh.dgid.raw, 16); |
| 880 | } | 912 | } |
| 881 | 913 | ||
| 882 | path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | | 914 | if (is_eth) { |
| 883 | ((port - 1) << 6) | ((ah->sl & 0xf) << 2); | 915 | path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | |
| 916 | ((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1); | ||
| 917 | |||
| 918 | if (!(ah->ah_flags & IB_AH_GRH)) | ||
| 919 | return -1; | ||
| 920 | |||
| 921 | err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port); | ||
| 922 | if (err) | ||
| 923 | return err; | ||
| 924 | |||
| 925 | memcpy(path->dmac, mac, 6); | ||
| 926 | path->ackto = MLX4_IB_LINK_TYPE_ETH; | ||
| 927 | /* use index 0 into MAC table for IBoE */ | ||
| 928 | path->grh_mylmc &= 0x80; | ||
| 929 | |||
| 930 | vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]); | ||
| 931 | if (vlan_tag < 0x1000) { | ||
| 932 | if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx)) | ||
| 933 | return -ENOENT; | ||
| 934 | |||
| 935 | path->vlan_index = vidx; | ||
| 936 | path->fl = 1 << 6; | ||
| 937 | } | ||
| 938 | } else | ||
| 939 | path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | | ||
| 940 | ((port - 1) << 6) | ((ah->sl & 0xf) << 2); | ||
| 884 | 941 | ||
| 885 | return 0; | 942 | return 0; |
| 886 | } | 943 | } |
| 887 | 944 | ||
| 945 | static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) | ||
| 946 | { | ||
| 947 | struct mlx4_ib_gid_entry *ge, *tmp; | ||
| 948 | |||
| 949 | list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { | ||
| 950 | if (!ge->added && mlx4_ib_add_mc(dev, qp, &ge->gid)) { | ||
| 951 | ge->added = 1; | ||
| 952 | ge->port = qp->port; | ||
| 953 | } | ||
| 954 | } | ||
| 955 | } | ||
| 956 | |||
| 888 | static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | 957 | static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, |
| 889 | const struct ib_qp_attr *attr, int attr_mask, | 958 | const struct ib_qp_attr *attr, int attr_mask, |
| 890 | enum ib_qp_state cur_state, enum ib_qp_state new_state) | 959 | enum ib_qp_state cur_state, enum ib_qp_state new_state) |
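Reviewer note on the hunk above: for an Ethernet (IBoE) port the service level is packed into path->sched_queue with a different layout than for an IB port, the low three SL bits landing in bits 5:3 and the fourth bit in bit 2. The matching decode is added further down in to_ib_ah_attr(). Below is a minimal userspace round-trip check of that packing; it is illustrative only, and MLX4_IB_DEFAULT_SCHED_QUEUE is assumed to be 0x83 as in the driver header (all that matters is that it leaves bits 2..6 clear for the SL and port-select fields).

    /* Round-trip check of the sched_queue SL/port packing (illustrative). */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MLX4_IB_DEFAULT_SCHED_QUEUE 0x83   /* assumed value */

    static uint8_t encode_ib(uint8_t port, uint8_t sl)
    {
            return MLX4_IB_DEFAULT_SCHED_QUEUE |
                   ((port - 1) << 6) | ((sl & 0xf) << 2);
    }

    static uint8_t encode_eth(uint8_t port, uint8_t sl)
    {
            return MLX4_IB_DEFAULT_SCHED_QUEUE |
                   ((port - 1) << 6) | ((sl & 7) << 3) | ((sl & 8) >> 1);
    }

    static uint8_t decode_ib(uint8_t sq)  { return (sq >> 2) & 0xf; }
    static uint8_t decode_eth(uint8_t sq) { return ((sq >> 3) & 0x7) | ((sq & 4) << 1); }

    int main(void)
    {
            for (uint8_t port = 1; port <= 2; port++) {
                    for (uint8_t sl = 0; sl < 16; sl++) {
                            assert(decode_ib(encode_ib(port, sl)) == sl);
                            assert(decode_eth(encode_eth(port, sl)) == sl);
                    }
            }
            puts("SL 0..15 survives both packings");
            return 0;
    }

Both layouts keep bit 6 free for the port select, which is why to_ib_ah_attr() can still recover the port from sched_queue & 0x40.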
| @@ -980,7 +1049,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 980 | } | 1049 | } |
| 981 | 1050 | ||
| 982 | if (attr_mask & IB_QP_TIMEOUT) { | 1051 | if (attr_mask & IB_QP_TIMEOUT) { |
| 983 | context->pri_path.ackto = attr->timeout << 3; | 1052 | context->pri_path.ackto |= attr->timeout << 3; |
| 984 | optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT; | 1053 | optpar |= MLX4_QP_OPTPAR_ACK_TIMEOUT; |
| 985 | } | 1054 | } |
| 986 | 1055 | ||
| @@ -1118,8 +1187,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
| 1118 | qp->atomic_rd_en = attr->qp_access_flags; | 1187 | qp->atomic_rd_en = attr->qp_access_flags; |
| 1119 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) | 1188 | if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) |
| 1120 | qp->resp_depth = attr->max_dest_rd_atomic; | 1189 | qp->resp_depth = attr->max_dest_rd_atomic; |
| 1121 | if (attr_mask & IB_QP_PORT) | 1190 | if (attr_mask & IB_QP_PORT) { |
| 1122 | qp->port = attr->port_num; | 1191 | qp->port = attr->port_num; |
| 1192 | update_mcg_macs(dev, qp); | ||
| 1193 | } | ||
| 1123 | if (attr_mask & IB_QP_ALT_PATH) | 1194 | if (attr_mask & IB_QP_ALT_PATH) |
| 1124 | qp->alt_port = attr->alt_port_num; | 1195 | qp->alt_port = attr->alt_port_num; |
| 1125 | 1196 | ||
| @@ -1221,40 +1292,59 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
| 1221 | struct mlx4_wqe_mlx_seg *mlx = wqe; | 1292 | struct mlx4_wqe_mlx_seg *mlx = wqe; |
| 1222 | struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; | 1293 | struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; |
| 1223 | struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); | 1294 | struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); |
| 1295 | union ib_gid sgid; | ||
| 1224 | u16 pkey; | 1296 | u16 pkey; |
| 1225 | int send_size; | 1297 | int send_size; |
| 1226 | int header_size; | 1298 | int header_size; |
| 1227 | int spc; | 1299 | int spc; |
| 1228 | int i; | 1300 | int i; |
| 1301 | int is_eth; | ||
| 1302 | int is_vlan = 0; | ||
| 1303 | int is_grh; | ||
| 1304 | u16 vlan; | ||
| 1229 | 1305 | ||
| 1230 | send_size = 0; | 1306 | send_size = 0; |
| 1231 | for (i = 0; i < wr->num_sge; ++i) | 1307 | for (i = 0; i < wr->num_sge; ++i) |
| 1232 | send_size += wr->sg_list[i].length; | 1308 | send_size += wr->sg_list[i].length; |
| 1233 | 1309 | ||
| 1234 | ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header); | 1310 | is_eth = rdma_port_get_link_layer(sqp->qp.ibqp.device, sqp->qp.port) == IB_LINK_LAYER_ETHERNET; |
| 1311 | is_grh = mlx4_ib_ah_grh_present(ah); | ||
| 1312 | if (is_eth) { | ||
| 1313 | ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, | ||
| 1314 | ah->av.ib.gid_index, &sgid); | ||
| 1315 | vlan = rdma_get_vlan_id(&sgid); | ||
| 1316 | is_vlan = vlan < 0x1000; | ||
| 1317 | } | ||
| 1318 | ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header); | ||
| 1319 | |||
| 1320 | if (!is_eth) { | ||
| 1321 | sqp->ud_header.lrh.service_level = | ||
| 1322 | be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; | ||
| 1323 | sqp->ud_header.lrh.destination_lid = ah->av.ib.dlid; | ||
| 1324 | sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.ib.g_slid & 0x7f); | ||
| 1325 | } | ||
| 1235 | 1326 | ||
| 1236 | sqp->ud_header.lrh.service_level = | 1327 | if (is_grh) { |
| 1237 | be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28; | ||
| 1238 | sqp->ud_header.lrh.destination_lid = ah->av.dlid; | ||
| 1239 | sqp->ud_header.lrh.source_lid = cpu_to_be16(ah->av.g_slid & 0x7f); | ||
| 1240 | if (mlx4_ib_ah_grh_present(ah)) { | ||
| 1241 | sqp->ud_header.grh.traffic_class = | 1328 | sqp->ud_header.grh.traffic_class = |
| 1242 | (be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 20) & 0xff; | 1329 | (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20) & 0xff; |
| 1243 | sqp->ud_header.grh.flow_label = | 1330 | sqp->ud_header.grh.flow_label = |
| 1244 | ah->av.sl_tclass_flowlabel & cpu_to_be32(0xfffff); | 1331 | ah->av.ib.sl_tclass_flowlabel & cpu_to_be32(0xfffff); |
| 1245 | sqp->ud_header.grh.hop_limit = ah->av.hop_limit; | 1332 | sqp->ud_header.grh.hop_limit = ah->av.ib.hop_limit; |
| 1246 | ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.port_pd) >> 24, | 1333 | ib_get_cached_gid(ib_dev, be32_to_cpu(ah->av.ib.port_pd) >> 24, |
| 1247 | ah->av.gid_index, &sqp->ud_header.grh.source_gid); | 1334 | ah->av.ib.gid_index, &sqp->ud_header.grh.source_gid); |
| 1248 | memcpy(sqp->ud_header.grh.destination_gid.raw, | 1335 | memcpy(sqp->ud_header.grh.destination_gid.raw, |
| 1249 | ah->av.dgid, 16); | 1336 | ah->av.ib.dgid, 16); |
| 1250 | } | 1337 | } |
| 1251 | 1338 | ||
| 1252 | mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); | 1339 | mlx->flags &= cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); |
| 1253 | mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | | 1340 | |
| 1254 | (sqp->ud_header.lrh.destination_lid == | 1341 | if (!is_eth) { |
| 1255 | IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) | | 1342 | mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MLX4_WQE_MLX_VL15 : 0) | |
| 1256 | (sqp->ud_header.lrh.service_level << 8)); | 1343 | (sqp->ud_header.lrh.destination_lid == |
| 1257 | mlx->rlid = sqp->ud_header.lrh.destination_lid; | 1344 | IB_LID_PERMISSIVE ? MLX4_WQE_MLX_SLR : 0) | |
| 1345 | (sqp->ud_header.lrh.service_level << 8)); | ||
| 1346 | mlx->rlid = sqp->ud_header.lrh.destination_lid; | ||
| 1347 | } | ||
| 1258 | 1348 | ||
| 1259 | switch (wr->opcode) { | 1349 | switch (wr->opcode) { |
| 1260 | case IB_WR_SEND: | 1350 | case IB_WR_SEND: |
| @@ -1270,9 +1360,29 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, | |||
| 1270 | return -EINVAL; | 1360 | return -EINVAL; |
| 1271 | } | 1361 | } |
| 1272 | 1362 | ||
| 1273 | sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; | 1363 | if (is_eth) { |
| 1274 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) | 1364 | u8 *smac; |
| 1275 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; | 1365 | |
| 1366 | memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); | ||
| 1367 | /* FIXME: cache smac value? */ | ||
| 1368 | smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr; | ||
| 1369 | memcpy(sqp->ud_header.eth.smac_h, smac, 6); | ||
| 1370 | if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) | ||
| 1371 | mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); | ||
| 1372 | if (!is_vlan) { | ||
| 1373 | sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); | ||
| 1374 | } else { | ||
| 1375 | u16 pcp; | ||
| 1376 | |||
| 1377 | sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); | ||
| 1378 | pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13; | ||
| 1379 | sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); | ||
| 1380 | } | ||
| 1381 | } else { | ||
| 1382 | sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; | ||
| 1383 | if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) | ||
| 1384 | sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; | ||
| 1385 | } | ||
| 1276 | sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); | 1386 | sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); |
| 1277 | if (!sqp->qp.ibqp.qp_num) | 1387 | if (!sqp->qp.ibqp.qp_num) |
| 1278 | ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); | 1388 | ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey); |
| @@ -1429,11 +1539,14 @@ static void set_masked_atomic_seg(struct mlx4_wqe_masked_atomic_seg *aseg, | |||
| 1429 | } | 1539 | } |
| 1430 | 1540 | ||
| 1431 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, | 1541 | static void set_datagram_seg(struct mlx4_wqe_datagram_seg *dseg, |
| 1432 | struct ib_send_wr *wr) | 1542 | struct ib_send_wr *wr, __be16 *vlan) |
| 1433 | { | 1543 | { |
| 1434 | memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); | 1544 | memcpy(dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof (struct mlx4_av)); |
| 1435 | dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); | 1545 | dseg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn); |
| 1436 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); | 1546 | dseg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey); |
| 1547 | dseg->vlan = to_mah(wr->wr.ud.ah)->av.eth.vlan; | ||
| 1548 | memcpy(dseg->mac, to_mah(wr->wr.ud.ah)->av.eth.mac, 6); | ||
| 1549 | *vlan = dseg->vlan; | ||
| 1437 | } | 1550 | } |
| 1438 | 1551 | ||
| 1439 | static void set_mlx_icrc_seg(void *dseg) | 1552 | static void set_mlx_icrc_seg(void *dseg) |
| @@ -1536,6 +1649,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 1536 | __be32 uninitialized_var(lso_hdr_sz); | 1649 | __be32 uninitialized_var(lso_hdr_sz); |
| 1537 | __be32 blh; | 1650 | __be32 blh; |
| 1538 | int i; | 1651 | int i; |
| 1652 | __be16 vlan = cpu_to_be16(0xffff); | ||
| 1539 | 1653 | ||
| 1540 | spin_lock_irqsave(&qp->sq.lock, flags); | 1654 | spin_lock_irqsave(&qp->sq.lock, flags); |
| 1541 | 1655 | ||
| @@ -1639,7 +1753,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 1639 | break; | 1753 | break; |
| 1640 | 1754 | ||
| 1641 | case IB_QPT_UD: | 1755 | case IB_QPT_UD: |
| 1642 | set_datagram_seg(wqe, wr); | 1756 | set_datagram_seg(wqe, wr, &vlan); |
| 1643 | wqe += sizeof (struct mlx4_wqe_datagram_seg); | 1757 | wqe += sizeof (struct mlx4_wqe_datagram_seg); |
| 1644 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; | 1758 | size += sizeof (struct mlx4_wqe_datagram_seg) / 16; |
| 1645 | 1759 | ||
| @@ -1717,6 +1831,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 1717 | ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | | 1831 | ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | |
| 1718 | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; | 1832 | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh; |
| 1719 | 1833 | ||
| 1834 | if (be16_to_cpu(vlan) < 0x1000) { | ||
| 1835 | ctrl->ins_vlan = 1 << 6; | ||
| 1836 | ctrl->vlan_tag = vlan; | ||
| 1837 | } | ||
| 1838 | |||
| 1720 | stamp = ind + qp->sq_spare_wqes; | 1839 | stamp = ind + qp->sq_spare_wqes; |
| 1721 | ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); | 1840 | ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); |
| 1722 | 1841 | ||
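Note on the post-send path above: set_datagram_seg() now also hands back the VLAN from the address vector, and the caller initialises it to 0xffff. Because valid 802.1Q VLAN IDs are 0..4095, the comparison against 0x1000 cleanly separates "insert this tag" from "no VLAN present". A trivial sketch of that convention:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool should_insert_vlan(uint16_t vlan_host_order)
    {
            return vlan_host_order < 0x1000;   /* only a real VID triggers insertion */
    }

    int main(void)
    {
            printf("%d %d\n", should_insert_vlan(100), should_insert_vlan(0xffff));
            return 0;   /* prints 1 0 */
    }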
| @@ -1866,17 +1985,27 @@ static int to_ib_qp_access_flags(int mlx4_flags) | |||
| 1866 | return ib_flags; | 1985 | return ib_flags; |
| 1867 | } | 1986 | } |
| 1868 | 1987 | ||
| 1869 | static void to_ib_ah_attr(struct mlx4_dev *dev, struct ib_ah_attr *ib_ah_attr, | 1988 | static void to_ib_ah_attr(struct mlx4_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, |
| 1870 | struct mlx4_qp_path *path) | 1989 | struct mlx4_qp_path *path) |
| 1871 | { | 1990 | { |
| 1991 | struct mlx4_dev *dev = ibdev->dev; | ||
| 1992 | int is_eth; | ||
| 1993 | |||
| 1872 | memset(ib_ah_attr, 0, sizeof *ib_ah_attr); | 1994 | memset(ib_ah_attr, 0, sizeof *ib_ah_attr); |
| 1873 | ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1; | 1995 | ib_ah_attr->port_num = path->sched_queue & 0x40 ? 2 : 1; |
| 1874 | 1996 | ||
| 1875 | if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) | 1997 | if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports) |
| 1876 | return; | 1998 | return; |
| 1877 | 1999 | ||
| 2000 | is_eth = rdma_port_get_link_layer(&ibdev->ib_dev, ib_ah_attr->port_num) == | ||
| 2001 | IB_LINK_LAYER_ETHERNET; | ||
| 2002 | if (is_eth) | ||
| 2003 | ib_ah_attr->sl = ((path->sched_queue >> 3) & 0x7) | | ||
| 2004 | ((path->sched_queue & 4) << 1); | ||
| 2005 | else | ||
| 2006 | ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf; | ||
| 2007 | |||
| 1878 | ib_ah_attr->dlid = be16_to_cpu(path->rlid); | 2008 | ib_ah_attr->dlid = be16_to_cpu(path->rlid); |
| 1879 | ib_ah_attr->sl = (path->sched_queue >> 2) & 0xf; | ||
| 1880 | ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f; | 2009 | ib_ah_attr->src_path_bits = path->grh_mylmc & 0x7f; |
| 1881 | ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; | 2010 | ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0; |
| 1882 | ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0; | 2011 | ib_ah_attr->ah_flags = (path->grh_mylmc & (1 << 7)) ? IB_AH_GRH : 0; |
| @@ -1929,8 +2058,8 @@ int mlx4_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr | |||
| 1929 | to_ib_qp_access_flags(be32_to_cpu(context.params2)); | 2058 | to_ib_qp_access_flags(be32_to_cpu(context.params2)); |
| 1930 | 2059 | ||
| 1931 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { | 2060 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { |
| 1932 | to_ib_ah_attr(dev->dev, &qp_attr->ah_attr, &context.pri_path); | 2061 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context.pri_path); |
| 1933 | to_ib_ah_attr(dev->dev, &qp_attr->alt_ah_attr, &context.alt_path); | 2062 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context.alt_path); |
| 1934 | qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f; | 2063 | qp_attr->alt_pkey_index = context.alt_path.pkey_index & 0x7f; |
| 1935 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; | 2064 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; |
| 1936 | } | 2065 | } |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index d2d172e6289c..a34c9d38e822 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
| @@ -1493,7 +1493,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, | |||
| 1493 | int err; | 1493 | int err; |
| 1494 | u16 pkey; | 1494 | u16 pkey; |
| 1495 | 1495 | ||
| 1496 | ib_ud_header_init(256, /* assume a MAD */ | 1496 | ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0, |
| 1497 | mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0, | 1497 | mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0, |
| 1498 | &sqp->ud_header); | 1498 | &sqp->ud_header); |
| 1499 | 1499 | ||
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 61e0efd4ccfb..5c8d34cb6a23 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
| @@ -1424,7 +1424,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
| 1424 | { | 1424 | { |
| 1425 | 1425 | ||
| 1426 | int reset = 0; /* whether to send reset in case of err.. */ | 1426 | int reset = 0; /* whether to send reset in case of err.. */ |
| 1427 | int passive_state; | ||
| 1428 | atomic_inc(&cm_resets_recvd); | 1427 | atomic_inc(&cm_resets_recvd); |
| 1429 | nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." | 1428 | nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u." |
| 1430 | " refcnt=%d\n", cm_node, cm_node->state, | 1429 | " refcnt=%d\n", cm_node, cm_node->state, |
| @@ -1439,7 +1438,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
| 1439 | active_open_err(cm_node, skb, reset); | 1438 | active_open_err(cm_node, skb, reset); |
| 1440 | break; | 1439 | break; |
| 1441 | case NES_CM_STATE_MPAREQ_RCVD: | 1440 | case NES_CM_STATE_MPAREQ_RCVD: |
| 1442 | passive_state = atomic_add_return(1, &cm_node->passive_state); | 1441 | atomic_inc(&cm_node->passive_state); |
| 1443 | dev_kfree_skb_any(skb); | 1442 | dev_kfree_skb_any(skb); |
| 1444 | break; | 1443 | break; |
| 1445 | case NES_CM_STATE_ESTABLISHED: | 1444 | case NES_CM_STATE_ESTABLISHED: |
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 10560c796fd6..3892e2c0e95a 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c | |||
| @@ -271,6 +271,7 @@ static int nes_netdev_stop(struct net_device *netdev) | |||
| 271 | 271 | ||
| 272 | if (netif_msg_ifdown(nesvnic)) | 272 | if (netif_msg_ifdown(nesvnic)) |
| 273 | printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name); | 273 | printk(KERN_INFO PFX "%s: disabling interface\n", netdev->name); |
| 274 | netif_carrier_off(netdev); | ||
| 274 | 275 | ||
| 275 | /* Disable network packets */ | 276 | /* Disable network packets */ |
| 276 | napi_disable(&nesvnic->napi); | 277 | napi_disable(&nesvnic->napi); |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 9046e6675686..2374efbdda6b 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
| @@ -476,9 +476,9 @@ static struct ib_fast_reg_page_list *nes_alloc_fast_reg_page_list( | |||
| 476 | } | 476 | } |
| 477 | nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, " | 477 | nes_debug(NES_DBG_MR, "nes_alloc_fast_reg_pbl: nes_frpl = %p, " |
| 478 | "ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, " | 478 | "ibfrpl = %p, ibfrpl.page_list = %p, pbl.kva = %p, " |
| 479 | "pbl.paddr= %p\n", pnesfrpl, &pnesfrpl->ibfrpl, | 479 | "pbl.paddr = %llx\n", pnesfrpl, &pnesfrpl->ibfrpl, |
| 480 | pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva, | 480 | pnesfrpl->ibfrpl.page_list, pnesfrpl->nes_wqe_pbl.kva, |
| 481 | (void *)pnesfrpl->nes_wqe_pbl.paddr); | 481 | (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr); |
| 482 | 482 | ||
| 483 | return pifrpl; | 483 | return pifrpl; |
| 484 | } | 484 | } |
| @@ -584,7 +584,9 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr | |||
| 584 | props->lmc = 0; | 584 | props->lmc = 0; |
| 585 | props->sm_lid = 0; | 585 | props->sm_lid = 0; |
| 586 | props->sm_sl = 0; | 586 | props->sm_sl = 0; |
| 587 | if (nesvnic->linkup) | 587 | if (netif_queue_stopped(netdev)) |
| 588 | props->state = IB_PORT_DOWN; | ||
| 589 | else if (nesvnic->linkup) | ||
| 588 | props->state = IB_PORT_ACTIVE; | 590 | props->state = IB_PORT_ACTIVE; |
| 589 | else | 591 | else |
| 590 | props->state = IB_PORT_DOWN; | 592 | props->state = IB_PORT_DOWN; |
| @@ -3483,13 +3485,13 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, | |||
| 3483 | for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++) | 3485 | for (i = 0; i < ib_wr->wr.fast_reg.page_list_len; i++) |
| 3484 | dst_page_list[i] = cpu_to_le64(src_page_list[i]); | 3486 | dst_page_list[i] = cpu_to_le64(src_page_list[i]); |
| 3485 | 3487 | ||
| 3486 | nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %p, " | 3488 | nes_debug(NES_DBG_IW_TX, "SQ_FMR: iova_start: %llx, " |
| 3487 | "length: %d, rkey: %0x, pgl_paddr: %p, " | 3489 | "length: %d, rkey: %0x, pgl_paddr: %llx, " |
| 3488 | "page_list_len: %u, wqe_misc: %x\n", | 3490 | "page_list_len: %u, wqe_misc: %x\n", |
| 3489 | (void *)ib_wr->wr.fast_reg.iova_start, | 3491 | (unsigned long long) ib_wr->wr.fast_reg.iova_start, |
| 3490 | ib_wr->wr.fast_reg.length, | 3492 | ib_wr->wr.fast_reg.length, |
| 3491 | ib_wr->wr.fast_reg.rkey, | 3493 | ib_wr->wr.fast_reg.rkey, |
| 3492 | (void *)pnesfrpl->nes_wqe_pbl.paddr, | 3494 | (unsigned long long) pnesfrpl->nes_wqe_pbl.paddr, |
| 3493 | ib_wr->wr.fast_reg.page_list_len, | 3495 | ib_wr->wr.fast_reg.page_list_len, |
| 3494 | wqe_misc); | 3496 | wqe_misc); |
| 3495 | break; | 3497 | break; |
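The nes_verbs.c changes above switch from printing DMA addresses with %p to %llx plus an explicit cast. That is the usual portable idiom: dma_addr_t may be 32 or 64 bits wide depending on configuration, so casting to unsigned long long keeps the format string correct everywhere, and %p stays reserved for real pointers. A minimal userspace illustration; the typedef width here is an assumption for the example.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;    /* width is config-dependent in the kernel */

    static void show(dma_addr_t paddr)
    {
            printf("pbl.paddr = %llx\n", (unsigned long long)paddr);
    }

    int main(void)
    {
            show((dma_addr_t)0x1f4000ULL);
            return 0;
    }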
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 61de0654820e..64c9e7d02d4a 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
| @@ -1406,7 +1406,7 @@ extern struct mutex qib_mutex; | |||
| 1406 | */ | 1406 | */ |
| 1407 | #define qib_early_err(dev, fmt, ...) \ | 1407 | #define qib_early_err(dev, fmt, ...) \ |
| 1408 | do { \ | 1408 | do { \ |
| 1409 | dev_info(dev, KERN_ERR QIB_DRV_NAME ": " fmt, ##__VA_ARGS__); \ | 1409 | dev_err(dev, fmt, ##__VA_ARGS__); \ |
| 1410 | } while (0) | 1410 | } while (0) |
| 1411 | 1411 | ||
| 1412 | #define qib_dev_err(dd, fmt, ...) \ | 1412 | #define qib_dev_err(dd, fmt, ...) \ |
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 6b11645edf35..cef5d676120a 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
| @@ -1722,7 +1722,7 @@ static int qib_close(struct inode *in, struct file *fp) | |||
| 1722 | 1722 | ||
| 1723 | mutex_lock(&qib_mutex); | 1723 | mutex_lock(&qib_mutex); |
| 1724 | 1724 | ||
| 1725 | fd = (struct qib_filedata *) fp->private_data; | 1725 | fd = fp->private_data; |
| 1726 | fp->private_data = NULL; | 1726 | fp->private_data = NULL; |
| 1727 | rcd = fd->rcd; | 1727 | rcd = fd->rcd; |
| 1728 | if (!rcd) { | 1728 | if (!rcd) { |
| @@ -1808,7 +1808,7 @@ static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo) | |||
| 1808 | struct qib_ctxtdata *rcd = ctxt_fp(fp); | 1808 | struct qib_ctxtdata *rcd = ctxt_fp(fp); |
| 1809 | struct qib_filedata *fd; | 1809 | struct qib_filedata *fd; |
| 1810 | 1810 | ||
| 1811 | fd = (struct qib_filedata *) fp->private_data; | 1811 | fd = fp->private_data; |
| 1812 | 1812 | ||
| 1813 | info.num_active = qib_count_active_units(); | 1813 | info.num_active = qib_count_active_units(); |
| 1814 | info.unit = rcd->dd->unit; | 1814 | info.unit = rcd->dd->unit; |
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index f1d16d3a01f6..f3b503936043 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
| @@ -1243,6 +1243,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev, | |||
| 1243 | qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " | 1243 | qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot " |
| 1244 | "work if CONFIG_PCI_MSI is not enabled\n", | 1244 | "work if CONFIG_PCI_MSI is not enabled\n", |
| 1245 | ent->device); | 1245 | ent->device); |
| 1246 | dd = ERR_PTR(-ENODEV); | ||
| 1246 | #endif | 1247 | #endif |
| 1247 | break; | 1248 | break; |
| 1248 | 1249 | ||
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 7fa6e5592630..48b6674cbc49 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c | |||
| @@ -103,16 +103,20 @@ int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 103 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | 103 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
| 104 | } else | 104 | } else |
| 105 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | 105 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
| 106 | if (ret) | 106 | if (ret) { |
| 107 | qib_early_err(&pdev->dev, | 107 | qib_early_err(&pdev->dev, |
| 108 | "Unable to set DMA consistent mask: %d\n", ret); | 108 | "Unable to set DMA consistent mask: %d\n", ret); |
| 109 | goto bail; | ||
| 110 | } | ||
| 109 | 111 | ||
| 110 | pci_set_master(pdev); | 112 | pci_set_master(pdev); |
| 111 | ret = pci_enable_pcie_error_reporting(pdev); | 113 | ret = pci_enable_pcie_error_reporting(pdev); |
| 112 | if (ret) | 114 | if (ret) { |
| 113 | qib_early_err(&pdev->dev, | 115 | qib_early_err(&pdev->dev, |
| 114 | "Unable to enable pcie error reporting: %d\n", | 116 | "Unable to enable pcie error reporting: %d\n", |
| 115 | ret); | 117 | ret); |
| 118 | ret = 0; | ||
| 119 | } | ||
| 116 | goto done; | 120 | goto done; |
| 117 | 121 | ||
| 118 | bail: | 122 | bail: |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index a0931119bd78..955fb7157793 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
| @@ -2068,7 +2068,10 @@ send_last: | |||
| 2068 | goto nack_op_err; | 2068 | goto nack_op_err; |
| 2069 | if (!ret) | 2069 | if (!ret) |
| 2070 | goto rnr_nak; | 2070 | goto rnr_nak; |
| 2071 | goto send_last_imm; | 2071 | wc.ex.imm_data = ohdr->u.rc.imm_data; |
| 2072 | hdrsize += 4; | ||
| 2073 | wc.wc_flags = IB_WC_WITH_IMM; | ||
| 2074 | goto send_last; | ||
| 2072 | 2075 | ||
| 2073 | case OP(RDMA_READ_REQUEST): { | 2076 | case OP(RDMA_READ_REQUEST): { |
| 2074 | struct qib_ack_entry *e; | 2077 | struct qib_ack_entry *e; |
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index b9c8b6346c1b..32ccf3c824ca 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c | |||
| @@ -457,8 +457,10 @@ rdma_first: | |||
| 457 | } | 457 | } |
| 458 | if (opcode == OP(RDMA_WRITE_ONLY)) | 458 | if (opcode == OP(RDMA_WRITE_ONLY)) |
| 459 | goto rdma_last; | 459 | goto rdma_last; |
| 460 | else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) | 460 | else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) { |
| 461 | wc.ex.imm_data = ohdr->u.rc.imm_data; | ||
| 461 | goto rdma_last_imm; | 462 | goto rdma_last_imm; |
| 463 | } | ||
| 462 | /* FALLTHROUGH */ | 464 | /* FALLTHROUGH */ |
| 463 | case OP(RDMA_WRITE_MIDDLE): | 465 | case OP(RDMA_WRITE_MIDDLE): |
| 464 | /* Check for invalid length PMTU or posted rwqe len. */ | 466 | /* Check for invalid length PMTU or posted rwqe len. */ |
| @@ -471,8 +473,8 @@ rdma_first: | |||
| 471 | break; | 473 | break; |
| 472 | 474 | ||
| 473 | case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): | 475 | case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): |
| 474 | rdma_last_imm: | ||
| 475 | wc.ex.imm_data = ohdr->u.imm_data; | 476 | wc.ex.imm_data = ohdr->u.imm_data; |
| 477 | rdma_last_imm: | ||
| 476 | hdrsize += 4; | 478 | hdrsize += 4; |
| 477 | wc.wc_flags = IB_WC_WITH_IMM; | 479 | wc.wc_flags = IB_WC_WITH_IMM; |
| 478 | 480 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index ec6b4fbe25e4..dfa71903d6e4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -223,6 +223,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 223 | unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; | 223 | unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; |
| 224 | struct sk_buff *skb; | 224 | struct sk_buff *skb; |
| 225 | u64 mapping[IPOIB_UD_RX_SG]; | 225 | u64 mapping[IPOIB_UD_RX_SG]; |
| 226 | union ib_gid *dgid; | ||
| 226 | 227 | ||
| 227 | ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", | 228 | ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", |
| 228 | wr_id, wc->status); | 229 | wr_id, wc->status); |
| @@ -271,6 +272,16 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 271 | ipoib_ud_dma_unmap_rx(priv, mapping); | 272 | ipoib_ud_dma_unmap_rx(priv, mapping); |
| 272 | ipoib_ud_skb_put_frags(priv, skb, wc->byte_len); | 273 | ipoib_ud_skb_put_frags(priv, skb, wc->byte_len); |
| 273 | 274 | ||
| 275 | /* First byte of dgid signals multicast when 0xff */ | ||
| 276 | dgid = &((struct ib_grh *)skb->data)->dgid; | ||
| 277 | |||
| 278 | if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff) | ||
| 279 | skb->pkt_type = PACKET_HOST; | ||
| 280 | else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0) | ||
| 281 | skb->pkt_type = PACKET_BROADCAST; | ||
| 282 | else | ||
| 283 | skb->pkt_type = PACKET_MULTICAST; | ||
| 284 | |||
| 274 | skb_pull(skb, IB_GRH_BYTES); | 285 | skb_pull(skb, IB_GRH_BYTES); |
| 275 | 286 | ||
| 276 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; | 287 | skb->protocol = ((struct ipoib_header *) skb->data)->proto; |
| @@ -281,9 +292,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) | |||
| 281 | dev->stats.rx_bytes += skb->len; | 292 | dev->stats.rx_bytes += skb->len; |
| 282 | 293 | ||
| 283 | skb->dev = dev; | 294 | skb->dev = dev; |
| 284 | /* XXX get correct PACKET_ type here */ | ||
| 285 | skb->pkt_type = PACKET_HOST; | ||
| 286 | |||
| 287 | if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok)) | 295 | if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok)) |
| 288 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 296 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 289 | 297 | ||
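The IPoIB receive path above now derives skb->pkt_type from the GRH destination GID instead of hard-coding PACKET_HOST: a GID whose first byte is 0xff is multicast, and the broadcast group's GID (stored at offset 4 of dev->broadcast) is singled out as PACKET_BROADCAST. A userspace model of that decision, with the GID layout assumptions stated in the comments:

    /* Classification model: dgid is the 16-byte GRH destination GID,
     * bcast_gid the 16-byte GID portion of the IPoIB broadcast address. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum pkt_type { PKT_HOST, PKT_BROADCAST, PKT_MULTICAST };

    static enum pkt_type classify(const uint8_t dgid[16],
                                  const uint8_t bcast_gid[16], int have_grh)
    {
            if (!have_grh || dgid[0] != 0xff)
                    return PKT_HOST;         /* unicast delivered to this port */
            if (!memcmp(dgid, bcast_gid, 16))
                    return PKT_BROADCAST;    /* the broadcast group itself */
            return PKT_MULTICAST;            /* any other multicast group */
    }

    int main(void)
    {
            uint8_t bcast[16] = { 0xff, 0x12, 0x40, 0x1b, [15] = 0xff };
            uint8_t other[16] = { 0xff, 0x12, 0x40, 0x1b };
            uint8_t ucast[16] = { 0xfe, 0x80 };

            printf("%d %d %d\n", classify(ucast, bcast, 1),
                   classify(bcast, bcast, 1), classify(other, bcast, 1));
            return 0;    /* prints 0 1 2 */
    }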
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index b4b22576f12a..4f258c88c1b4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -1240,6 +1240,7 @@ static struct net_device *ipoib_add_port(const char *format, | |||
| 1240 | goto alloc_mem_failed; | 1240 | goto alloc_mem_failed; |
| 1241 | 1241 | ||
| 1242 | SET_NETDEV_DEV(priv->dev, hca->dma_device); | 1242 | SET_NETDEV_DEV(priv->dev, hca->dma_device); |
| 1243 | priv->dev->dev_id = port - 1; | ||
| 1243 | 1244 | ||
| 1244 | if (!ib_query_port(hca, port, &attr)) | 1245 | if (!ib_query_port(hca, port, &attr)) |
| 1245 | priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); | 1246 | priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); |
| @@ -1362,6 +1363,8 @@ static void ipoib_add_one(struct ib_device *device) | |||
| 1362 | } | 1363 | } |
| 1363 | 1364 | ||
| 1364 | for (p = s; p <= e; ++p) { | 1365 | for (p = s; p <= e; ++p) { |
| 1366 | if (rdma_port_get_link_layer(device, p) != IB_LINK_LAYER_INFINIBAND) | ||
| 1367 | continue; | ||
| 1365 | dev = ipoib_add_port("ib%d", device, p); | 1368 | dev = ipoib_add_port("ib%d", device, p); |
| 1366 | if (!IS_ERR(dev)) { | 1369 | if (!IS_ERR(dev)) { |
| 1367 | priv = netdev_priv(dev); | 1370 | priv = netdev_priv(dev); |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 7f8f16bad753..cfc1d65c4577 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -291,7 +291,7 @@ static void srp_free_target_ib(struct srp_target_port *target) | |||
| 291 | 291 | ||
| 292 | for (i = 0; i < SRP_RQ_SIZE; ++i) | 292 | for (i = 0; i < SRP_RQ_SIZE; ++i) |
| 293 | srp_free_iu(target->srp_host, target->rx_ring[i]); | 293 | srp_free_iu(target->srp_host, target->rx_ring[i]); |
| 294 | for (i = 0; i < SRP_SQ_SIZE + 1; ++i) | 294 | for (i = 0; i < SRP_SQ_SIZE; ++i) |
| 295 | srp_free_iu(target->srp_host, target->tx_ring[i]); | 295 | srp_free_iu(target->srp_host, target->tx_ring[i]); |
| 296 | } | 296 | } |
| 297 | 297 | ||
| @@ -811,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, | |||
| 811 | return len; | 811 | return len; |
| 812 | } | 812 | } |
| 813 | 813 | ||
| 814 | /* | ||
| 815 | * Must be called with target->scsi_host->host_lock held to protect | ||
| 816 | * req_lim and tx_head. Lock cannot be dropped between call here and | ||
| 817 | * call to __srp_post_send(). | ||
| 818 | * | ||
| 819 | * Note: | ||
| 820 | * An upper limit for the number of allocated information units for each | ||
| 821 | * request type is: | ||
| 822 | * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues | ||
| 823 | * more than Scsi_Host.can_queue requests. | ||
| 824 | * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE. | ||
| 825 | * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than | ||
| 826 | * one unanswered SRP request to an initiator. | ||
| 827 | */ | ||
| 828 | static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, | ||
| 829 | enum srp_iu_type iu_type) | ||
| 830 | { | ||
| 831 | s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE; | ||
| 832 | struct srp_iu *iu; | ||
| 833 | |||
| 834 | srp_send_completion(target->send_cq, target); | ||
| 835 | |||
| 836 | if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) | ||
| 837 | return NULL; | ||
| 838 | |||
| 839 | /* Initiator responses to target requests do not consume credits */ | ||
| 840 | if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) { | ||
| 841 | ++target->zero_req_lim; | ||
| 842 | return NULL; | ||
| 843 | } | ||
| 844 | |||
| 845 | iu = target->tx_ring[target->tx_head & SRP_SQ_MASK]; | ||
| 846 | iu->type = iu_type; | ||
| 847 | return iu; | ||
| 848 | } | ||
| 849 | |||
| 850 | /* | ||
| 851 | * Must be called with target->scsi_host->host_lock held to protect | ||
| 852 | * req_lim and tx_head. | ||
| 853 | */ | ||
| 854 | static int __srp_post_send(struct srp_target_port *target, | ||
| 855 | struct srp_iu *iu, int len) | ||
| 856 | { | ||
| 857 | struct ib_sge list; | ||
| 858 | struct ib_send_wr wr, *bad_wr; | ||
| 859 | int ret = 0; | ||
| 860 | |||
| 861 | list.addr = iu->dma; | ||
| 862 | list.length = len; | ||
| 863 | list.lkey = target->srp_host->srp_dev->mr->lkey; | ||
| 864 | |||
| 865 | wr.next = NULL; | ||
| 866 | wr.wr_id = target->tx_head & SRP_SQ_MASK; | ||
| 867 | wr.sg_list = &list; | ||
| 868 | wr.num_sge = 1; | ||
| 869 | wr.opcode = IB_WR_SEND; | ||
| 870 | wr.send_flags = IB_SEND_SIGNALED; | ||
| 871 | |||
| 872 | ret = ib_post_send(target->qp, &wr, &bad_wr); | ||
| 873 | |||
| 874 | if (!ret) { | ||
| 875 | ++target->tx_head; | ||
| 876 | if (iu->type != SRP_IU_RSP) | ||
| 877 | --target->req_lim; | ||
| 878 | } | ||
| 879 | |||
| 880 | return ret; | ||
| 881 | } | ||
| 882 | |||
| 814 | static int srp_post_recv(struct srp_target_port *target) | 883 | static int srp_post_recv(struct srp_target_port *target) |
| 815 | { | 884 | { |
| 816 | unsigned long flags; | 885 | unsigned long flags; |
| @@ -822,7 +891,7 @@ static int srp_post_recv(struct srp_target_port *target) | |||
| 822 | 891 | ||
| 823 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | 892 | spin_lock_irqsave(target->scsi_host->host_lock, flags); |
| 824 | 893 | ||
| 825 | next = target->rx_head & (SRP_RQ_SIZE - 1); | 894 | next = target->rx_head & SRP_RQ_MASK; |
| 826 | wr.wr_id = next; | 895 | wr.wr_id = next; |
| 827 | iu = target->rx_ring[next]; | 896 | iu = target->rx_ring[next]; |
| 828 | 897 | ||
| @@ -896,6 +965,71 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) | |||
| 896 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | 965 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); |
| 897 | } | 966 | } |
| 898 | 967 | ||
| 968 | static int srp_response_common(struct srp_target_port *target, s32 req_delta, | ||
| 969 | void *rsp, int len) | ||
| 970 | { | ||
| 971 | struct ib_device *dev; | ||
| 972 | unsigned long flags; | ||
| 973 | struct srp_iu *iu; | ||
| 974 | int err = 1; | ||
| 975 | |||
| 976 | dev = target->srp_host->srp_dev->dev; | ||
| 977 | |||
| 978 | spin_lock_irqsave(target->scsi_host->host_lock, flags); | ||
| 979 | target->req_lim += req_delta; | ||
| 980 | |||
| 981 | iu = __srp_get_tx_iu(target, SRP_IU_RSP); | ||
| 982 | if (!iu) { | ||
| 983 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
| 984 | "no IU available to send response\n"); | ||
| 985 | goto out; | ||
| 986 | } | ||
| 987 | |||
| 988 | ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); | ||
| 989 | memcpy(iu->buf, rsp, len); | ||
| 990 | ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); | ||
| 991 | |||
| 992 | err = __srp_post_send(target, iu, len); | ||
| 993 | if (err) | ||
| 994 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
| 995 | "unable to post response: %d\n", err); | ||
| 996 | |||
| 997 | out: | ||
| 998 | spin_unlock_irqrestore(target->scsi_host->host_lock, flags); | ||
| 999 | return err; | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | static void srp_process_cred_req(struct srp_target_port *target, | ||
| 1003 | struct srp_cred_req *req) | ||
| 1004 | { | ||
| 1005 | struct srp_cred_rsp rsp = { | ||
| 1006 | .opcode = SRP_CRED_RSP, | ||
| 1007 | .tag = req->tag, | ||
| 1008 | }; | ||
| 1009 | s32 delta = be32_to_cpu(req->req_lim_delta); | ||
| 1010 | |||
| 1011 | if (srp_response_common(target, delta, &rsp, sizeof rsp)) | ||
| 1012 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
| 1013 | "problems processing SRP_CRED_REQ\n"); | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | static void srp_process_aer_req(struct srp_target_port *target, | ||
| 1017 | struct srp_aer_req *req) | ||
| 1018 | { | ||
| 1019 | struct srp_aer_rsp rsp = { | ||
| 1020 | .opcode = SRP_AER_RSP, | ||
| 1021 | .tag = req->tag, | ||
| 1022 | }; | ||
| 1023 | s32 delta = be32_to_cpu(req->req_lim_delta); | ||
| 1024 | |||
| 1025 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
| 1026 | "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun)); | ||
| 1027 | |||
| 1028 | if (srp_response_common(target, delta, &rsp, sizeof rsp)) | ||
| 1029 | shost_printk(KERN_ERR, target->scsi_host, PFX | ||
| 1030 | "problems processing SRP_AER_REQ\n"); | ||
| 1031 | } | ||
| 1032 | |||
| 899 | static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | 1033 | static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) |
| 900 | { | 1034 | { |
| 901 | struct ib_device *dev; | 1035 | struct ib_device *dev; |
| @@ -923,6 +1057,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | |||
| 923 | srp_process_rsp(target, iu->buf); | 1057 | srp_process_rsp(target, iu->buf); |
| 924 | break; | 1058 | break; |
| 925 | 1059 | ||
| 1060 | case SRP_CRED_REQ: | ||
| 1061 | srp_process_cred_req(target, iu->buf); | ||
| 1062 | break; | ||
| 1063 | |||
| 1064 | case SRP_AER_REQ: | ||
| 1065 | srp_process_aer_req(target, iu->buf); | ||
| 1066 | break; | ||
| 1067 | |||
| 926 | case SRP_T_LOGOUT: | 1068 | case SRP_T_LOGOUT: |
| 927 | /* XXX Handle target logout */ | 1069 | /* XXX Handle target logout */ |
| 928 | shost_printk(KERN_WARNING, target->scsi_host, | 1070 | shost_printk(KERN_WARNING, target->scsi_host, |
| @@ -981,61 +1123,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) | |||
| 981 | } | 1123 | } |
| 982 | } | 1124 | } |
| 983 | 1125 | ||
| 984 | /* | ||
| 985 | * Must be called with target->scsi_host->host_lock held to protect | ||
| 986 | * req_lim and tx_head. Lock cannot be dropped between call here and | ||
| 987 | * call to __srp_post_send(). | ||
| 988 | */ | ||
| 989 | static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, | ||
| 990 | enum srp_request_type req_type) | ||
| 991 | { | ||
| 992 | s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2; | ||
| 993 | |||
| 994 | srp_send_completion(target->send_cq, target); | ||
| 995 | |||
| 996 | if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) | ||
| 997 | return NULL; | ||
| 998 | |||
| 999 | if (target->req_lim < min) { | ||
| 1000 | ++target->zero_req_lim; | ||
| 1001 | return NULL; | ||
| 1002 | } | ||
| 1003 | |||
| 1004 | return target->tx_ring[target->tx_head & SRP_SQ_SIZE]; | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | /* | ||
| 1008 | * Must be called with target->scsi_host->host_lock held to protect | ||
| 1009 | * req_lim and tx_head. | ||
| 1010 | */ | ||
| 1011 | static int __srp_post_send(struct srp_target_port *target, | ||
| 1012 | struct srp_iu *iu, int len) | ||
| 1013 | { | ||
| 1014 | struct ib_sge list; | ||
| 1015 | struct ib_send_wr wr, *bad_wr; | ||
| 1016 | int ret = 0; | ||
| 1017 | |||
| 1018 | list.addr = iu->dma; | ||
| 1019 | list.length = len; | ||
| 1020 | list.lkey = target->srp_host->srp_dev->mr->lkey; | ||
| 1021 | |||
| 1022 | wr.next = NULL; | ||
| 1023 | wr.wr_id = target->tx_head & SRP_SQ_SIZE; | ||
| 1024 | wr.sg_list = &list; | ||
| 1025 | wr.num_sge = 1; | ||
| 1026 | wr.opcode = IB_WR_SEND; | ||
| 1027 | wr.send_flags = IB_SEND_SIGNALED; | ||
| 1028 | |||
| 1029 | ret = ib_post_send(target->qp, &wr, &bad_wr); | ||
| 1030 | |||
| 1031 | if (!ret) { | ||
| 1032 | ++target->tx_head; | ||
| 1033 | --target->req_lim; | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | return ret; | ||
| 1037 | } | ||
| 1038 | |||
| 1039 | static int srp_queuecommand(struct scsi_cmnd *scmnd, | 1126 | static int srp_queuecommand(struct scsi_cmnd *scmnd, |
| 1040 | void (*done)(struct scsi_cmnd *)) | 1127 | void (*done)(struct scsi_cmnd *)) |
| 1041 | { | 1128 | { |
| @@ -1056,7 +1143,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
| 1056 | return 0; | 1143 | return 0; |
| 1057 | } | 1144 | } |
| 1058 | 1145 | ||
| 1059 | iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL); | 1146 | iu = __srp_get_tx_iu(target, SRP_IU_CMD); |
| 1060 | if (!iu) | 1147 | if (!iu) |
| 1061 | goto err; | 1148 | goto err; |
| 1062 | 1149 | ||
| @@ -1064,7 +1151,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, | |||
| 1064 | ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, | 1151 | ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, |
| 1065 | DMA_TO_DEVICE); | 1152 | DMA_TO_DEVICE); |
| 1066 | 1153 | ||
| 1067 | req = list_entry(target->free_reqs.next, struct srp_request, list); | 1154 | req = list_first_entry(&target->free_reqs, struct srp_request, list); |
| 1068 | 1155 | ||
| 1069 | scmnd->scsi_done = done; | 1156 | scmnd->scsi_done = done; |
| 1070 | scmnd->result = 0; | 1157 | scmnd->result = 0; |
| @@ -1121,7 +1208,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target) | |||
| 1121 | goto err; | 1208 | goto err; |
| 1122 | } | 1209 | } |
| 1123 | 1210 | ||
| 1124 | for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { | 1211 | for (i = 0; i < SRP_SQ_SIZE; ++i) { |
| 1125 | target->tx_ring[i] = srp_alloc_iu(target->srp_host, | 1212 | target->tx_ring[i] = srp_alloc_iu(target->srp_host, |
| 1126 | srp_max_iu_len, | 1213 | srp_max_iu_len, |
| 1127 | GFP_KERNEL, DMA_TO_DEVICE); | 1214 | GFP_KERNEL, DMA_TO_DEVICE); |
| @@ -1137,7 +1224,7 @@ err: | |||
| 1137 | target->rx_ring[i] = NULL; | 1224 | target->rx_ring[i] = NULL; |
| 1138 | } | 1225 | } |
| 1139 | 1226 | ||
| 1140 | for (i = 0; i < SRP_SQ_SIZE + 1; ++i) { | 1227 | for (i = 0; i < SRP_SQ_SIZE; ++i) { |
| 1141 | srp_free_iu(target->srp_host, target->tx_ring[i]); | 1228 | srp_free_iu(target->srp_host, target->tx_ring[i]); |
| 1142 | target->tx_ring[i] = NULL; | 1229 | target->tx_ring[i] = NULL; |
| 1143 | } | 1230 | } |
| @@ -1252,8 +1339,13 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
| 1252 | target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len); | 1339 | target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len); |
| 1253 | target->req_lim = be32_to_cpu(rsp->req_lim_delta); | 1340 | target->req_lim = be32_to_cpu(rsp->req_lim_delta); |
| 1254 | 1341 | ||
| 1255 | target->scsi_host->can_queue = min(target->req_lim, | 1342 | /* |
| 1256 | target->scsi_host->can_queue); | 1343 | * Reserve credits for task management so we don't |
| 1344 | * bounce requests back to the SCSI mid-layer. | ||
| 1345 | */ | ||
| 1346 | target->scsi_host->can_queue | ||
| 1347 | = min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE, | ||
| 1348 | target->scsi_host->can_queue); | ||
| 1257 | } else { | 1349 | } else { |
| 1258 | shost_printk(KERN_WARNING, target->scsi_host, | 1350 | shost_printk(KERN_WARNING, target->scsi_host, |
| 1259 | PFX "Unhandled RSP opcode %#x\n", opcode); | 1351 | PFX "Unhandled RSP opcode %#x\n", opcode); |
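The login-completion hunk above now holds back SRP_TSK_MGMT_SQ_SIZE of the credits granted by the target when sizing can_queue, so an abort or reset can always obtain a send slot instead of being bounced back to the SCSI mid-layer. The arithmetic, as a small sketch:

    #include <stdio.h>

    static int clamp_can_queue(int req_lim, int cur_can_queue)
    {
            const int tsk_mgmt_reserve = 1;    /* SRP_TSK_MGMT_SQ_SIZE */
            int usable = req_lim - tsk_mgmt_reserve;

            return usable < cur_can_queue ? usable : cur_can_queue;
    }

    int main(void)
    {
            /* a target granting 32 credits limits a 62-entry host queue to 31 */
            printf("can_queue = %d\n", clamp_can_queue(32, 62));
            return 0;
    }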
| @@ -1350,6 +1442,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) | |||
| 1350 | static int srp_send_tsk_mgmt(struct srp_target_port *target, | 1442 | static int srp_send_tsk_mgmt(struct srp_target_port *target, |
| 1351 | struct srp_request *req, u8 func) | 1443 | struct srp_request *req, u8 func) |
| 1352 | { | 1444 | { |
| 1445 | struct ib_device *dev = target->srp_host->srp_dev->dev; | ||
| 1353 | struct srp_iu *iu; | 1446 | struct srp_iu *iu; |
| 1354 | struct srp_tsk_mgmt *tsk_mgmt; | 1447 | struct srp_tsk_mgmt *tsk_mgmt; |
| 1355 | 1448 | ||
| @@ -1363,10 +1456,12 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, | |||
| 1363 | 1456 | ||
| 1364 | init_completion(&req->done); | 1457 | init_completion(&req->done); |
| 1365 | 1458 | ||
| 1366 | iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT); | 1459 | iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT); |
| 1367 | if (!iu) | 1460 | if (!iu) |
| 1368 | goto out; | 1461 | goto out; |
| 1369 | 1462 | ||
| 1463 | ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, | ||
| 1464 | DMA_TO_DEVICE); | ||
| 1370 | tsk_mgmt = iu->buf; | 1465 | tsk_mgmt = iu->buf; |
| 1371 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); | 1466 | memset(tsk_mgmt, 0, sizeof *tsk_mgmt); |
| 1372 | 1467 | ||
| @@ -1376,6 +1471,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, | |||
| 1376 | tsk_mgmt->tsk_mgmt_func = func; | 1471 | tsk_mgmt->tsk_mgmt_func = func; |
| 1377 | tsk_mgmt->task_tag = req->index; | 1472 | tsk_mgmt->task_tag = req->index; |
| 1378 | 1473 | ||
| 1474 | ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, | ||
| 1475 | DMA_TO_DEVICE); | ||
| 1379 | if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) | 1476 | if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) |
| 1380 | goto out; | 1477 | goto out; |
| 1381 | 1478 | ||
| @@ -1626,9 +1723,9 @@ static struct scsi_host_template srp_template = { | |||
| 1626 | .eh_abort_handler = srp_abort, | 1723 | .eh_abort_handler = srp_abort, |
| 1627 | .eh_device_reset_handler = srp_reset_device, | 1724 | .eh_device_reset_handler = srp_reset_device, |
| 1628 | .eh_host_reset_handler = srp_reset_host, | 1725 | .eh_host_reset_handler = srp_reset_host, |
| 1629 | .can_queue = SRP_SQ_SIZE, | 1726 | .can_queue = SRP_CMD_SQ_SIZE, |
| 1630 | .this_id = -1, | 1727 | .this_id = -1, |
| 1631 | .cmd_per_lun = SRP_SQ_SIZE, | 1728 | .cmd_per_lun = SRP_CMD_SQ_SIZE, |
| 1632 | .use_clustering = ENABLE_CLUSTERING, | 1729 | .use_clustering = ENABLE_CLUSTERING, |
| 1633 | .shost_attrs = srp_host_attrs | 1730 | .shost_attrs = srp_host_attrs |
| 1634 | }; | 1731 | }; |
| @@ -1813,7 +1910,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target) | |||
| 1813 | printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p); | 1910 | printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p); |
| 1814 | goto out; | 1911 | goto out; |
| 1815 | } | 1912 | } |
| 1816 | target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE); | 1913 | target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE); |
| 1817 | break; | 1914 | break; |
| 1818 | 1915 | ||
| 1819 | case SRP_OPT_IO_CLASS: | 1916 | case SRP_OPT_IO_CLASS: |
| @@ -1891,7 +1988,7 @@ static ssize_t srp_create_target(struct device *dev, | |||
| 1891 | 1988 | ||
| 1892 | INIT_LIST_HEAD(&target->free_reqs); | 1989 | INIT_LIST_HEAD(&target->free_reqs); |
| 1893 | INIT_LIST_HEAD(&target->req_queue); | 1990 | INIT_LIST_HEAD(&target->req_queue); |
| 1894 | for (i = 0; i < SRP_SQ_SIZE; ++i) { | 1991 | for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) { |
| 1895 | target->req_ring[i].index = i; | 1992 | target->req_ring[i].index = i; |
| 1896 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); | 1993 | list_add_tail(&target->req_ring[i].list, &target->free_reqs); |
| 1897 | } | 1994 | } |
| @@ -2159,6 +2256,9 @@ static int __init srp_init_module(void) | |||
| 2159 | { | 2256 | { |
| 2160 | int ret; | 2257 | int ret; |
| 2161 | 2258 | ||
| 2259 | BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE); | ||
| 2260 | BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE); | ||
| 2261 | |||
| 2162 | if (srp_sg_tablesize > 255) { | 2262 | if (srp_sg_tablesize > 255) { |
| 2163 | printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); | 2263 | printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n"); |
| 2164 | srp_sg_tablesize = 255; | 2264 | srp_sg_tablesize = 255; |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index 5a80eac6fdaa..ed0dce9e479f 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h | |||
| @@ -59,7 +59,14 @@ enum { | |||
| 59 | 59 | ||
| 60 | SRP_RQ_SHIFT = 6, | 60 | SRP_RQ_SHIFT = 6, |
| 61 | SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, | 61 | SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, |
| 62 | SRP_SQ_SIZE = SRP_RQ_SIZE - 1, | 62 | SRP_RQ_MASK = SRP_RQ_SIZE - 1, |
| 63 | |||
| 64 | SRP_SQ_SIZE = SRP_RQ_SIZE, | ||
| 65 | SRP_SQ_MASK = SRP_SQ_SIZE - 1, | ||
| 66 | SRP_RSP_SQ_SIZE = 1, | ||
| 67 | SRP_REQ_SQ_SIZE = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE, | ||
| 68 | SRP_TSK_MGMT_SQ_SIZE = 1, | ||
| 69 | SRP_CMD_SQ_SIZE = SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE, | ||
| 63 | 70 | ||
| 64 | SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), | 71 | SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), |
| 65 | 72 | ||
| @@ -75,9 +82,10 @@ enum srp_target_state { | |||
| 75 | SRP_TARGET_REMOVED | 82 | SRP_TARGET_REMOVED |
| 76 | }; | 83 | }; |
| 77 | 84 | ||
| 78 | enum srp_request_type { | 85 | enum srp_iu_type { |
| 79 | SRP_REQ_NORMAL, | 86 | SRP_IU_CMD, |
| 80 | SRP_REQ_TASK_MGMT, | 87 | SRP_IU_TSK_MGMT, |
| 88 | SRP_IU_RSP, | ||
| 81 | }; | 89 | }; |
| 82 | 90 | ||
| 83 | struct srp_device { | 91 | struct srp_device { |
| @@ -144,11 +152,11 @@ struct srp_target_port { | |||
| 144 | 152 | ||
| 145 | unsigned tx_head; | 153 | unsigned tx_head; |
| 146 | unsigned tx_tail; | 154 | unsigned tx_tail; |
| 147 | struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; | 155 | struct srp_iu *tx_ring[SRP_SQ_SIZE]; |
| 148 | 156 | ||
| 149 | struct list_head free_reqs; | 157 | struct list_head free_reqs; |
| 150 | struct list_head req_queue; | 158 | struct list_head req_queue; |
| 151 | struct srp_request req_ring[SRP_SQ_SIZE]; | 159 | struct srp_request req_ring[SRP_CMD_SQ_SIZE]; |
| 152 | 160 | ||
| 153 | struct work_struct work; | 161 | struct work_struct work; |
| 154 | 162 | ||
| @@ -164,6 +172,7 @@ struct srp_iu { | |||
| 164 | void *buf; | 172 | void *buf; |
| 165 | size_t size; | 173 | size_t size; |
| 166 | enum dma_data_direction direction; | 174 | enum dma_data_direction direction; |
| 175 | enum srp_iu_type type; | ||
| 167 | }; | 176 | }; |
| 168 | 177 | ||
| 169 | #endif /* IB_SRP_H */ | 178 | #endif /* IB_SRP_H */ |
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c index 97934f1ec53a..b2df3eeb1598 100644 --- a/drivers/net/mlx4/en_main.c +++ b/drivers/net/mlx4/en_main.c | |||
| @@ -124,6 +124,13 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) | |||
| 124 | return 0; | 124 | return 0; |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) | ||
| 128 | { | ||
| 129 | struct mlx4_en_dev *endev = ctx; | ||
| 130 | |||
| 131 | return endev->pndev[port]; | ||
| 132 | } | ||
| 133 | |||
| 127 | static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, | 134 | static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr, |
| 128 | enum mlx4_dev_event event, int port) | 135 | enum mlx4_dev_event event, int port) |
| 129 | { | 136 | { |
| @@ -282,9 +289,11 @@ err_free_res: | |||
| 282 | } | 289 | } |
| 283 | 290 | ||
| 284 | static struct mlx4_interface mlx4_en_interface = { | 291 | static struct mlx4_interface mlx4_en_interface = { |
| 285 | .add = mlx4_en_add, | 292 | .add = mlx4_en_add, |
| 286 | .remove = mlx4_en_remove, | 293 | .remove = mlx4_en_remove, |
| 287 | .event = mlx4_en_event, | 294 | .event = mlx4_en_event, |
| 295 | .get_dev = mlx4_en_get_netdev, | ||
| 296 | .protocol = MLX4_PROTOCOL_EN, | ||
| 288 | }; | 297 | }; |
| 289 | 298 | ||
| 290 | static int __init mlx4_en_init(void) | 299 | static int __init mlx4_en_init(void) |
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index a0d8a26f5a02..9a87c4f3bbbd 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c | |||
| @@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
| 69 | struct mlx4_en_priv *priv = netdev_priv(dev); | 69 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 70 | struct mlx4_en_dev *mdev = priv->mdev; | 70 | struct mlx4_en_dev *mdev = priv->mdev; |
| 71 | int err; | 71 | int err; |
| 72 | int idx; | ||
| 72 | 73 | ||
| 73 | if (!priv->vlgrp) | 74 | if (!priv->vlgrp) |
| 74 | return; | 75 | return; |
| @@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
| 83 | if (err) | 84 | if (err) |
| 84 | en_err(priv, "Failed configuring VLAN filter\n"); | 85 | en_err(priv, "Failed configuring VLAN filter\n"); |
| 85 | } | 86 | } |
| 87 | if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) | ||
| 88 | en_err(priv, "failed adding vlan %d\n", vid); | ||
| 86 | mutex_unlock(&mdev->state_lock); | 89 | mutex_unlock(&mdev->state_lock); |
| 90 | |||
| 87 | } | 91 | } |
| 88 | 92 | ||
| 89 | static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | 93 | static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) |
| @@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
| 91 | struct mlx4_en_priv *priv = netdev_priv(dev); | 95 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 92 | struct mlx4_en_dev *mdev = priv->mdev; | 96 | struct mlx4_en_dev *mdev = priv->mdev; |
| 93 | int err; | 97 | int err; |
| 98 | int idx; | ||
| 94 | 99 | ||
| 95 | if (!priv->vlgrp) | 100 | if (!priv->vlgrp) |
| 96 | return; | 101 | return; |
| @@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
| 101 | 106 | ||
| 102 | /* Remove VID from port VLAN filter */ | 107 | /* Remove VID from port VLAN filter */ |
| 103 | mutex_lock(&mdev->state_lock); | 108 | mutex_lock(&mdev->state_lock); |
| 109 | if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) | ||
| 110 | mlx4_unregister_vlan(mdev->dev, priv->port, idx); | ||
| 111 | else | ||
| 112 | en_err(priv, "could not find vid %d in cache\n", vid); | ||
| 113 | |||
| 104 | if (mdev->device_up && priv->port_up) { | 114 | if (mdev->device_up && priv->port_up) { |
| 105 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | 115 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); |
| 106 | if (err) | 116 | if (err) |
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c index a29abe845d2e..a24988799e01 100644 --- a/drivers/net/mlx4/en_port.c +++ b/drivers/net/mlx4/en_port.c | |||
| @@ -127,8 +127,8 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, | |||
| 127 | memset(context, 0, sizeof *context); | 127 | memset(context, 0, sizeof *context); |
| 128 | 128 | ||
| 129 | context->base_qpn = cpu_to_be32(base_qpn); | 129 | context->base_qpn = cpu_to_be32(base_qpn); |
| 130 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn); | 130 | context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn); |
| 131 | context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn); | 131 | context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn); |
| 132 | context->intra_no_vlan = 0; | 132 | context->intra_no_vlan = 0; |
| 133 | context->no_vlan = MLX4_NO_VLAN_IDX; | 133 | context->no_vlan = MLX4_NO_VLAN_IDX; |
| 134 | context->intra_vlan_miss = 0; | 134 | context->intra_vlan_miss = 0; |
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h index e6477f12beb5..935489120400 100644 --- a/drivers/net/mlx4/en_port.h +++ b/drivers/net/mlx4/en_port.h | |||
| @@ -36,7 +36,8 @@ | |||
| 36 | 36 | ||
| 37 | 37 | ||
| 38 | #define SET_PORT_GEN_ALL_VALID 0x7 | 38 | #define SET_PORT_GEN_ALL_VALID 0x7 |
| 39 | #define SET_PORT_PROMISC_SHIFT 31 | 39 | #define SET_PORT_PROMISC_EN_SHIFT 31 |
| 40 | #define SET_PORT_PROMISC_MODE_SHIFT 30 | ||
| 40 | 41 | ||
| 41 | enum { | 42 | enum { |
| 42 | MLX4_CMD_SET_VLAN_FLTR = 0x47, | 43 | MLX4_CMD_SET_VLAN_FLTR = 0x47, |
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c index 04f42ae1eda0..5b3593d3cd74 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c | |||
| @@ -98,7 +98,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags) | |||
| 98 | [20] = "Address vector port checking support", | 98 | [20] = "Address vector port checking support", |
| 99 | [21] = "UD multicast support", | 99 | [21] = "UD multicast support", |
| 100 | [24] = "Demand paging support", | 100 | [24] = "Demand paging support", |
| 101 | [25] = "Router support" | 101 | [25] = "Router support", |
| 102 | [30] = "IBoE support" | ||
| 102 | }; | 103 | }; |
| 103 | int i; | 104 | int i; |
| 104 | 105 | ||
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c index 555067802751..73c94fcdfddf 100644 --- a/drivers/net/mlx4/intf.c +++ b/drivers/net/mlx4/intf.c | |||
| @@ -161,3 +161,24 @@ void mlx4_unregister_device(struct mlx4_dev *dev) | |||
| 161 | 161 | ||
| 162 | mutex_unlock(&intf_mutex); | 162 | mutex_unlock(&intf_mutex); |
| 163 | } | 163 | } |
| 164 | |||
| 165 | void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port) | ||
| 166 | { | ||
| 167 | struct mlx4_priv *priv = mlx4_priv(dev); | ||
| 168 | struct mlx4_device_context *dev_ctx; | ||
| 169 | unsigned long flags; | ||
| 170 | void *result = NULL; | ||
| 171 | |||
| 172 | spin_lock_irqsave(&priv->ctx_lock, flags); | ||
| 173 | |||
| 174 | list_for_each_entry(dev_ctx, &priv->ctx_list, list) | ||
| 175 | if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) { | ||
| 176 | result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port); | ||
| 177 | break; | ||
| 178 | } | ||
| 179 | |||
| 180 | spin_unlock_irqrestore(&priv->ctx_lock, flags); | ||
| 181 | |||
| 182 | return result; | ||
| 183 | } | ||
| 184 | EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev); | ||
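mlx4_get_protocol_dev() walks the device's registered interfaces under ctx_lock and returns whatever the first interface of the requested protocol hands back from its get_dev hook. A minimal consumer sketch, assuming the MLX4_PROTOCOL_EN interface answers with its per-port net_device; the helper name is illustrative, not part of this patch:

#include <linux/mlx4/driver.h>
#include <linux/netdevice.h>

/* Illustrative consumer, not part of this patch. */
static struct net_device *example_port_netdev(struct mlx4_dev *dev, int port)
{
	/* NULL when no MLX4_PROTOCOL_EN interface provides a get_dev hook */
	return mlx4_get_protocol_dev(dev, MLX4_PROTOCOL_EN, port);
}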
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h index 449210994ee9..dab5eafb8946 100644 --- a/drivers/net/mlx4/mlx4_en.h +++ b/drivers/net/mlx4/mlx4_en.h | |||
| @@ -463,6 +463,7 @@ struct mlx4_en_priv { | |||
| 463 | char *mc_addrs; | 463 | char *mc_addrs; |
| 464 | int mc_addrs_cnt; | 464 | int mc_addrs_cnt; |
| 465 | struct mlx4_en_stat_out_mbox hw_stats; | 465 | struct mlx4_en_stat_out_mbox hw_stats; |
| 466 | int vids[128]; | ||
| 466 | }; | 467 | }; |
| 467 | 468 | ||
| 468 | 469 | ||
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 606aa58afdea..56371ef328ef 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c | |||
| @@ -182,6 +182,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, | |||
| 182 | return err; | 182 | return err; |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx) | ||
| 186 | { | ||
| 187 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | ||
| 188 | int i; | ||
| 189 | |||
| 190 | for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) { | ||
| 191 | if (table->refs[i] && | ||
| 192 | (vid == (MLX4_VLAN_MASK & | ||
| 193 | be32_to_cpu(table->entries[i])))) { | ||
| 194 | /* VLAN found in the port VLAN table, return its index */ | ||
| 195 | *idx = i; | ||
| 196 | return 0; | ||
| 197 | } | ||
| 198 | } | ||
| 199 | |||
| 200 | return -ENOENT; | ||
| 201 | } | ||
| 202 | EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan); | ||
| 203 | |||
| 185 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) | 204 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) |
| 186 | { | 205 | { |
| 187 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; | 206 | struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; |
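mlx4_find_cached_vlan() is exported so other mlx4 consumers can translate a VLAN ID into the index it occupies in the port VLAN table, for example before programming that index into a hardware object. A hedged sketch of such a lookup; the wrapper is illustrative only:

#include <linux/errno.h>
#include <linux/mlx4/device.h>

/* Illustrative wrapper, not part of this patch. */
static int example_vid_to_vlan_index(struct mlx4_dev *dev, u8 port, u16 vid)
{
	int idx;

	if (mlx4_find_cached_vlan(dev, port, vid, &idx))
		return -ENOENT;		/* VID was never registered on this port */

	return idx;
}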
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 2731266e73a7..ddbe81261d5d 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
| @@ -141,6 +141,7 @@ enum { | |||
| 141 | MLX4_SET_PORT_MAC_TABLE = 0x2, | 141 | MLX4_SET_PORT_MAC_TABLE = 0x2, |
| 142 | MLX4_SET_PORT_VLAN_TABLE = 0x3, | 142 | MLX4_SET_PORT_VLAN_TABLE = 0x3, |
| 143 | MLX4_SET_PORT_PRIO_MAP = 0x4, | 143 | MLX4_SET_PORT_PRIO_MAP = 0x4, |
| 144 | MLX4_SET_PORT_GID_TABLE = 0x5, | ||
| 144 | }; | 145 | }; |
| 145 | 146 | ||
| 146 | struct mlx4_dev; | 147 | struct mlx4_dev; |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index ada69389fb91..897ec71a0ca5 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -67,7 +67,8 @@ enum { | |||
| 67 | MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, | 67 | MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, |
| 68 | MLX4_DEV_CAP_FLAG_RAW_MCAST = 1 << 19, | 68 | MLX4_DEV_CAP_FLAG_RAW_MCAST = 1 << 19, |
| 69 | MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1 << 20, | 69 | MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1 << 20, |
| 70 | MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21 | 70 | MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21, |
| 71 | MLX4_DEV_CAP_FLAG_IBOE = 1 << 30 | ||
| 71 | }; | 72 | }; |
| 72 | 73 | ||
| 73 | enum { | 74 | enum { |
| @@ -377,6 +378,27 @@ struct mlx4_av { | |||
| 377 | u8 dgid[16]; | 378 | u8 dgid[16]; |
| 378 | }; | 379 | }; |
| 379 | 380 | ||
| 381 | struct mlx4_eth_av { | ||
| 382 | __be32 port_pd; | ||
| 383 | u8 reserved1; | ||
| 384 | u8 smac_idx; | ||
| 385 | u16 reserved2; | ||
| 386 | u8 reserved3; | ||
| 387 | u8 gid_index; | ||
| 388 | u8 stat_rate; | ||
| 389 | u8 hop_limit; | ||
| 390 | __be32 sl_tclass_flowlabel; | ||
| 391 | u8 dgid[16]; | ||
| 392 | u32 reserved4[2]; | ||
| 393 | __be16 vlan; | ||
| 394 | u8 mac[6]; | ||
| 395 | }; | ||
| 396 | |||
| 397 | union mlx4_ext_av { | ||
| 398 | struct mlx4_av ib; | ||
| 399 | struct mlx4_eth_av eth; | ||
| 400 | }; | ||
| 401 | |||
| 380 | struct mlx4_dev { | 402 | struct mlx4_dev { |
| 381 | struct pci_dev *pdev; | 403 | struct pci_dev *pdev; |
| 382 | unsigned long flags; | 404 | unsigned long flags; |
| @@ -405,6 +427,12 @@ struct mlx4_init_port_param { | |||
| 405 | if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \ | 427 | if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \ |
| 406 | ~(dev)->caps.port_mask) & 1 << ((port) - 1)) | 428 | ~(dev)->caps.port_mask) & 1 << ((port) - 1)) |
| 407 | 429 | ||
| 430 | #define mlx4_foreach_ib_transport_port(port, dev) \ | ||
| 431 | for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ | ||
| 432 | if (((dev)->caps.port_mask & 1 << ((port) - 1)) || \ | ||
| 433 | ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) | ||
| 434 | |||
| 435 | |||
| 408 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | 436 | int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, |
| 409 | struct mlx4_buf *buf); | 437 | struct mlx4_buf *buf); |
| 410 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); | 438 | void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); |
| @@ -472,6 +500,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); | |||
| 472 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index); | 500 | int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index); |
| 473 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index); | 501 | void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index); |
| 474 | 502 | ||
| 503 | int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); | ||
| 475 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); | 504 | int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); |
| 476 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); | 505 | void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); |
| 477 | 506 | ||
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 53c5fdb6eac4..f407cd4bfb34 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h | |||
| @@ -44,15 +44,24 @@ enum mlx4_dev_event { | |||
| 44 | MLX4_DEV_EVENT_PORT_REINIT, | 44 | MLX4_DEV_EVENT_PORT_REINIT, |
| 45 | }; | 45 | }; |
| 46 | 46 | ||
| 47 | enum mlx4_protocol { | ||
| 48 | MLX4_PROTOCOL_IB, | ||
| 49 | MLX4_PROTOCOL_EN, | ||
| 50 | }; | ||
| 51 | |||
| 47 | struct mlx4_interface { | 52 | struct mlx4_interface { |
| 48 | void * (*add) (struct mlx4_dev *dev); | 53 | void * (*add) (struct mlx4_dev *dev); |
| 49 | void (*remove)(struct mlx4_dev *dev, void *context); | 54 | void (*remove)(struct mlx4_dev *dev, void *context); |
| 50 | void (*event) (struct mlx4_dev *dev, void *context, | 55 | void (*event) (struct mlx4_dev *dev, void *context, |
| 51 | enum mlx4_dev_event event, int port); | 56 | enum mlx4_dev_event event, int port); |
| 57 | void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port); | ||
| 52 | struct list_head list; | 58 | struct list_head list; |
| 59 | enum mlx4_protocol protocol; | ||
| 53 | }; | 60 | }; |
| 54 | 61 | ||
| 55 | int mlx4_register_interface(struct mlx4_interface *intf); | 62 | int mlx4_register_interface(struct mlx4_interface *intf); |
| 56 | void mlx4_unregister_interface(struct mlx4_interface *intf); | 63 | void mlx4_unregister_interface(struct mlx4_interface *intf); |
| 57 | 64 | ||
| 65 | void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); | ||
| 66 | |||
| 58 | #endif /* MLX4_DRIVER_H */ | 67 | #endif /* MLX4_DRIVER_H */ |
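The extended mlx4_interface lets a protocol driver tag itself with an mlx4_protocol value and optionally expose a get_dev hook that mlx4_get_protocol_dev() will call. A hedged sketch of how an Ethernet-protocol driver might fill it in; every name below is a placeholder, only .protocol and .get_dev are new in this patch:

#include <linux/mlx4/driver.h>

/* Placeholder get_dev hook: return the per-port object created in add(). */
static void *example_get_dev(struct mlx4_dev *dev, void *context, u8 port)
{
	return NULL;	/* placeholder */
}

static struct mlx4_interface example_interface = {
	.add		= NULL,		/* driver's add() callback */
	.remove		= NULL,		/* driver's remove() callback */
	.event		= NULL,		/* driver's event() callback */
	.get_dev	= example_get_dev,
	.protocol	= MLX4_PROTOCOL_EN,
};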
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 7abe64326f72..0eeb2a1a867c 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
| @@ -109,10 +109,11 @@ struct mlx4_qp_path { | |||
| 109 | __be32 tclass_flowlabel; | 109 | __be32 tclass_flowlabel; |
| 110 | u8 rgid[16]; | 110 | u8 rgid[16]; |
| 111 | u8 sched_queue; | 111 | u8 sched_queue; |
| 112 | u8 snooper_flags; | 112 | u8 vlan_index; |
| 113 | u8 reserved3[2]; | 113 | u8 reserved3[2]; |
| 114 | u8 counter_index; | 114 | u8 counter_index; |
| 115 | u8 reserved4[7]; | 115 | u8 reserved4; |
| 116 | u8 dmac[6]; | ||
| 116 | }; | 117 | }; |
| 117 | 118 | ||
| 118 | struct mlx4_qp_context { | 119 | struct mlx4_qp_context { |
| @@ -166,6 +167,7 @@ enum { | |||
| 166 | MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, | 167 | MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5, |
| 167 | MLX4_WQE_CTRL_INS_VLAN = 1 << 6, | 168 | MLX4_WQE_CTRL_INS_VLAN = 1 << 6, |
| 168 | MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7, | 169 | MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7, |
| 170 | MLX4_WQE_CTRL_FORCE_LOOPBACK = 1 << 0, | ||
| 169 | }; | 171 | }; |
| 170 | 172 | ||
| 171 | struct mlx4_wqe_ctrl_seg { | 173 | struct mlx4_wqe_ctrl_seg { |
| @@ -219,7 +221,8 @@ struct mlx4_wqe_datagram_seg { | |||
| 219 | __be32 av[8]; | 221 | __be32 av[8]; |
| 220 | __be32 dqpn; | 222 | __be32 dqpn; |
| 221 | __be32 qkey; | 223 | __be32 qkey; |
| 222 | __be32 reservd[2]; | 224 | __be16 vlan; |
| 225 | u8 mac[6]; | ||
| 223 | }; | 226 | }; |
| 224 | 227 | ||
| 225 | struct mlx4_wqe_lso_seg { | 228 | struct mlx4_wqe_lso_seg { |
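With the datagram segment's reserved tail replaced by vlan and mac, a UD send on an Ethernet (IBoE) port can carry the destination MAC and VLAN alongside the address vector. A hedged sketch of populating it from the new mlx4_eth_av; the work-request plumbing and the remote QPN/Q_Key handling are illustrative:

#include <linux/string.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

/* Illustrative helper; only the struct fields come from this patch. */
static void example_set_eth_datagram_seg(struct mlx4_wqe_datagram_seg *dseg,
					 union mlx4_ext_av *av,
					 u32 remote_qpn, u32 qkey)
{
	memcpy(dseg->av, &av->ib, sizeof(struct mlx4_av));	/* common 32-byte AV */
	dseg->dqpn = cpu_to_be32(remote_qpn);
	dseg->qkey = cpu_to_be32(qkey);
	dseg->vlan = av->eth.vlan;				/* already big-endian */
	memcpy(dseg->mac, av->eth.mac, sizeof(dseg->mac));
}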
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index fa0d52b8e622..b5fc9f39122b 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h | |||
| @@ -39,7 +39,9 @@ | |||
| 39 | #include <linux/if_arp.h> | 39 | #include <linux/if_arp.h> |
| 40 | #include <linux/netdevice.h> | 40 | #include <linux/netdevice.h> |
| 41 | #include <linux/socket.h> | 41 | #include <linux/socket.h> |
| 42 | #include <linux/if_vlan.h> | ||
| 42 | #include <rdma/ib_verbs.h> | 43 | #include <rdma/ib_verbs.h> |
| 44 | #include <rdma/ib_pack.h> | ||
| 43 | 45 | ||
| 44 | struct rdma_addr_client { | 46 | struct rdma_addr_client { |
| 45 | atomic_t refcount; | 47 | atomic_t refcount; |
| @@ -63,6 +65,7 @@ struct rdma_dev_addr { | |||
| 63 | unsigned char broadcast[MAX_ADDR_LEN]; | 65 | unsigned char broadcast[MAX_ADDR_LEN]; |
| 64 | unsigned short dev_type; | 66 | unsigned short dev_type; |
| 65 | int bound_dev_if; | 67 | int bound_dev_if; |
| 68 | enum rdma_transport_type transport; | ||
| 66 | }; | 69 | }; |
| 67 | 70 | ||
| 68 | /** | 71 | /** |
| @@ -127,9 +130,51 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr) | |||
| 127 | return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0; | 130 | return dev_addr->dev_type == ARPHRD_INFINIBAND ? 4 : 0; |
| 128 | } | 131 | } |
| 129 | 132 | ||
| 133 | static inline void iboe_mac_vlan_to_ll(union ib_gid *gid, u8 *mac, u16 vid) | ||
| 134 | { | ||
| 135 | memset(gid->raw, 0, 16); | ||
| 136 | *((__be32 *) gid->raw) = cpu_to_be32(0xfe800000); | ||
| 137 | if (vid < 0x1000) { | ||
| 138 | gid->raw[12] = vid & 0xff; | ||
| 139 | gid->raw[11] = vid >> 8; | ||
| 140 | } else { | ||
| 141 | gid->raw[12] = 0xfe; | ||
| 142 | gid->raw[11] = 0xff; | ||
| 143 | } | ||
| 144 | memcpy(gid->raw + 13, mac + 3, 3); | ||
| 145 | memcpy(gid->raw + 8, mac, 3); | ||
| 146 | gid->raw[8] ^= 2; | ||
| 147 | } | ||
| 148 | |||
| 149 | static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev) | ||
| 150 | { | ||
| 151 | return dev->priv_flags & IFF_802_1Q_VLAN ? | ||
| 152 | vlan_dev_vlan_id(dev) : 0xffff; | ||
| 153 | } | ||
| 154 | |||
| 155 | static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, | ||
| 156 | union ib_gid *gid) | ||
| 157 | { | ||
| 158 | struct net_device *dev; | ||
| 159 | u16 vid = 0xffff; | ||
| 160 | |||
| 161 | dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); | ||
| 162 | if (dev) { | ||
| 163 | vid = rdma_vlan_dev_vlan_id(dev); | ||
| 164 | dev_put(dev); | ||
| 165 | } | ||
| 166 | |||
| 167 | iboe_mac_vlan_to_ll(gid, dev_addr->src_dev_addr, vid); | ||
| 168 | } | ||
| 169 | |||
| 130 | static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) | 170 | static inline void rdma_addr_get_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) |
| 131 | { | 171 | { |
| 132 | memcpy(gid, dev_addr->src_dev_addr + rdma_addr_gid_offset(dev_addr), sizeof *gid); | 172 | if (dev_addr->transport == RDMA_TRANSPORT_IB && |
| 173 | dev_addr->dev_type != ARPHRD_INFINIBAND) | ||
| 174 | iboe_addr_get_sgid(dev_addr, gid); | ||
| 175 | else | ||
| 176 | memcpy(gid, dev_addr->src_dev_addr + | ||
| 177 | rdma_addr_gid_offset(dev_addr), sizeof *gid); | ||
| 133 | } | 178 | } |
| 134 | 179 | ||
| 135 | static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) | 180 | static inline void rdma_addr_set_sgid(struct rdma_dev_addr *dev_addr, union ib_gid *gid) |
| @@ -147,4 +192,91 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g | |||
| 147 | memcpy(dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid); | 192 | memcpy(dev_addr->dst_dev_addr + rdma_addr_gid_offset(dev_addr), gid, sizeof *gid); |
| 148 | } | 193 | } |
| 149 | 194 | ||
| 195 | static inline enum ib_mtu iboe_get_mtu(int mtu) | ||
| 196 | { | ||
| 197 | /* | ||
| 198 | * reduce IB headers from effective IBoE MTU. 28 stands for | ||
| 199 | * atomic header which is the biggest possible header after BTH | ||
| 200 | */ | ||
| 201 | mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28; | ||
| 202 | |||
| 203 | if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096)) | ||
| 204 | return IB_MTU_4096; | ||
| 205 | else if (mtu >= ib_mtu_enum_to_int(IB_MTU_2048)) | ||
| 206 | return IB_MTU_2048; | ||
| 207 | else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024)) | ||
| 208 | return IB_MTU_1024; | ||
| 209 | else if (mtu >= ib_mtu_enum_to_int(IB_MTU_512)) | ||
| 210 | return IB_MTU_512; | ||
| 211 | else if (mtu >= ib_mtu_enum_to_int(IB_MTU_256)) | ||
| 212 | return IB_MTU_256; | ||
| 213 | else | ||
| 214 | return 0; | ||
| 215 | } | ||
| 216 | |||
| 217 | static inline int iboe_get_rate(struct net_device *dev) | ||
| 218 | { | ||
| 219 | struct ethtool_cmd cmd; | ||
| 220 | |||
| 221 | if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings || | ||
| 222 | dev->ethtool_ops->get_settings(dev, &cmd)) | ||
| 223 | return IB_RATE_PORT_CURRENT; | ||
| 224 | |||
| 225 | if (cmd.speed >= 40000) | ||
| 226 | return IB_RATE_40_GBPS; | ||
| 227 | else if (cmd.speed >= 30000) | ||
| 228 | return IB_RATE_30_GBPS; | ||
| 229 | else if (cmd.speed >= 20000) | ||
| 230 | return IB_RATE_20_GBPS; | ||
| 231 | else if (cmd.speed >= 10000) | ||
| 232 | return IB_RATE_10_GBPS; | ||
| 233 | else | ||
| 234 | return IB_RATE_PORT_CURRENT; | ||
| 235 | } | ||
| 236 | |||
| 237 | static inline int rdma_link_local_addr(struct in6_addr *addr) | ||
| 238 | { | ||
| 239 | if (addr->s6_addr32[0] == htonl(0xfe800000) && | ||
| 240 | addr->s6_addr32[1] == 0) | ||
| 241 | return 1; | ||
| 242 | |||
| 243 | return 0; | ||
| 244 | } | ||
| 245 | |||
| 246 | static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac) | ||
| 247 | { | ||
| 248 | memcpy(mac, &addr->s6_addr[8], 3); | ||
| 249 | memcpy(mac + 3, &addr->s6_addr[13], 3); | ||
| 250 | mac[0] ^= 2; | ||
| 251 | } | ||
| 252 | |||
| 253 | static inline int rdma_is_multicast_addr(struct in6_addr *addr) | ||
| 254 | { | ||
| 255 | return addr->s6_addr[0] == 0xff; | ||
| 256 | } | ||
| 257 | |||
| 258 | static inline void rdma_get_mcast_mac(struct in6_addr *addr, u8 *mac) | ||
| 259 | { | ||
| 260 | int i; | ||
| 261 | |||
| 262 | mac[0] = 0x33; | ||
| 263 | mac[1] = 0x33; | ||
| 264 | for (i = 2; i < 6; ++i) | ||
| 265 | mac[i] = addr->s6_addr[i + 10]; | ||
| 266 | } | ||
| 267 | |||
| 268 | static inline u16 rdma_get_vlan_id(union ib_gid *dgid) | ||
| 269 | { | ||
| 270 | u16 vid; | ||
| 271 | |||
| 272 | vid = dgid->raw[11] << 8 | dgid->raw[12]; | ||
| 273 | return vid < 0x1000 ? vid : 0xffff; | ||
| 274 | } | ||
| 275 | |||
| 276 | static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev) | ||
| 277 | { | ||
| 278 | return dev->priv_flags & IFF_802_1Q_VLAN ? | ||
| 279 | vlan_dev_real_dev(dev) : 0; | ||
| 280 | } | ||
| 281 | |||
| 150 | #endif /* IB_ADDR_H */ | 282 | #endif /* IB_ADDR_H */ |
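iboe_mac_vlan_to_ll() builds a link-local GID from a MAC address using the usual modified EUI-64 derivation, except that a VLAN ID below 0x1000 takes the place of the ff:fe filler bytes; rdma_get_ll_mac() and rdma_get_vlan_id() invert the mapping. A worked example with a hypothetical MAC:

#include <rdma/ib_addr.h>

/*
 * Worked example, hypothetical MAC 00:1e:0b:aa:bb:cc on VLAN 7:
 *   bytes  0-7 : fe 80 00 00 00 00 00 00   link-local prefix
 *   bytes  8-10: 02 1e 0b                  MAC[0..2], universal/local bit flipped
 *   bytes 11-12: 00 07                     VLAN ID (ff fe when vid >= 0x1000, i.e. no VLAN)
 *   bytes 13-15: aa bb cc                  MAC[3..5]
 * giving fe80::021e:0b00:07aa:bbcc.
 */
static void example_make_iboe_sgid(union ib_gid *gid)
{
	u8 mac[6] = { 0x00, 0x1e, 0x0b, 0xaa, 0xbb, 0xcc };

	iboe_mac_vlan_to_ll(gid, mac, 7);
}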
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h index cbb50f4da3dd..b37fe3b10a9d 100644 --- a/include/rdma/ib_pack.h +++ b/include/rdma/ib_pack.h | |||
| @@ -37,6 +37,8 @@ | |||
| 37 | 37 | ||
| 38 | enum { | 38 | enum { |
| 39 | IB_LRH_BYTES = 8, | 39 | IB_LRH_BYTES = 8, |
| 40 | IB_ETH_BYTES = 14, | ||
| 41 | IB_VLAN_BYTES = 4, | ||
| 40 | IB_GRH_BYTES = 40, | 42 | IB_GRH_BYTES = 40, |
| 41 | IB_BTH_BYTES = 12, | 43 | IB_BTH_BYTES = 12, |
| 42 | IB_DETH_BYTES = 8 | 44 | IB_DETH_BYTES = 8 |
| @@ -210,14 +212,32 @@ struct ib_unpacked_deth { | |||
| 210 | __be32 source_qpn; | 212 | __be32 source_qpn; |
| 211 | }; | 213 | }; |
| 212 | 214 | ||
| 215 | struct ib_unpacked_eth { | ||
| 216 | u8 dmac_h[4]; | ||
| 217 | u8 dmac_l[2]; | ||
| 218 | u8 smac_h[2]; | ||
| 219 | u8 smac_l[4]; | ||
| 220 | __be16 type; | ||
| 221 | }; | ||
| 222 | |||
| 223 | struct ib_unpacked_vlan { | ||
| 224 | __be16 tag; | ||
| 225 | __be16 type; | ||
| 226 | }; | ||
| 227 | |||
| 213 | struct ib_ud_header { | 228 | struct ib_ud_header { |
| 229 | int lrh_present; | ||
| 214 | struct ib_unpacked_lrh lrh; | 230 | struct ib_unpacked_lrh lrh; |
| 215 | int grh_present; | 231 | int eth_present; |
| 216 | struct ib_unpacked_grh grh; | 232 | struct ib_unpacked_eth eth; |
| 217 | struct ib_unpacked_bth bth; | 233 | int vlan_present; |
| 234 | struct ib_unpacked_vlan vlan; | ||
| 235 | int grh_present; | ||
| 236 | struct ib_unpacked_grh grh; | ||
| 237 | struct ib_unpacked_bth bth; | ||
| 218 | struct ib_unpacked_deth deth; | 238 | struct ib_unpacked_deth deth; |
| 219 | int immediate_present; | 239 | int immediate_present; |
| 220 | __be32 immediate_data; | 240 | __be32 immediate_data; |
| 221 | }; | 241 | }; |
| 222 | 242 | ||
| 223 | void ib_pack(const struct ib_field *desc, | 243 | void ib_pack(const struct ib_field *desc, |
| @@ -230,9 +250,12 @@ void ib_unpack(const struct ib_field *desc, | |||
| 230 | void *buf, | 250 | void *buf, |
| 231 | void *structure); | 251 | void *structure); |
| 232 | 252 | ||
| 233 | void ib_ud_header_init(int payload_bytes, | 253 | void ib_ud_header_init(int payload_bytes, |
| 234 | int grh_present, | 254 | int lrh_present, |
| 235 | int immediate_present, | 255 | int eth_present, |
| 256 | int vlan_present, | ||
| 257 | int grh_present, | ||
| 258 | int immediate_present, | ||
| 236 | struct ib_ud_header *header); | 259 | struct ib_ud_header *header); |
| 237 | 260 | ||
| 238 | int ib_ud_header_pack(struct ib_ud_header *header, | 261 | int ib_ud_header_pack(struct ib_ud_header *header, |
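ib_ud_header_init() now takes explicit flags for each optional header, so an IBoE sender can ask for an Ethernet header (and optional VLAN tag) instead of an LRH. A hedged sketch of the Ethernet-link-layer call; the payload length and VLAN decision are the caller's, and the helper name is illustrative:

#include <rdma/ib_pack.h>

/* Illustrative helper, not part of this patch. */
static void example_init_iboe_ud_header(struct ib_ud_header *header,
					int payload_len, int vlan_present)
{
	ib_ud_header_init(payload_len,
			  0,			/* lrh_present: no LRH on Ethernet */
			  1,			/* eth_present */
			  vlan_present,
			  1,			/* grh_present: GRH is always carried on IBoE */
			  0,			/* immediate_present */
			  header);
}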
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h index a17f77106149..fe5b05177a2c 100644 --- a/include/rdma/ib_user_verbs.h +++ b/include/rdma/ib_user_verbs.h | |||
| @@ -205,7 +205,8 @@ struct ib_uverbs_query_port_resp { | |||
| 205 | __u8 active_width; | 205 | __u8 active_width; |
| 206 | __u8 active_speed; | 206 | __u8 active_speed; |
| 207 | __u8 phys_state; | 207 | __u8 phys_state; |
| 208 | __u8 reserved[3]; | 208 | __u8 link_layer; |
| 209 | __u8 reserved[2]; | ||
| 209 | }; | 210 | }; |
| 210 | 211 | ||
| 211 | struct ib_uverbs_alloc_pd { | 212 | struct ib_uverbs_alloc_pd { |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 857b3b9cf120..e04c4888d1fd 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -75,6 +75,12 @@ enum rdma_transport_type { | |||
| 75 | enum rdma_transport_type | 75 | enum rdma_transport_type |
| 76 | rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__; | 76 | rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__; |
| 77 | 77 | ||
| 78 | enum rdma_link_layer { | ||
| 79 | IB_LINK_LAYER_UNSPECIFIED, | ||
| 80 | IB_LINK_LAYER_INFINIBAND, | ||
| 81 | IB_LINK_LAYER_ETHERNET, | ||
| 82 | }; | ||
| 83 | |||
| 78 | enum ib_device_cap_flags { | 84 | enum ib_device_cap_flags { |
| 79 | IB_DEVICE_RESIZE_MAX_WR = 1, | 85 | IB_DEVICE_RESIZE_MAX_WR = 1, |
| 80 | IB_DEVICE_BAD_PKEY_CNTR = (1<<1), | 86 | IB_DEVICE_BAD_PKEY_CNTR = (1<<1), |
| @@ -1010,6 +1016,8 @@ struct ib_device { | |||
| 1010 | int (*query_port)(struct ib_device *device, | 1016 | int (*query_port)(struct ib_device *device, |
| 1011 | u8 port_num, | 1017 | u8 port_num, |
| 1012 | struct ib_port_attr *port_attr); | 1018 | struct ib_port_attr *port_attr); |
| 1019 | enum rdma_link_layer (*get_link_layer)(struct ib_device *device, | ||
| 1020 | u8 port_num); | ||
| 1013 | int (*query_gid)(struct ib_device *device, | 1021 | int (*query_gid)(struct ib_device *device, |
| 1014 | u8 port_num, int index, | 1022 | u8 port_num, int index, |
| 1015 | union ib_gid *gid); | 1023 | union ib_gid *gid); |
| @@ -1222,6 +1230,9 @@ int ib_query_device(struct ib_device *device, | |||
| 1222 | int ib_query_port(struct ib_device *device, | 1230 | int ib_query_port(struct ib_device *device, |
| 1223 | u8 port_num, struct ib_port_attr *port_attr); | 1231 | u8 port_num, struct ib_port_attr *port_attr); |
| 1224 | 1232 | ||
| 1233 | enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, | ||
| 1234 | u8 port_num); | ||
| 1235 | |||
| 1225 | int ib_query_gid(struct ib_device *device, | 1236 | int ib_query_gid(struct ib_device *device, |
| 1226 | u8 port_num, int index, union ib_gid *gid); | 1237 | u8 port_num, int index, union ib_gid *gid); |
| 1227 | 1238 | ||
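rdma_port_get_link_layer() gives ULPs and core code a per-port answer instead of the per-device transport type, so paths that only apply to an InfiniBand link can be skipped on Ethernet (IBoE) ports. A minimal sketch of such a check; the helper name is illustrative:

#include <rdma/ib_verbs.h>

/* Illustrative helper, not part of this patch. */
static bool example_port_is_infiniband(struct ib_device *device, u8 port_num)
{
	return rdma_port_get_link_layer(device, port_num) ==
		IB_LINK_LAYER_INFINIBAND;
}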
diff --git a/include/scsi/srp.h b/include/scsi/srp.h index ad178fa78f66..1ae84db4c9fb 100644 --- a/include/scsi/srp.h +++ b/include/scsi/srp.h | |||
| @@ -239,4 +239,42 @@ struct srp_rsp { | |||
| 239 | u8 data[0]; | 239 | u8 data[0]; |
| 240 | } __attribute__((packed)); | 240 | } __attribute__((packed)); |
| 241 | 241 | ||
| 242 | struct srp_cred_req { | ||
| 243 | u8 opcode; | ||
| 244 | u8 sol_not; | ||
| 245 | u8 reserved[2]; | ||
| 246 | __be32 req_lim_delta; | ||
| 247 | u64 tag; | ||
| 248 | }; | ||
| 249 | |||
| 250 | struct srp_cred_rsp { | ||
| 251 | u8 opcode; | ||
| 252 | u8 reserved[7]; | ||
| 253 | u64 tag; | ||
| 254 | }; | ||
| 255 | |||
| 256 | /* | ||
| 257 | * The SRP spec defines the fixed portion of the AER_REQ structure to be | ||
| 258 | * 36 bytes, so it needs to be packed to avoid having it padded to 40 bytes | ||
| 259 | * on 64-bit architectures. | ||
| 260 | */ | ||
| 261 | struct srp_aer_req { | ||
| 262 | u8 opcode; | ||
| 263 | u8 sol_not; | ||
| 264 | u8 reserved[2]; | ||
| 265 | __be32 req_lim_delta; | ||
| 266 | u64 tag; | ||
| 267 | u32 reserved2; | ||
| 268 | __be64 lun; | ||
| 269 | __be32 sense_data_len; | ||
| 270 | u32 reserved3; | ||
| 271 | u8 sense_data[0]; | ||
| 272 | } __attribute__((packed)); | ||
| 273 | |||
| 274 | struct srp_aer_rsp { | ||
| 275 | u8 opcode; | ||
| 276 | u8 reserved[7]; | ||
| 277 | u64 tag; | ||
| 278 | }; | ||
| 279 | |||
| 242 | #endif /* SCSI_SRP_H */ | 280 | #endif /* SCSI_SRP_H */ |
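The new SRP_CRED_REQ/SRP_CRED_RSP structures carry the target's flow-control credits: the initiator adds req_lim_delta to its request limit and echoes the tag back in a credit response. A hedged sketch of that handling; SRP_CRED_RSP is assumed to be the opcode value already defined in scsi/srp.h, and posting the response is left to the surrounding driver:

#include <linux/string.h>
#include <asm/byteorder.h>
#include <scsi/srp.h>

/* Illustrative handler; transport send is left to the caller. */
static void example_handle_cred_req(const struct srp_cred_req *req,
				    struct srp_cred_rsp *rsp, s32 *req_lim)
{
	*req_lim += be32_to_cpu(req->req_lim_delta);	/* credit the request limit */

	memset(rsp, 0, sizeof(*rsp));
	rsp->opcode = SRP_CRED_RSP;
	rsp->tag = req->tag;
}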
