author     Linus Torvalds <torvalds@linux-foundation.org>  2008-08-07 21:14:07 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-08-07 21:14:07 -0400
commit     273b2578392bbf6e5c47a8a3d1ee461ce6fc7182 (patch)
tree       1b00bd21574b1f8db084be4f682d7251a3a0ff3e
parent     8e43e12d638f732fa32600c324711f4be8fe0b1d (diff)
parent     06a91a02e9b249695f964bb59c8b02152c21e90c (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IB/mad: Test ib_create_send_mad() return with IS_ERR(), not == NULL
IB/mlx4: Allow 4K messages for UD QPs
mlx4_core: Add ethernet fields to CQE struct
IB/ipath: Fix printk format warnings
RDMA/cxgb3: Fix deadlock initializing iw_cxgb3 device
RDMA/cxgb3: Fix up MW access rights
RDMA/cxgb3: Fix QP capabilities
RDMA/cma: Remove padding arrays by using struct sockaddr_storage
IB/ipath: Use unsigned long for irq flags
IPoIB/cm: Set correct SG list in ipoib_cm_init_rx_wr()
-rw-r--r--  drivers/infiniband/core/cma.c                |  37
-rw-r--r--  drivers/infiniband/core/mad_rmpp.c           |   2
-rw-r--r--  drivers/infiniband/core/ucma.c               |  14
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_hal.c       |   6
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c  |  28
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.h  |   7
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_qp.c        |  25
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c   |   5
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_iba7220.c  |   7
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_intr.c     |  12
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_verbs.c    |   6
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c              |  33
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c              |   2
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c      |   2
-rw-r--r--  include/linux/mlx4/cq.h                      |  36
-rw-r--r--  include/rdma/rdma_cm.h                       |   8
16 files changed, 108 insertions(+), 122 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e980ff3335db..d951896ff7fc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -155,9 +155,7 @@ struct cma_multicast {
 	} multicast;
 	struct list_head list;
 	void *context;
-	struct sockaddr addr;
-	u8 pad[sizeof(struct sockaddr_in6) -
-	       sizeof(struct sockaddr)];
+	struct sockaddr_storage addr;
 };
 
 struct cma_work {
@@ -786,8 +784,8 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
 		cma_cancel_route(id_priv);
 		break;
 	case CMA_LISTEN:
-		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
-		    !id_priv->cma_dev)
+		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
+		    && !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
 		break;
 	default:
@@ -1026,7 +1024,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
 
 	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto destroy_id;
@@ -1064,7 +1062,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
 			  ip_ver, port, src, dst);
 
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto err;
@@ -1377,7 +1375,7 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 	if (IS_ERR(id_priv->cm_id.ib))
 		return PTR_ERR(id_priv->cm_id.ib);
 
-	addr = &id_priv->id.route.addr.src_addr;
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
 	if (cma_any_addr(addr))
 		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
@@ -1443,7 +1441,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
 	dev_id_priv->state = CMA_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
-	       ip_addr_size(&id_priv->id.route.addr.src_addr));
+	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
 	cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
@@ -1563,13 +1561,14 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
+						 (struct sockaddr *) &addr->dst_addr);
 
 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
 		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
 
-	if (addr->src_addr.sa_family == AF_INET) {
+	if (addr->src_addr.ss_family == AF_INET) {
 		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
 		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
 	} else {
@@ -1848,7 +1847,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
 
-	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
+	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
 		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
 		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
 		src_in->sin_family = dst_in->sin_family;
@@ -1897,7 +1896,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
 	else
-		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
+		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
 				      dst_addr, &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler, id_priv);
 	if (ret)
@@ -2021,11 +2020,11 @@ static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
 	 * We don't support binding to any address if anyone is bound to
 	 * a specific address on the same port.
 	 */
-	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
+	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		return -EADDRNOTAVAIL;
 
 	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
+		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
 			return -EADDRNOTAVAIL;
 
 		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
@@ -2060,7 +2059,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
 	}
 
 	mutex_lock(&lock);
-	if (cma_any_port(&id_priv->id.route.addr.src_addr))
+	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		ret = cma_alloc_any_port(ps, id_priv);
 	else
 		ret = cma_use_port(ps, id_priv);
@@ -2232,7 +2231,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 
 	req.path = route->path_rec;
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
 
@@ -2283,7 +2282,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 		req.alternate_path = &route->path_rec[1];
 
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
 	req.qp_type = IB_QPT_RC;
 	req.starting_psn = id_priv->seq_num;
@@ -2667,7 +2666,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 	if (ret)
 		return ret;
 
-	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
+	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
 	if (id_priv->id.ps == RDMA_PS_UDP)
 		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
 	ib_addr_get_sgid(dev_addr, &rec.port_gid);
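The cma.c changes are all one pattern: addresses are now stored as struct sockaddr_storage, which is guaranteed large enough and suitably aligned for any address family, so the hand-computed sockaddr_in6 padding arrays disappear and every call that wants a struct sockaddr * gains an explicit cast. A minimal userspace sketch of why the storage type works (all names here are illustrative, not kernel code):

    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
    	struct sockaddr_storage ss;	/* holds v4 or v6 without padding hacks */
    	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &ss;

    	memset(&ss, 0, sizeof(ss));
    	sin6->sin6_family = AF_INET6;
    	sin6->sin6_port = htons(4791);

    	/* generic consumers take struct sockaddr * and switch on the family */
    	struct sockaddr *sa = (struct sockaddr *) &ss;
    	printf("family=%d, storage=%zu bytes, in6=%zu bytes\n",
    	       sa->sa_family, sizeof(ss), sizeof(struct sockaddr_in6));
    	return 0;
    }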
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index d0ef7d61c037..3af2b84cd838 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -133,7 +133,7 @@ static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1, hdr_len,
 				 0, GFP_KERNEL);
-	if (!msg)
+	if (IS_ERR(msg))
 		return;
 
 	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
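ib_create_send_mad() reports failure by encoding an errno into the returned pointer (ERR_PTR) rather than returning NULL, so the old `if (!msg)` check let error pointers through to be dereferenced later. A sketch of the convention with a hypothetical allocator:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct thing { int id; };

    /* hypothetical allocator following the kernel's ERR_PTR convention */
    static struct thing *thing_create(gfp_t gfp)
    {
    	struct thing *t = kmalloc(sizeof(*t), gfp);

    	if (!t)
    		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
    	t->id = 0;
    	return t;
    }

    static int thing_user(void)
    {
    	struct thing *t = thing_create(GFP_KERNEL);

    	if (IS_ERR(t))		/* not "if (!t)": an ERR_PTR is non-NULL */
    		return PTR_ERR(t);
    	kfree(t);
    	return 0;
    }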
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b41dd26bbfa1..3ddacf39b7ba 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -81,9 +81,7 @@ struct ucma_multicast {
 
 	u64 uid;
 	struct list_head list;
-	struct sockaddr addr;
-	u8 pad[sizeof(struct sockaddr_in6) -
-	       sizeof(struct sockaddr)];
+	struct sockaddr_storage addr;
 };
 
 struct ucma_event {
@@ -603,11 +601,11 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 		return PTR_ERR(ctx);
 
 	memset(&resp, 0, sizeof resp);
-	addr = &ctx->cm_id->route.addr.src_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
-	addr = &ctx->cm_id->route.addr.dst_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
@@ -913,7 +911,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 
 	mc->uid = cmd.uid;
 	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
-	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
+	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
 	if (ret)
 		goto err2;
 
@@ -929,7 +927,7 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
 	return 0;
 
 err3:
-	rdma_leave_multicast(ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
 	ucma_cleanup_mc_events(mc);
 err2:
 	mutex_lock(&mut);
@@ -975,7 +973,7 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
 		goto out;
 	}
 
-	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);
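ucma_query_route() copies out only as many bytes as the stored family actually occupies; the same dispatch-on-ss_family idiom in plain C (the helper name is illustrative):

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* copy only the bytes the stored address family actually uses */
    static size_t copy_endpoint(void *dst, const struct sockaddr_storage *ss)
    {
    	size_t len = ss->ss_family == AF_INET ?
    		     sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);

    	memcpy(dst, ss, len);
    	return len;
    }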
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index f6d5747153a5..4dcf08b3fd83 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -725,9 +725,9 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 		      V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
 	BUG_ON(page_size >= 28);
 	tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
-			F_TPT_MW_BIND_ENABLE |
+			((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
 			V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
 			V_TPT_PAGE_SIZE(page_size));
 	tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
 		cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
 	tpt.len = cpu_to_be32(len);
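Before this fix every TPT entry was written with F_TPT_MW_BIND_ENABLE set, so any memory region could be the target of a memory-window bind. The bit is now derived from the translated permissions (see the iwch_provider.h hunk below for where TPT_MW_BIND comes from). The conditional-flag idiom in isolation:

    /* enable window binds only when the region was registered for them */
    u32 flags = V_TPT_PERM(perm) |
    	    ((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0);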
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b89640aa6e10..eb778bfd6f66 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1187,28 +1187,6 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
 }
 
-static int fw_supports_fastreg(struct iwch_dev *iwch_dev)
-{
-	struct ethtool_drvinfo info;
-	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-	char *cp, *next;
-	unsigned fw_maj, fw_min;
-
-	rtnl_lock();
-	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
-
-	next = info.fw_version+1;
-	cp = strsep(&next, ".");
-	sscanf(cp, "%i", &fw_maj);
-	cp = strsep(&next, ".");
-	sscanf(cp, "%i", &fw_min);
-
-	PDBG("%s maj %u min %u\n", __func__, fw_maj, fw_min);
-
-	return fw_maj > 6 || (fw_maj == 6 && fw_min > 0);
-}
-
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
@@ -1325,12 +1303,12 @@ int iwch_register_device(struct iwch_dev *dev)
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
 	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
 	dev->ibdev.owner = THIS_MODULE;
-	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+				IB_DEVICE_MEM_WINDOW |
+				IB_DEVICE_MEM_MGT_EXTENSIONS;
 
 	/* cxgb3 supports STag 0. */
 	dev->ibdev.local_dma_lkey = 0;
-	if (fw_supports_fastreg(dev))
-		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
 
 	dev->ibdev.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index f5ceca05c435..a237d49bdcc9 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -293,9 +293,16 @@ static inline u32 iwch_ib_to_tpt_access(int acc)
 	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
 	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
 	       (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
+	       (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
 	       TPT_LOCAL_READ;
 }
 
+static inline u32 iwch_ib_to_tpt_bind_access(int acc)
+{
+	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
+}
+
 enum iwch_mmid_state {
 	IWCH_STAG_STATE_VALID,
 	IWCH_STAG_STATE_INVALID
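Registration and bind rights are now translated separately: iwch_ib_to_tpt_access() maps IB_ACCESS_MW_BIND onto the region itself, while a bind work request carries only the remote rights being granted through the window. Roughly how the two helpers are meant to be used (the flag combinations are illustrative):

    /* register an MR that windows may later bind to */
    u32 mr_perms = iwch_ib_to_tpt_access(IB_ACCESS_LOCAL_WRITE |
    					 IB_ACCESS_MW_BIND);

    /* a bind grants only remote read/write on the window itself */
    u32 mw_perms = iwch_ib_to_tpt_bind_access(IB_ACCESS_REMOTE_READ |
    					      IB_ACCESS_REMOTE_WRITE);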
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9a3be3a9d5dc..3e4585c2318a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -565,7 +565,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 	wqe->bind.type = TPT_VATO;
 
 	/* TBD: check perms */
-	wqe->bind.perms = iwch_ib_to_tpt_access(mw_bind->mw_access_flags);
+	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
 	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
 	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
 	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
@@ -879,20 +879,13 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 			   (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
 			   (qhp->attr.mpa_attr.crc_enabled << 2);
 
-	/*
-	 * XXX - The IWCM doesn't quite handle getting these
-	 * attrs set before going into RTS.  For now, just turn
-	 * them on always...
-	 */
-#if 0
-	init_attr.qpcaps = qhp->attr.enableRdmaRead |
-		(qhp->attr.enableRdmaWrite << 1) |
-		(qhp->attr.enableBind << 2) |
-		(qhp->attr.enable_stag0_fastreg << 3) |
-		(qhp->attr.enable_stag0_fastreg << 4);
-#else
-	init_attr.qpcaps = 0x1f;
-#endif
+	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
+			   uP_RI_QP_RDMA_WRITE_ENABLE |
+			   uP_RI_QP_BIND_ENABLE;
+	if (!qhp->ibqp.uobject)
+		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
+				    uP_RI_QP_FAST_REGISTER_ENABLE;
+
 	init_attr.tcp_emss = qhp->ep->emss;
 	init_attr.ord = qhp->attr.max_ord;
 	init_attr.ird = qhp->attr.max_ird;
@@ -900,8 +893,6 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
 	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
 	init_attr.rqe_count = iwch_rqes_posted(qhp);
 	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
-	if (!qhp->ibqp.uobject)
-		init_attr.flags |= PRIV_QP;
 	if (peer2peer) {
 		init_attr.rtr_type = RTR_READ;
 		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
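Instead of the hard-coded 0x1f (all five capability bits), the QP capabilities are now built up explicitly, and the privileged ones are reserved for kernel QPs: a QP created through uverbs carries a uobject, a kernel ULP's QP does not. The gating pattern in isolation (flag names shortened for illustration):

    u32 qpcaps = CAP_RDMA_READ | CAP_RDMA_WRITE | CAP_BIND;

    if (!qp->uobject)		/* kernel-owned QP */
    	qpcaps |= CAP_STAG0 | CAP_FASTREG;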
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index daad09a45910..ad0aab60b051 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -1259,7 +1259,7 @@ reloop:
 			 */
 			ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
 				   " %x, len %x hdrq+%x rhf: %Lx\n",
-				   etail, tlen, l,
+				   etail, tlen, l, (unsigned long long)
 				   le64_to_cpu(*(__le64 *) rhf_addr));
 			if (ipath_debug & __IPATH_ERRPKTDBG) {
 				u32 j, *d, dw = rsize-2;
@@ -1457,7 +1457,8 @@ static void ipath_reset_availshadow(struct ipath_devdata *dd)
 			0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
 		if (oldval != dd->ipath_pioavailshadow[i])
 			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
-				  i, oldval, dd->ipath_pioavailshadow[i]);
+				  i, (unsigned long long) oldval,
+				  dd->ipath_pioavailshadow[i]);
 	}
 	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_iba7220.c b/drivers/infiniband/hw/ipath/ipath_iba7220.c
index fadbfbf55a6a..d90f5e9a54fa 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba7220.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba7220.c
@@ -1032,7 +1032,7 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
 	ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
 		   (unsigned long long)
 		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
-		   prev_val);
+		   (unsigned long long) prev_val);
 
 	guid = be64_to_cpu(dd->ipath_guid);
 
@@ -1042,7 +1042,8 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd)
 		ipath_dbg("No GUID for heartbeat, faking %llx\n",
 			  (unsigned long long)guid);
 	} else
-		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", guid);
+		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
+			   (unsigned long long) guid);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
 	return ret;
 }
@@ -2505,7 +2506,7 @@ done:
 	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
 		ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
 			  ipath_ib_state(dd, dd->ipath_lastibcstat),
-			  jiffies_to_msecs(jiffies)-startms);
+			  (unsigned long long) jiffies_to_msecs(jiffies)-startms);
 		dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
 		if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
 			dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 26900b3b7a4e..6c21b4b5ec71 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -356,9 +356,10 @@ static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
 					 dd->ipath_cregs->cr_iblinkerrrecovcnt);
 		if (linkrecov != dd->ipath_lastlinkrecov) {
 			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
-				  ibcs, ib_linkstate(dd, ibcs),
+				  (unsigned long long) ibcs,
+				  ib_linkstate(dd, ibcs),
 				  ipath_ibcstatus_str[ltstate],
-				  linkrecov);
+				  (unsigned long long) linkrecov);
 			/* and no more until active again */
 			dd->ipath_lastlinkrecov = 0;
 			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
@@ -1118,9 +1119,11 @@ irqreturn_t ipath_intr(int irq, void *data)
 	if (unlikely(istat & ~dd->ipath_i_bitsextant))
 		ipath_dev_err(dd,
 			      "interrupt with unknown interrupts %Lx set\n",
+			      (unsigned long long)
 			      istat & ~dd->ipath_i_bitsextant);
 	else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
-		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
+		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
+			   (unsigned long long) istat);
 
 	if (istat & INFINIPATH_I_ERROR) {
 		ipath_stats.sps_errints++;
@@ -1128,7 +1131,8 @@ irqreturn_t ipath_intr(int irq, void *data)
 				      dd->ipath_kregs->kr_errorstatus);
 		if (!estat)
 			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
-				 "but no error bits set!\n", istat);
+				 "but no error bits set!\n",
+				 (unsigned long long) istat);
 		else if (estat == -1LL)
 			/*
 			 * should we try clearing all, or hope next read
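The ipath printk changes are all the same portability fix: u64 is unsigned long on some 64-bit architectures and unsigned long long elsewhere, so a %Lx/%llx conversion only matches on every architecture if the argument is cast at the call site. In miniature:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void show(u64 val)
    {
    	/* u64 may be unsigned long or unsigned long long; cast explicitly
    	 * so the %llx conversion matches on every architecture */
    	printk(KERN_DEBUG "val=%llx\n", (unsigned long long) val);
    }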
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 55c718828826..b766e40e9ebf 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1021,7 +1021,7 @@ static void sdma_complete(void *cookie, int status)
 	struct ipath_verbs_txreq *tx = cookie;
 	struct ipath_qp *qp = tx->qp;
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	unsigned int flags;
+	unsigned long flags;
 	enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
 		IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
 
@@ -1051,7 +1051,7 @@ static void sdma_complete(void *cookie, int status)
 
 static void decrement_dma_busy(struct ipath_qp *qp)
 {
-	unsigned int flags;
+	unsigned long flags;
 
 	if (atomic_dec_and_test(&qp->s_dma_busy)) {
 		spin_lock_irqsave(&qp->s_lock, flags);
@@ -1221,7 +1221,7 @@ static int ipath_verbs_send_pio(struct ipath_qp *qp,
 	unsigned flush_wc;
 	u32 control;
 	int ret;
-	unsigned int flags;
+	unsigned long flags;
 
 	piobuf = ipath_getpiobuf(dd, plen, NULL);
 	if (unlikely(piobuf == NULL)) {
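spin_lock_irqsave() saves the interrupt state into an unsigned long; declaring flags as unsigned int happens to work on 32-bit builds but truncates the saved state on 64-bit ones. The canonical form:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);

    static void critical_section(void)
    {
    	unsigned long flags;	/* must be unsigned long, never int */

    	spin_lock_irqsave(&example_lock, flags);
    	/* ... work with local interrupts disabled ... */
    	spin_unlock_irqrestore(&example_lock, flags);
    }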
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a1464574bfdd..d0866a3636e2 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -515,17 +515,17 @@ static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
 	wc->vendor_err = cqe->vendor_err_syndrome;
 }
 
-static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
+static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
 {
-	return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-				      MLX4_CQE_IPOIB_STATUS_IPV4F |
-				      MLX4_CQE_IPOIB_STATUS_IPV4OPT |
-				      MLX4_CQE_IPOIB_STATUS_IPV6 |
-				      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
-		cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-			    MLX4_CQE_IPOIB_STATUS_IPOK)) &&
-		(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
-				      MLX4_CQE_IPOIB_STATUS_TCP)) &&
+	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+				      MLX4_CQE_STATUS_IPV4F |
+				      MLX4_CQE_STATUS_IPV4OPT |
+				      MLX4_CQE_STATUS_IPV6 |
+				      MLX4_CQE_STATUS_IPOK)) ==
+		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+			    MLX4_CQE_STATUS_IPOK)) &&
+		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
+				      MLX4_CQE_STATUS_TCP)) &&
 		checksum == cpu_to_be16(0xffff);
 }
 
@@ -582,17 +582,17 @@ repoll:
 	}
 
 	if (!*cur_qp ||
-	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
+	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
 		/*
 		 * We do not have to take the QP table lock here,
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
-				       be32_to_cpu(cqe->my_qpn));
+				       be32_to_cpu(cqe->vlan_my_qpn));
 		if (unlikely(!mqp)) {
 			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
-			       cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
+			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
 			return -EINVAL;
 		}
 
@@ -692,14 +692,13 @@ repoll:
 		}
 
 		wc->slid	   = be16_to_cpu(cqe->rlid);
-		wc->sl		   = cqe->sl >> 4;
+		wc->sl		   = be16_to_cpu(cqe->sl_vid >> 12);
 		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
 		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags	  |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index	   = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
-		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
-							   cqe->checksum);
+		wc->csum_ok	   = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
 	}
 
 	return 0;
@@ -767,7 +766,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 	 */
 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-		if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
+		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
 			++nfreed;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index f7bc7dd8578a..f29dbb767e87 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -902,7 +902,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
 					      ilog2(dev->dev->caps.max_gso_sz);
 		else
-			context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
 	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			printk(KERN_ERR "path MTU (%u) is invalid\n",
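The low bits of mtu_msgmax hold log2 of the largest message the QP accepts; 11 silently capped UD messages at 2^11 = 2048 bytes, while 12 permits the full 4096-byte payload a 4K-MTU UD QP can carry. The encoding at a glance:

    /* upper bits: path MTU enum; low bits: log2(max message size) */
    u8 mtu_msgmax = (IB_MTU_4096 << 5) | 12;	/* max message = 1 << 12 = 4096 */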
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0f2d3045061a..7ebc400a4b3d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -337,7 +337,7 @@ static void ipoib_cm_init_rx_wr(struct net_device *dev,
 		sge[i].length = PAGE_SIZE;
 
 	wr->next = NULL;
-	wr->sg_list = priv->cm.rx_sge;
+	wr->sg_list = sge;
 	wr->num_sge = priv->cm.num_frags;
 }
 
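ipoib_cm_init_rx_wr() receives the SG array to use as an argument; wiring the work request to the shared priv->cm.rx_sge instead of that argument meant every caller posted from one list. The corrected shape as a generic helper (names illustrative):

    #include <rdma/ib_verbs.h>

    /* point the WR at the caller's SG list, not a shared per-device array */
    static void init_rx_wr(struct ib_recv_wr *wr, struct ib_sge *sge, int nsge)
    {
    	wr->next    = NULL;
    	wr->sg_list = sge;	/* the array the caller just filled in */
    	wr->num_sge = nsge;
    }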
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 071cf96cf01f..6f65b2c8bb89 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -39,17 +39,18 @@
 #include <linux/mlx4/doorbell.h>
 
 struct mlx4_cqe {
-	__be32			my_qpn;
+	__be32			vlan_my_qpn;
 	__be32			immed_rss_invalid;
 	__be32			g_mlpath_rqpn;
-	u8			sl;
-	u8			reserved1;
+	__be16			sl_vid;
 	__be16			rlid;
-	__be32			ipoib_status;
+	__be16			status;
+	u8			ipv6_ext_mask;
+	u8			badfcs_enc;
 	__be32			byte_cnt;
 	__be16			wqe_index;
 	__be16			checksum;
-	u8			reserved2[3];
+	u8			reserved[3];
 	u8			owner_sr_opcode;
 };
 
@@ -64,6 +65,11 @@ struct mlx4_err_cqe {
 };
 
 enum {
+	MLX4_CQE_VLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_QPN_MASK		= 0xffffff,
+};
+
+enum {
 	MLX4_CQE_OWNER_MASK	= 0x80,
 	MLX4_CQE_IS_SEND_MASK	= 0x40,
 	MLX4_CQE_OPCODE_MASK	= 0x1f
@@ -86,13 +92,19 @@ enum {
 };
 
 enum {
-	MLX4_CQE_IPOIB_STATUS_IPV4	= 1 << 22,
-	MLX4_CQE_IPOIB_STATUS_IPV4F	= 1 << 23,
-	MLX4_CQE_IPOIB_STATUS_IPV6	= 1 << 24,
-	MLX4_CQE_IPOIB_STATUS_IPV4OPT	= 1 << 25,
-	MLX4_CQE_IPOIB_STATUS_TCP	= 1 << 26,
-	MLX4_CQE_IPOIB_STATUS_UDP	= 1 << 27,
-	MLX4_CQE_IPOIB_STATUS_IPOK	= 1 << 28,
+	MLX4_CQE_STATUS_IPV4		= 1 << 6,
+	MLX4_CQE_STATUS_IPV4F		= 1 << 7,
+	MLX4_CQE_STATUS_IPV6		= 1 << 8,
+	MLX4_CQE_STATUS_IPV4OPT		= 1 << 9,
+	MLX4_CQE_STATUS_TCP		= 1 << 10,
+	MLX4_CQE_STATUS_UDP		= 1 << 11,
+	MLX4_CQE_STATUS_IPOK		= 1 << 12,
+};
+
+enum {
+	MLX4_CQE_LLC			= 1,
+	MLX4_CQE_SNAP			= 1 << 1,
+	MLX4_CQE_BAD_FCS		= 1 << 4,
 };
 
 static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
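With the new layout one big-endian word carries both the VLAN-present flag and the 24-bit QPN, and sl_vid packs a 4-bit service level above a 12-bit VLAN ID. A decoding sketch using the masks above (the local variable names and the 0xfff VID mask are illustrative):

    u32 vlan_my_qpn = be32_to_cpu(cqe->vlan_my_qpn);
    u32 qpn      = vlan_my_qpn & MLX4_CQE_QPN_MASK;
    int has_vlan = !!(vlan_my_qpn & MLX4_CQE_VLAN_PRESENT_MASK);

    u16 sl_vid = be16_to_cpu(cqe->sl_vid);
    u8  sl  = sl_vid >> 12;		/* top 4 bits: IB service level */
    u16 vid = sl_vid & 0xfff;	/* low 12 bits: ethernet VLAN ID */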
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index df7faf09d66f..c6b2962315b3 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -71,12 +71,8 @@ enum rdma_port_space {
 };
 
 struct rdma_addr {
-	struct sockaddr src_addr;
-	u8 src_pad[sizeof(struct sockaddr_in6) -
-		   sizeof(struct sockaddr)];
-	struct sockaddr dst_addr;
-	u8 dst_pad[sizeof(struct sockaddr_in6) -
-		   sizeof(struct sockaddr)];
+	struct sockaddr_storage src_addr;
+	struct sockaddr_storage dst_addr;
 	struct rdma_dev_addr dev_addr;
 };
 