 31 files changed, 568 insertions(+), 219 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index b399b34a2496..19395134063d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5042,17 +5042,19 @@ S:	Orphan
 F:	drivers/video/fbdev/imsttfb.c
 
 INFINIBAND SUBSYSTEM
-M:	Roland Dreier <roland@kernel.org>
+M:	Doug Ledford <dledford@redhat.com>
 M:	Sean Hefty <sean.hefty@intel.com>
 M:	Hal Rosenstock <hal.rosenstock@gmail.com>
 L:	linux-rdma@vger.kernel.org
 W:	http://www.openfabrics.org/
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
+T:	git git://github.com/dledford/linux.git
 S:	Supported
 F:	Documentation/infiniband/
 F:	drivers/infiniband/
 F:	include/uapi/linux/if_infiniband.h
+F:	include/uapi/rdma/
+F:	include/rdma/
 
 INOTIFY
 M:	John McCutchan <john@johnmccutchan.com>
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f80da50d84a5..38339d220d7f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
 	} sgid_addr, dgid_addr;
 
 
-	ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
-	if (ret)
-		return ret;
-
-	ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
-	if (ret)
-		return ret;
+	rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+	rdma_gid2ip(&dgid_addr._sockaddr, dgid);
 
 	memset(&dev_addr, 0, sizeof(dev_addr));
 
@@ -512,10 +507,8 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
 		struct sockaddr_in6 _sockaddr_in6;
 	} gid_addr;
 
-	ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+	rdma_gid2ip(&gid_addr._sockaddr, sgid);
 
-	if (ret)
-		return ret;
 	memset(&dev_addr, 0, sizeof(dev_addr));
 	ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
 	if (ret)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e28a494e2a3a..0c1419105ff0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
 	return cm_id_priv;
 }
 
-static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
+static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
 {
 	int i;
 
-	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
-		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
-					     ((unsigned long *) mask)[i];
+	for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
+		dst[i] = src[i] & mask[i];
 }
 
 static int cm_compare_data(struct ib_cm_compare_data *src_data,
 			   struct ib_cm_compare_data *dst_data)
 {
-	u8 src[IB_CM_COMPARE_SIZE];
-	u8 dst[IB_CM_COMPARE_SIZE];
+	u32 src[IB_CM_COMPARE_SIZE];
+	u32 dst[IB_CM_COMPARE_SIZE];
 
 	if (!src_data || !dst_data)
 		return 0;
 
 	cm_mask_copy(src, src_data->data, dst_data->mask);
 	cm_mask_copy(dst, dst_data->data, src_data->mask);
-	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
+	return memcmp(src, dst, sizeof(src));
 }
 
-static int cm_compare_private_data(u8 *private_data,
+static int cm_compare_private_data(u32 *private_data,
 				   struct ib_cm_compare_data *dst_data)
 {
-	u8 src[IB_CM_COMPARE_SIZE];
+	u32 src[IB_CM_COMPARE_SIZE];
 
 	if (!dst_data)
 		return 0;
 
 	cm_mask_copy(src, private_data, dst_data->mask);
-	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
+	return memcmp(src, dst_data->data, sizeof(src));
 }
 
 /*
@@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 
 static struct cm_id_private * cm_find_listen(struct ib_device *device,
 					     __be64 service_id,
-					     u8 *private_data)
+					     u32 *private_data)
 {
 	struct rb_node *node = cm.listen_service_table.rb_node;
 	struct cm_id_private *cm_id_priv;
@@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 		cm_mask_copy(cm_id_priv->compare_data->data,
 			     compare_data->data, compare_data->mask);
 		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
-		       IB_CM_COMPARE_SIZE);
+		       sizeof(compare_data->mask));
 	}
 
 	cm_id->state = IB_CM_LISTEN;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index be068f47e47e..8b76f0ef965e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -103,7 +103,7 @@ struct cm_req_msg {
 	/* local ACK timeout:5, rsvd:3 */
 	u8 alt_offset139;
 
-	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
 
 } __attribute__ ((packed));
 
@@ -801,7 +801,7 @@ struct cm_sidr_req_msg {
 	__be16 rsvd;
 	__be64 service_id;
 
-	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
+	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
 } __attribute__ ((packed));
 
 struct cm_sidr_rep_msg {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d570030d899c..06441a43c3aa 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 	memcpy(&ib->sib_addr, &path->dgid, 16);
 }
 
+static __be16 ss_get_port(const struct sockaddr_storage *ss)
+{
+	if (ss->ss_family == AF_INET)
+		return ((struct sockaddr_in *)ss)->sin_port;
+	else if (ss->ss_family == AF_INET6)
+		return ((struct sockaddr_in6 *)ss)->sin6_port;
+	BUG();
+}
+
 static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
 			      struct cma_hdr *hdr)
 {
-	struct sockaddr_in *listen4, *ip4;
+	struct sockaddr_in *ip4;
 
-	listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
 	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
-	ip4->sin_family = listen4->sin_family;
+	ip4->sin_family = AF_INET;
 	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
-	ip4->sin_port = listen4->sin_port;
+	ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
 
 	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
-	ip4->sin_family = listen4->sin_family;
+	ip4->sin_family = AF_INET;
 	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
 	ip4->sin_port = hdr->port;
 }
@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
 static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
 			      struct cma_hdr *hdr)
 {
-	struct sockaddr_in6 *listen6, *ip6;
+	struct sockaddr_in6 *ip6;
 
-	listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
 	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
-	ip6->sin6_family = listen6->sin6_family;
+	ip6->sin6_family = AF_INET6;
 	ip6->sin6_addr = hdr->dst_addr.ip6;
-	ip6->sin6_port = listen6->sin6_port;
+	ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
 
 	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
-	ip6->sin6_family = listen6->sin6_family;
+	ip6->sin6_family = AF_INET6;
 	ip6->sin6_addr = hdr->src_addr.ip6;
 	ip6->sin6_port = hdr->port;
 }
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index b85ddbc979e0..ab081702566f 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -468,7 +468,8 @@ add_mapping_response_exit:
 }
 EXPORT_SYMBOL(iwpm_add_mapping_cb);
 
-/* netlink attribute policy for the response to add and query mapping request */
+/* netlink attribute policy for the response to add and query mapping request
+ * and response with remote address info */
 static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
 	[IWPM_NLA_QUERY_MAPPING_SEQ]  = { .type = NLA_U32 },
 	[IWPM_NLA_QUERY_LOCAL_ADDR]   = { .len = sizeof(struct sockaddr_storage) },
@@ -559,6 +560,76 @@ query_mapping_response_exit:
 }
 EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
 
+/*
+ * iwpm_remote_info_cb - Process a port mapper message, containing
+ *			 the remote connecting peer address info
+ */
+int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
+	struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
+	struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
+	struct iwpm_remote_info *rem_info;
+	const char *msg_type;
+	u8 nl_client;
+	int ret = -EINVAL;
+
+	msg_type = "Remote Mapping info";
+	if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
+				resp_query_policy, nltb, msg_type))
+		return ret;
+
+	nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+	if (!iwpm_valid_client(nl_client)) {
+		pr_info("%s: Invalid port mapper client = %d\n",
+				__func__, nl_client);
+		return ret;
+	}
+	atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+
+	local_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+	remote_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+	mapped_loc_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
+	mapped_rem_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);
+
+	if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
+		mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
+		pr_info("%s: Sockaddr family doesn't match the requested one\n",
+				__func__);
+		return ret;
+	}
+	rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
+	if (!rem_info) {
+		pr_err("%s: Unable to allocate a remote info\n", __func__);
+		ret = -ENOMEM;
+		return ret;
+	}
+	memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	memcpy(&rem_info->remote_sockaddr, remote_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	rem_info->nl_client = nl_client;
+
+	iwpm_add_remote_info(rem_info);
+
+	iwpm_print_sockaddr(local_sockaddr,
+			"remote_info: Local sockaddr:");
+	iwpm_print_sockaddr(mapped_loc_sockaddr,
+			"remote_info: Mapped local sockaddr:");
+	iwpm_print_sockaddr(remote_sockaddr,
+			"remote_info: Remote sockaddr:");
+	iwpm_print_sockaddr(mapped_rem_sockaddr,
+			"remote_info: Mapped remote sockaddr:");
+	return ret;
+}
+EXPORT_SYMBOL(iwpm_remote_info_cb);
+
 /* netlink attribute policy for the received request for mapping info */
 static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
 	[IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 69e9f84c1605..a626795bf9c7 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -33,8 +33,10 @@
 
 #include "iwpm_util.h"
 
-#define IWPM_HASH_BUCKET_SIZE	512
-#define IWPM_HASH_BUCKET_MASK	(IWPM_HASH_BUCKET_SIZE - 1)
+#define IWPM_MAPINFO_HASH_SIZE	512
+#define IWPM_MAPINFO_HASH_MASK	(IWPM_MAPINFO_HASH_SIZE - 1)
+#define IWPM_REMINFO_HASH_SIZE	64
+#define IWPM_REMINFO_HASH_MASK	(IWPM_REMINFO_HASH_SIZE - 1)
 
 static LIST_HEAD(iwpm_nlmsg_req_list);
 static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
 static struct hlist_head *iwpm_hash_bucket;
 static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
 
+static struct hlist_head *iwpm_reminfo_bucket;
+static DEFINE_SPINLOCK(iwpm_reminfo_lock);
+
 static DEFINE_MUTEX(iwpm_admin_lock);
 static struct iwpm_admin_data iwpm_admin;
 
 int iwpm_init(u8 nl_client)
 {
+	int ret = 0;
 	if (iwpm_valid_client(nl_client))
 		return -EINVAL;
 	mutex_lock(&iwpm_admin_lock);
 	if (atomic_read(&iwpm_admin.refcount) == 0) {
-		iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
+		iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
 					sizeof(struct hlist_head), GFP_KERNEL);
 		if (!iwpm_hash_bucket) {
-			mutex_unlock(&iwpm_admin_lock);
+			ret = -ENOMEM;
 			pr_err("%s Unable to create mapinfo hash table\n", __func__);
-			return -ENOMEM;
+			goto init_exit;
+		}
+		iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
+					sizeof(struct hlist_head), GFP_KERNEL);
+		if (!iwpm_reminfo_bucket) {
+			kfree(iwpm_hash_bucket);
+			ret = -ENOMEM;
+			pr_err("%s Unable to create reminfo hash table\n", __func__);
+			goto init_exit;
 		}
 	}
 	atomic_inc(&iwpm_admin.refcount);
+init_exit:
 	mutex_unlock(&iwpm_admin_lock);
-	iwpm_set_valid(nl_client, 1);
-	return 0;
+	if (!ret) {
+		iwpm_set_valid(nl_client, 1);
+		pr_debug("%s: Mapinfo and reminfo tables are created\n",
+				__func__);
+	}
+	return ret;
 }
 EXPORT_SYMBOL(iwpm_init);
 
 static void free_hash_bucket(void);
+static void free_reminfo_bucket(void);
 
 int iwpm_exit(u8 nl_client)
 {
@@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client)
 	}
 	if (atomic_dec_and_test(&iwpm_admin.refcount)) {
 		free_hash_bucket();
-		pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
+		free_reminfo_bucket();
+		pr_debug("%s: Resources are destroyed\n", __func__);
 	}
 	mutex_unlock(&iwpm_admin_lock);
 	iwpm_set_valid(nl_client, 0);
@@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client)
 }
 EXPORT_SYMBOL(iwpm_exit);
 
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
 					struct sockaddr_storage *);
 
 int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
@@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 	struct hlist_head *hash_bucket_head;
 	struct iwpm_mapping_info *map_info;
 	unsigned long flags;
+	int ret = -EINVAL;
 
 	if (!iwpm_valid_client(nl_client))
-		return -EINVAL;
+		return ret;
 	map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
 	if (!map_info) {
 		pr_err("%s: Unable to allocate a mapping info\n", __func__);
@@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		hash_bucket_head = get_hash_bucket_head(
+		hash_bucket_head = get_mapinfo_hash_bucket(
 					&map_info->local_sockaddr,
 					&map_info->mapped_sockaddr);
-		hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+		if (hash_bucket_head) {
+			hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+			ret = 0;
+		}
 	}
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(iwpm_create_mapinfo);
 
@@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		hash_bucket_head = get_hash_bucket_head(
+		hash_bucket_head = get_mapinfo_hash_bucket(
 					local_sockaddr,
 					mapped_local_addr);
+		if (!hash_bucket_head)
+			goto remove_mapinfo_exit;
+
 		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
 					hash_bucket_head, hlist_node) {
 
@@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
 			}
 		}
 	}
+remove_mapinfo_exit:
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
 	return ret;
 }
@@ -166,7 +195,7 @@ static void free_hash_bucket(void)
 
 	/* remove all the mapinfo data from the list */
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
-	for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+	for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
 					&iwpm_hash_bucket[i], hlist_node) {
 
@@ -180,6 +209,96 @@ static void free_hash_bucket(void)
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
 }
 
+static void free_reminfo_bucket(void)
+{
+	struct hlist_node *tmp_hlist_node;
+	struct iwpm_remote_info *rem_info;
+	unsigned long flags;
+	int i;
+
+	/* remove all the remote info from the list */
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) {
+		hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+					&iwpm_reminfo_bucket[i], hlist_node) {
+
+			hlist_del_init(&rem_info->hlist_node);
+			kfree(rem_info);
+		}
+	}
+	/* free the hash list */
+	kfree(iwpm_reminfo_bucket);
+	iwpm_reminfo_bucket = NULL;
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *,
+					struct sockaddr_storage *);
+
+void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
+{
+	struct hlist_head *hash_bucket_head;
+	unsigned long flags;
+
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	if (iwpm_reminfo_bucket) {
+		hash_bucket_head = get_reminfo_hash_bucket(
+					&rem_info->mapped_loc_sockaddr,
+					&rem_info->mapped_rem_sockaddr);
+		if (hash_bucket_head)
+			hlist_add_head(&rem_info->hlist_node, hash_bucket_head);
+	}
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+				struct sockaddr_storage *mapped_rem_addr,
+				struct sockaddr_storage *remote_addr,
+				u8 nl_client)
+{
+	struct hlist_node *tmp_hlist_node;
+	struct hlist_head *hash_bucket_head;
+	struct iwpm_remote_info *rem_info = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!iwpm_valid_client(nl_client)) {
+		pr_info("%s: Invalid client = %d\n", __func__, nl_client);
+		return ret;
+	}
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	if (iwpm_reminfo_bucket) {
+		hash_bucket_head = get_reminfo_hash_bucket(
+					mapped_loc_addr,
+					mapped_rem_addr);
+		if (!hash_bucket_head)
+			goto get_remote_info_exit;
+		hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+					hash_bucket_head, hlist_node) {
+
+			if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr,
+						mapped_loc_addr) &&
+				!iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr,
+						mapped_rem_addr)) {
+
+				memcpy(remote_addr, &rem_info->remote_sockaddr,
+					sizeof(struct sockaddr_storage));
+				iwpm_print_sockaddr(remote_addr,
+						"get_remote_info: Remote sockaddr:");
+
+				hlist_del_init(&rem_info->hlist_node);
+				kfree(rem_info);
+				ret = 0;
+				break;
+			}
+		}
+	}
+get_remote_info_exit:
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(iwpm_get_remote_info);
+
 struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
 					u8 nl_client, gfp_t gfp)
 {
@@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
 	return hash;
 }
 
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
-					       *local_sockaddr,
-					       struct sockaddr_storage
-					       *mapped_sockaddr)
+static int get_hash_bucket(struct sockaddr_storage *a_sockaddr,
+			   struct sockaddr_storage *b_sockaddr, u32 *hash)
 {
-	u32 local_hash, mapped_hash, hash;
+	u32 a_hash, b_hash;
 
-	if (local_sockaddr->ss_family == AF_INET) {
-		local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
-		mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);
+	if (a_sockaddr->ss_family == AF_INET) {
+		a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr);
+		b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr);
 
-	} else if (local_sockaddr->ss_family == AF_INET6) {
-		local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
-		mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
+	} else if (a_sockaddr->ss_family == AF_INET6) {
+		a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr);
+		b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr);
 	} else {
 		pr_err("%s: Invalid sockaddr family\n", __func__);
-		return NULL;
+		return -EINVAL;
 	}
 
-	if (local_hash == mapped_hash) /* if port mapper isn't available */
-		hash = local_hash;
+	if (a_hash == b_hash) /* if port mapper isn't available */
+		*hash = a_hash;
 	else
-		hash = jhash_2words(local_hash, mapped_hash, 0);
+		*hash = jhash_2words(a_hash, b_hash, 0);
+	return 0;
+}
+
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage
+				*local_sockaddr, struct sockaddr_storage
+				*mapped_sockaddr)
+{
+	u32 hash;
+	int ret;
 
-	return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
+	ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash);
+	if (ret)
+		return NULL;
+	return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK];
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage
+				*mapped_loc_sockaddr, struct sockaddr_storage
+				*mapped_rem_sockaddr)
+{
+	u32 hash;
+	int ret;
+
+	ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash);
+	if (ret)
+		return NULL;
+	return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK];
 }
 
 static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
@@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
 		}
 		skb_num++;
 		spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
-		for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+		for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 			hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
 					     hlist_node) {
 				if (map_info->nl_client != nl_client)
@@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void)
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+		for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 			if (!hlist_empty(&iwpm_hash_bucket[i])) {
 				full_bucket = 1;
 				break;
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 9777c869a140..ee2d9ff095be 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -76,6 +76,14 @@ struct iwpm_mapping_info {
 	u8     nl_client;
 };
 
+struct iwpm_remote_info {
+	struct hlist_node hlist_node;
+	struct sockaddr_storage remote_sockaddr;
+	struct sockaddr_storage mapped_loc_sockaddr;
+	struct sockaddr_storage mapped_rem_sockaddr;
+	u8     nl_client;
+};
+
 struct iwpm_admin_data {
 	atomic_t refcount;
 	atomic_t nlmsg_seq;
@@ -128,6 +136,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
 int iwpm_get_nlmsg_seq(void);
 
 /**
+ * iwpm_add_reminfo - Add remote address info of the connecting peer
+ *                    to the remote info hash table
+ * @reminfo: The remote info to be added
+ */
+void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
+
+/**
  * iwpm_valid_client - Check if the port mapper client is valid
  * @nl_client: The index of the netlink client
  *
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8b8cc6fa0ab0..40becdb3196e 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
 	int remove_existing_mapping = 0;
 	int ret = 0;
 
-	mutex_lock(&umem->odp_data->umem_mutex);
 	/*
 	 * Note: we avoid writing if seq is different from the initial seq, to
 	 * handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	mutex_unlock(&umem->odp_data->umem_mutex);
-
 	/* On Demand Paging - avoid pinning the page */
 	if (umem->context->invalidate_range || !stored_page)
 		put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 
 		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
 		user_virt += npages << PAGE_SHIFT;
+		mutex_lock(&umem->odp_data->umem_mutex);
 		for (j = 0; j < npages; ++j) {
 			ret = ib_umem_odp_map_dma_single_page(
 				umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 				break;
 			k++;
 		}
+		mutex_unlock(&umem->odp_data->umem_mutex);
 
 		if (ret < 0) {
 			/* Release left over pages when handling errors. */
@@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 	 * faults from completion. We might be racing with other
 	 * invalidations, so we must make sure we free each page only
 	 * once. */
+	mutex_lock(&umem->odp_data->umem_mutex);
 	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
-		mutex_lock(&umem->odp_data->umem_mutex);
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];
-			struct page *head_page = compound_head(page);
 			dma_addr_t dma = umem->odp_data->dma_list[idx];
 			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
 
@@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 
 			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
 					  DMA_BIDIRECTIONAL);
-			if (dma & ODP_WRITE_ALLOWED_BIT)
+			if (dma & ODP_WRITE_ALLOWED_BIT) {
+				struct page *head_page = compound_head(page);
 				/*
 				 * set_page_dirty prefers being called with
 				 * the page lock. However, MMU notifiers are
@@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 				 * be removed.
 				 */
 				set_page_dirty(head_page);
+			}
 			/* on demand pinning support */
 			if (!umem->context->invalidate_range)
 				put_page(page);
 			umem->odp_data->page_list[idx] = NULL;
 			umem->odp_data->dma_list[idx] = 0;
 		}
-		mutex_unlock(&umem->odp_data->umem_mutex);
 	}
+	mutex_unlock(&umem->odp_data->umem_mutex);
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 57176ddd4c50..bb95a6c0477b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
 	       sizeof(ep->com.mapped_remote_addr));
 }
 
+static int get_remote_addr(struct c4iw_ep *ep)
+{
+	int ret;
+
+	print_addr(&ep->com, __func__, "get_remote_addr");
+
+	ret = iwpm_get_remote_info(&ep->com.mapped_local_addr,
+				   &ep->com.mapped_remote_addr,
+				   &ep->com.remote_addr, RDMA_NL_C4IW);
+	if (ret)
+		pr_info(MOD "Unable to find remote peer addr info - err %d\n",
+			ret);
+
+	return ret;
+}
+
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
 		     unsigned int *idx, int use_ts, int ipv6)
 {
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep)
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
 		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		opt2 |= T5_ISS_F;
 	}
 	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
 
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	     status, status2errno(status));
 
 	if (is_neg_adv(status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Connection problems for atid %u status %u (%s)\n",
-			 atid, status, neg_adv_str(status));
+		PDBG("%s Connection problems for atid %u status %u (%s)\n",
+		     __func__, atid, status, neg_adv_str(status));
+		ep->stats.connect_neg_adv++;
+		mutex_lock(&dev->rdev.stats.lock);
+		dev->rdev.stats.neg_adv++;
+		mutex_unlock(&dev->rdev.stats.lock);
 		return 0;
 	}
 
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		u32 isn = (prandom_u32() & ~7UL) - 1;
 		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		opt2 |= T5_ISS_F;
 		rpl5 = (void *)rpl;
 		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
 		if (peer2peer)
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
+
+	/*
+	 * The mapped_local and mapped_remote addresses get setup with
+	 * the actual 4-tuple.  The local address will be based on the
+	 * actual local address of the connection, but on the port number
+	 * of the parent listening endpoint.  The remote address is
+	 * setup based on a query to the IWPM since we don't know what it
+	 * originally was before mapping.  If no mapping was done, then
+	 * mapped_remote == remote, and mapped_local == local.
+	 */
 	if (iptype == 4) {
 		struct sockaddr_in *sin = (struct sockaddr_in *)
-			&child_ep->com.local_addr;
+			&child_ep->com.mapped_local_addr;
+
 		sin->sin_family = PF_INET;
 		sin->sin_port = local_port;
 		sin->sin_addr.s_addr = *(__be32 *)local_ip;
-		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+
+		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
+		sin->sin_family = PF_INET;
+		sin->sin_port = ((struct sockaddr_in *)
+				 &parent_ep->com.local_addr)->sin_port;
+		sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+		sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
 		sin->sin_family = PF_INET;
 		sin->sin_port = peer_port;
 		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
 	} else {
 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
-			&child_ep->com.local_addr;
+			&child_ep->com.mapped_local_addr;
+
 		sin6->sin6_family = PF_INET6;
 		sin6->sin6_port = local_port;
 		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
-		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+
+		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
+		sin6->sin6_family = PF_INET6;
+		sin6->sin6_port = ((struct sockaddr_in6 *)
+				   &parent_ep->com.local_addr)->sin6_port;
+		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+
+		sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
 		sin6->sin6_family = PF_INET6;
 		sin6->sin6_port = peer_port;
 		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
 	}
+	memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
+	       sizeof(child_ep->com.remote_addr));
+	get_remote_addr(child_ep);
+
 	c4iw_get_ep(&parent_ep->com);
 	child_ep->parent_ep = parent_ep;
 	child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	ep = lookup_tid(t, tid);
 	if (is_neg_adv(req->status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Negative advice on abort - tid %u status %d (%s)\n",
-			 ep->hwtid, req->status, neg_adv_str(req->status));
+		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+		     __func__, ep->hwtid, req->status,
+		     neg_adv_str(req->status));
+		ep->stats.abort_neg_adv++;
+		mutex_lock(&dev->rdev.stats.lock);
+		dev->rdev.stats.neg_adv++;
+		mutex_unlock(&dev->rdev.stats.lock);
 		return 0;
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	 * TP will ignore any value > 0 for MSS index.
 	 */
 	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
-	req->cookie = (unsigned long)skb;
+	req->cookie = (uintptr_t)skb;
 
 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
 	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}
 	if (is_neg_adv(req->status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Negative advice on abort - tid %u status %d (%s)\n",
-			 ep->hwtid, req->status, neg_adv_str(req->status));
+		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+		     __func__, ep->hwtid, req->status,
+		     neg_adv_str(req->status));
+		ep->stats.abort_neg_adv++;
+		dev->rdev.stats.neg_adv++;
 		kfree_skb(skb);
 		return 0;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ab7692ac2044..68ddb3710215 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (unsigned long) &wr_wait;
+	res_wr->cookie = (uintptr_t)&wr_wait;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (unsigned long) &wr_wait;
+	res_wr->cookie = (uintptr_t)&wr_wait;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 		goto err4;
 
 	cq->gen = 1;
-	cq->gts = rdev->lldi.gts_reg;
 	cq->rdev = rdev;
 	if (user) {
-		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
-			(cq->cqid << rdev->cqshift);
-		cq->ugts &= PAGE_MASK;
+		u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;
+
+		cq->ugts = (u64)rdev->bar2_pa + off;
+	} else if (is_t4(rdev->lldi.adapter_type)) {
+		cq->gts = rdev->lldi.gts_reg;
+		cq->qid_mask = -1U;
+	} else {
+		u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;
+
+		cq->gts = rdev->bar2_kva + off;
+		cq->qid_mask = rdev->qpmask;
 	}
 	return 0;
 err4:
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	}
 	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
 	     __func__, chp->cq.cqid, chp, chp->cq.size,
-	     chp->cq.memsize,
-	     (unsigned long long) chp->cq.dma_addr);
+	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
 	return &chp->ibcq;
 err5:
 	kfree(mm2);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8fb295e4a9ab..cf54d6922dc4 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
 	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
 	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
 	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
 	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
 	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
 };
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
 	int prev_ts_set = 0;
 	int idx, end;
 
-#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
 
 	idx = atomic_read(&dev->rdev.wr_log_idx) &
 		(dev->rdev.wr_log_size - 1);
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v)
 		   dev->rdev.stats.act_ofld_conn_fails);
 	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
 		   dev->rdev.stats.pas_ofld_conn_fails);
+	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
 	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
 	return 0;
 }
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data)
 		cc = snprintf(epd->buf + epd->pos, space,
 			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
 			      "history 0x%lx hwtid %d atid %d "
+			      "conn_na %u abort_na %u "
 			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
 			      ep, ep->com.cm_id, ep->com.qp,
 			      (int)ep->com.state, ep->com.flags,
 			      ep->com.history, ep->hwtid, ep->atid,
+			      ep->stats.connect_neg_adv,
+			      ep->stats.abort_neg_adv,
 			      &lsin->sin_addr, ntohs(lsin->sin_port),
 			      ntohs(mapped_lsin->sin_port),
 			      &rsin->sin_addr, ntohs(rsin->sin_port),
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data)
 		cc = snprintf(epd->buf + epd->pos, space,
 			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
 			      "history 0x%lx hwtid %d atid %d "
+			      "conn_na %u abort_na %u "
 			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
 			      ep, ep->com.cm_id, ep->com.qp,
 			      (int)ep->com.state, ep->com.flags,
 			      ep->com.history, ep->hwtid, ep->atid,
+			      ep->stats.connect_neg_adv,
+			      ep->stats.abort_neg_adv,
 			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
 			      ntohs(mapped_lsin6->sin6_port),
 			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
@@ -765,6 +773,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
765 | c4iw_init_dev_ucontext(rdev, &rdev->uctx); | 773 | c4iw_init_dev_ucontext(rdev, &rdev->uctx); |
766 | 774 | ||
767 | /* | 775 | /* |
776 | * This implementation assumes udb_density == ucq_density! Eventually | ||
777 | * we might need to support this but for now fail the open. Also the | ||
778 | * cqid and qpid range must match for now. | ||
779 | */ | ||
780 | if (rdev->lldi.udb_density != rdev->lldi.ucq_density) { | ||
781 | pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n", | ||
782 | pci_name(rdev->lldi.pdev), rdev->lldi.udb_density, | ||
783 | rdev->lldi.ucq_density); | ||
784 | err = -EINVAL; | ||
785 | goto err1; | ||
786 | } | ||
787 | if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || | ||
788 | rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) { | ||
789 | pr_err(MOD "%s: unsupported qp and cq id ranges " | ||
790 | "qp start %u size %u cq start %u size %u\n", | ||
791 | pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start, | ||
792 | rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, | ||
793 | rdev->lldi.vr->cq.size); | ||
794 | err = -EINVAL; | ||
795 | goto err1; | ||
796 | } | ||
797 | |||
798 | /* | ||
768 | * qpshift is the number of bits to shift the qpid left in order | 799 | * qpshift is the number of bits to shift the qpid left in order |
769 | * to get the correct address of the doorbell for that qp. | 800 | * to get the correct address of the doorbell for that qp. |
770 | */ | 801 | */ |
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
784 | rdev->lldi.vr->qp.size, | 815 | rdev->lldi.vr->qp.size, |
785 | rdev->lldi.vr->cq.start, | 816 | rdev->lldi.vr->cq.start, |
786 | rdev->lldi.vr->cq.size); | 817 | rdev->lldi.vr->cq.size); |
787 | PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu " | 818 | PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu " |
788 | "qpmask 0x%x cqshift %lu cqmask 0x%x\n", | 819 | "qpmask 0x%x cqshift %lu cqmask 0x%x\n", |
789 | (unsigned)pci_resource_len(rdev->lldi.pdev, 2), | 820 | (unsigned)pci_resource_len(rdev->lldi.pdev, 2), |
790 | (u64)pci_resource_start(rdev->lldi.pdev, 2), | 821 | (void *)pci_resource_start(rdev->lldi.pdev, 2), |
791 | rdev->lldi.db_reg, | 822 | rdev->lldi.db_reg, |
792 | rdev->lldi.gts_reg, | 823 | rdev->lldi.gts_reg, |
793 | rdev->qpshift, rdev->qpmask, | 824 | rdev->qpshift, rdev->qpmask, |
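The c4iw_rdev_open() changes above add two sanity checks (equal udb/ucq densities, matching qp/cq id ranges) ahead of the existing qpshift setup, whose comment describes qpshift as the left-shift that turns a qpid into the offset of that QP's doorbell. A minimal sketch of that address arithmetic, assuming the doorbell region starts at the BAR2 base printed in the PDBG line; the names are illustrative, not the driver's helpers:

#include <stdint.h>

/* Illustrative only: derive a QP's user doorbell address from the BAR2
 * base, assuming the doorbell for qpid lives at (qpid << qpshift) from
 * that base, as the qpshift comment in c4iw_rdev_open() describes. */
static inline uint64_t example_qp_doorbell(uint64_t udb_base, uint32_t qpid,
                                           unsigned long qpshift)
{
        return udb_base + ((uint64_t)qpid << qpshift);
}
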
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index d87e1650f643..97bb5550a6cf 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -137,6 +137,7 @@ struct c4iw_stats { | |||
137 | u64 tcam_full; | 137 | u64 tcam_full; |
138 | u64 act_ofld_conn_fails; | 138 | u64 act_ofld_conn_fails; |
139 | u64 pas_ofld_conn_fails; | 139 | u64 pas_ofld_conn_fails; |
140 | u64 neg_adv; | ||
140 | }; | 141 | }; |
141 | 142 | ||
142 | struct c4iw_hw_queue { | 143 | struct c4iw_hw_queue { |
@@ -814,6 +815,11 @@ struct c4iw_listen_ep { | |||
814 | int backlog; | 815 | int backlog; |
815 | }; | 816 | }; |
816 | 817 | ||
818 | struct c4iw_ep_stats { | ||
819 | unsigned connect_neg_adv; | ||
820 | unsigned abort_neg_adv; | ||
821 | }; | ||
822 | |||
817 | struct c4iw_ep { | 823 | struct c4iw_ep { |
818 | struct c4iw_ep_common com; | 824 | struct c4iw_ep_common com; |
819 | struct c4iw_ep *parent_ep; | 825 | struct c4iw_ep *parent_ep; |
@@ -846,6 +852,7 @@ struct c4iw_ep { | |||
846 | unsigned int retry_count; | 852 | unsigned int retry_count; |
847 | int snd_win; | 853 | int snd_win; |
848 | int rcv_win; | 854 | int rcv_win; |
855 | struct c4iw_ep_stats stats; | ||
849 | }; | 856 | }; |
850 | 857 | ||
851 | static inline void print_addr(struct c4iw_ep_common *epc, const char *func, | 858 | static inline void print_addr(struct c4iw_ep_common *epc, const char *func, |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 3ef0cf9f5c44..cff815b91707 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, | |||
144 | if (i == (num_wqe-1)) { | 144 | if (i == (num_wqe-1)) { |
145 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | | 145 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | |
146 | FW_WR_COMPL_F); | 146 | FW_WR_COMPL_F); |
147 | req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; | 147 | req->wr.wr_lo = (__force __be64)&wr_wait; |
148 | } else | 148 | } else |
149 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); | 149 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); |
150 | req->wr.wr_mid = cpu_to_be32( | 150 | req->wr.wr_mid = cpu_to_be32( |
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) | |||
676 | mhp->attr.zbva = 0; | 676 | mhp->attr.zbva = 0; |
677 | mhp->attr.va_fbo = 0; | 677 | mhp->attr.va_fbo = 0; |
678 | mhp->attr.page_size = 0; | 678 | mhp->attr.page_size = 0; |
679 | mhp->attr.len = ~0UL; | 679 | mhp->attr.len = ~0ULL; |
680 | mhp->attr.pbl_size = 0; | 680 | mhp->attr.pbl_size = 0; |
681 | 681 | ||
682 | ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, | 682 | ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, |
683 | FW_RI_STAG_NSMR, mhp->attr.perms, | 683 | FW_RI_STAG_NSMR, mhp->attr.perms, |
684 | mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0); | 684 | mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0); |
685 | if (ret) | 685 | if (ret) |
686 | goto err1; | 686 | goto err1; |
687 | 687 | ||
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 15cae5a31018..389ced335bc5 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
275 | FW_RI_RES_WR_NRES_V(2) | | 275 | FW_RI_RES_WR_NRES_V(2) | |
276 | FW_WR_COMPL_F); | 276 | FW_WR_COMPL_F); |
277 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); | 277 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
278 | res_wr->cookie = (unsigned long) &wr_wait; | 278 | res_wr->cookie = (uintptr_t)&wr_wait; |
279 | res = res_wr->res; | 279 | res = res_wr->res; |
280 | res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; | 280 | res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; |
281 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; | 281 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; |
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1209 | wqe->flowid_len16 = cpu_to_be32( | 1209 | wqe->flowid_len16 = cpu_to_be32( |
1210 | FW_WR_FLOWID_V(ep->hwtid) | | 1210 | FW_WR_FLOWID_V(ep->hwtid) | |
1211 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | 1211 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); |
1212 | wqe->cookie = (unsigned long) &ep->com.wr_wait; | 1212 | wqe->cookie = (uintptr_t)&ep->com.wr_wait; |
1213 | 1213 | ||
1214 | wqe->u.fini.type = FW_RI_TYPE_FINI; | 1214 | wqe->u.fini.type = FW_RI_TYPE_FINI; |
1215 | ret = c4iw_ofld_send(&rhp->rdev, skb); | 1215 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) | |||
1279 | FW_WR_FLOWID_V(qhp->ep->hwtid) | | 1279 | FW_WR_FLOWID_V(qhp->ep->hwtid) | |
1280 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | 1280 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); |
1281 | 1281 | ||
1282 | wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait; | 1282 | wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait; |
1283 | 1283 | ||
1284 | wqe->u.init.type = FW_RI_TYPE_INIT; | 1284 | wqe->u.init.type = FW_RI_TYPE_INIT; |
1285 | wqe->u.init.mpareqbit_p2ptype = | 1285 | wqe->u.init.mpareqbit_p2ptype = |
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
1766 | mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); | 1766 | mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); |
1767 | insert_mmap(ucontext, mm2); | 1767 | insert_mmap(ucontext, mm2); |
1768 | mm3->key = uresp.sq_db_gts_key; | 1768 | mm3->key = uresp.sq_db_gts_key; |
1769 | mm3->addr = (__force unsigned long) qhp->wq.sq.udb; | 1769 | mm3->addr = (__force unsigned long)qhp->wq.sq.udb; |
1770 | mm3->len = PAGE_SIZE; | 1770 | mm3->len = PAGE_SIZE; |
1771 | insert_mmap(ucontext, mm3); | 1771 | insert_mmap(ucontext, mm3); |
1772 | mm4->key = uresp.rq_db_gts_key; | 1772 | mm4->key = uresp.rq_db_gts_key; |
1773 | mm4->addr = (__force unsigned long) qhp->wq.rq.udb; | 1773 | mm4->addr = (__force unsigned long)qhp->wq.rq.udb; |
1774 | mm4->len = PAGE_SIZE; | 1774 | mm4->len = PAGE_SIZE; |
1775 | insert_mmap(ucontext, mm4); | 1775 | insert_mmap(ucontext, mm4); |
1776 | if (mm5) { | 1776 | if (mm5) { |
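The qp.c hunks above switch the wr_wait cookie casts from (unsigned long) to (uintptr_t), so the pointer-to-64-bit-cookie round trip stays clean on 32-bit builds as well. A standalone sketch of that round trip with hypothetical names (the real cookie is carried by the firmware and handed back in a completion):

#include <stdint.h>
#include <stdio.h>

struct wr_wait_example {
        int done;
};

int main(void)
{
        struct wr_wait_example wait = { .done = 0 };

        /* Store the pointer in a 64-bit cookie via uintptr_t; the
         * intermediate cast avoids size-mismatch warnings on 32-bit. */
        uint64_t cookie = (uintptr_t)&wait;

        /* Later, on completion, recover the pointer from the cookie. */
        struct wr_wait_example *w =
                (struct wr_wait_example *)(uintptr_t)cookie;

        w->done = 1;
        printf("done=%d\n", wait.done);
        return 0;
}
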
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 871cdcac7be2..7f2a6c244d25 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h | |||
@@ -539,6 +539,7 @@ struct t4_cq { | |||
539 | size_t memsize; | 539 | size_t memsize; |
540 | __be64 bits_type_ts; | 540 | __be64 bits_type_ts; |
541 | u32 cqid; | 541 | u32 cqid; |
542 | u32 qid_mask; | ||
542 | int vector; | 543 | int vector; |
543 | u16 size; /* including status page */ | 544 | u16 size; /* including status page */ |
544 | u16 cidx; | 545 | u16 cidx; |
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se) | |||
563 | set_bit(CQ_ARMED, &cq->flags); | 564 | set_bit(CQ_ARMED, &cq->flags); |
564 | while (cq->cidx_inc > CIDXINC_M) { | 565 | while (cq->cidx_inc > CIDXINC_M) { |
565 | val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | | 566 | val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | |
566 | INGRESSQID_V(cq->cqid); | 567 | INGRESSQID_V(cq->cqid & cq->qid_mask); |
567 | writel(val, cq->gts); | 568 | writel(val, cq->gts); |
568 | cq->cidx_inc -= CIDXINC_M; | 569 | cq->cidx_inc -= CIDXINC_M; |
569 | } | 570 | } |
570 | val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | | 571 | val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | |
571 | INGRESSQID_V(cq->cqid); | 572 | INGRESSQID_V(cq->cqid & cq->qid_mask); |
572 | writel(val, cq->gts); | 573 | writel(val, cq->gts); |
573 | cq->cidx_inc = 0; | 574 | cq->cidx_inc = 0; |
574 | return 0; | 575 | return 0; |
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq) | |||
601 | u32 val; | 602 | u32 val; |
602 | 603 | ||
603 | val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | | 604 | val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | |
604 | INGRESSQID_V(cq->cqid); | 605 | INGRESSQID_V(cq->cqid & cq->qid_mask); |
605 | writel(val, cq->gts); | 606 | writel(val, cq->gts); |
606 | cq->cidx_inc = 0; | 607 | cq->cidx_inc = 0; |
607 | } | 608 | } |
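The t4.h hunks above add cq->qid_mask and apply it to the cqid on every GTS doorbell write, including the loop that releases an accumulated cidx count in field-sized chunks. A hedged sketch of that pattern; the field maximum and the gts_write callback are stand-ins, not the real register layout:

#include <stdint.h>

#define EXAMPLE_CIDXINC_MAX 127u        /* stand-in for CIDXINC_M */

/* Illustrative only: release an accumulated cidx count in chunks no
 * larger than the hardware field allows, pairing every write with the
 * masked queue id as t4_arm_cq()/t4_hwcq_consume() now do. */
static void example_release_cidx(uint32_t *cidx_inc, uint32_t cqid,
                                 uint32_t qid_mask,
                                 void (*gts_write)(uint32_t inc, uint32_t qid))
{
        while (*cidx_inc > EXAMPLE_CIDXINC_MAX) {
                gts_write(EXAMPLE_CIDXINC_MAX, cqid & qid_mask);
                *cidx_inc -= EXAMPLE_CIDXINC_MAX;
        }
        gts_write(*cidx_inc, cqid & qid_mask);
        *cidx_inc = 0;
}
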
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 5e53327fc647..343e8daf2270 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | |||
@@ -848,6 +848,8 @@ enum { /* TCP congestion control algorithms */ | |||
848 | #define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) | 848 | #define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) |
849 | #define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) | 849 | #define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) |
850 | 850 | ||
851 | #define CONG_CNTRL_VALID (1 << 18) | 851 | #define T5_ISS_S 18 |
852 | #define T5_ISS_V(x) ((x) << T5_ISS_S) | ||
853 | #define T5_ISS_F T5_ISS_V(1U) | ||
852 | 854 | ||
853 | #endif /* _T4FW_RI_API_H_ */ | 855 | #endif /* _T4FW_RI_API_H_ */ |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 3b2a6dc8ea99..9f9d5c563a61 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = { | |||
116 | [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, | 116 | [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, |
117 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, | 117 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, |
118 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, | 118 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, |
119 | [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb}, | ||
119 | [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, | 120 | [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, |
120 | [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, | 121 | [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, |
121 | [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} | 122 | [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 6f09a72e78d7..72b43417cbe3 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic, | |||
596 | memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); | 596 | memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); |
597 | } | 597 | } |
598 | 598 | ||
599 | static void record_sockaddr_info(struct sockaddr_storage *addr_info, | ||
600 | nes_addr_t *ip_addr, u16 *port_num) | ||
601 | { | ||
602 | struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info; | ||
603 | |||
604 | if (in_addr->sin_family == AF_INET) { | ||
605 | *ip_addr = ntohl(in_addr->sin_addr.s_addr); | ||
606 | *port_num = ntohs(in_addr->sin_port); | ||
607 | } | ||
608 | } | ||
609 | |||
599 | /* | 610 | /* |
600 | * nes_record_pm_msg - Save the received mapping info | 611 | * nes_record_pm_msg - Save the received mapping info |
601 | */ | 612 | */ |
602 | static void nes_record_pm_msg(struct nes_cm_info *cm_info, | 613 | static void nes_record_pm_msg(struct nes_cm_info *cm_info, |
603 | struct iwpm_sa_data *pm_msg) | 614 | struct iwpm_sa_data *pm_msg) |
604 | { | 615 | { |
605 | struct sockaddr_in *mapped_loc_addr = | 616 | record_sockaddr_info(&pm_msg->mapped_loc_addr, |
606 | (struct sockaddr_in *)&pm_msg->mapped_loc_addr; | 617 | &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port); |
607 | struct sockaddr_in *mapped_rem_addr = | 618 | |
608 | (struct sockaddr_in *)&pm_msg->mapped_rem_addr; | 619 | record_sockaddr_info(&pm_msg->mapped_rem_addr, |
609 | 620 | &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port); | |
610 | if (mapped_loc_addr->sin_family == AF_INET) { | 621 | } |
611 | cm_info->mapped_loc_addr = | 622 | |
612 | ntohl(mapped_loc_addr->sin_addr.s_addr); | 623 | /* |
613 | cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port); | 624 | * nes_get_reminfo - Get the address info of the remote connecting peer |
614 | } | 625 | */ |
615 | if (mapped_rem_addr->sin_family == AF_INET) { | 626 | static int nes_get_remote_addr(struct nes_cm_node *cm_node) |
616 | cm_info->mapped_rem_addr = | 627 | { |
617 | ntohl(mapped_rem_addr->sin_addr.s_addr); | 628 | struct sockaddr_storage mapped_loc_addr, mapped_rem_addr; |
618 | cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port); | 629 | struct sockaddr_storage remote_addr; |
619 | } | 630 | int ret; |
631 | |||
632 | nes_create_sockaddr(htonl(cm_node->mapped_loc_addr), | ||
633 | htons(cm_node->mapped_loc_port), &mapped_loc_addr); | ||
634 | nes_create_sockaddr(htonl(cm_node->mapped_rem_addr), | ||
635 | htons(cm_node->mapped_rem_port), &mapped_rem_addr); | ||
636 | |||
637 | ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr, | ||
638 | &remote_addr, RDMA_NL_NES); | ||
639 | if (ret) | ||
640 | nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n"); | ||
641 | else | ||
642 | record_sockaddr_info(&remote_addr, &cm_node->rem_addr, | ||
643 | &cm_node->rem_port); | ||
644 | return ret; | ||
620 | } | 645 | } |
621 | 646 | ||
622 | /** | 647 | /** |
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1566 | return NULL; | 1591 | return NULL; |
1567 | 1592 | ||
1568 | /* set our node specific transport info */ | 1593 | /* set our node specific transport info */ |
1569 | cm_node->loc_addr = cm_info->loc_addr; | 1594 | if (listener) { |
1595 | cm_node->loc_addr = listener->loc_addr; | ||
1596 | cm_node->loc_port = listener->loc_port; | ||
1597 | } else { | ||
1598 | cm_node->loc_addr = cm_info->loc_addr; | ||
1599 | cm_node->loc_port = cm_info->loc_port; | ||
1600 | } | ||
1570 | cm_node->rem_addr = cm_info->rem_addr; | 1601 | cm_node->rem_addr = cm_info->rem_addr; |
1571 | cm_node->loc_port = cm_info->loc_port; | ||
1572 | cm_node->rem_port = cm_info->rem_port; | 1602 | cm_node->rem_port = cm_info->rem_port; |
1573 | 1603 | ||
1574 | cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; | 1604 | cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; |
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
2151 | cm_node->state = NES_CM_STATE_ESTABLISHED; | 2181 | cm_node->state = NES_CM_STATE_ESTABLISHED; |
2152 | if (datasize) { | 2182 | if (datasize) { |
2153 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 2183 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
2184 | nes_get_remote_addr(cm_node); | ||
2154 | handle_rcv_mpa(cm_node, skb); | 2185 | handle_rcv_mpa(cm_node, skb); |
2155 | } else { /* rcvd ACK only */ | 2186 | } else { /* rcvd ACK only */ |
2156 | dev_kfree_skb_any(skb); | 2187 | dev_kfree_skb_any(skb); |
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index ffd48bfc4923..ba5173e24973 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit); | |||
1136 | extern u32 qib_cpulist_count; | 1136 | extern u32 qib_cpulist_count; |
1137 | extern unsigned long *qib_cpulist; | 1137 | extern unsigned long *qib_cpulist; |
1138 | 1138 | ||
1139 | extern unsigned qib_wc_pat; | ||
1140 | extern unsigned qib_cc_table_size; | 1139 | extern unsigned qib_cc_table_size; |
1141 | int qib_init(struct qib_devdata *, int); | 1140 | int qib_init(struct qib_devdata *, int); |
1142 | int init_chip_wc_pat(struct qib_devdata *dd, u32); | 1141 | int init_chip_wc_pat(struct qib_devdata *dd, u32); |
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 9ea6c440a00c..725881890c4a 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma, | |||
835 | vma->vm_flags &= ~VM_MAYREAD; | 835 | vma->vm_flags &= ~VM_MAYREAD; |
836 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | 836 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; |
837 | 837 | ||
838 | if (qib_wc_pat) | 838 | /* We used PAT if wc_cookie == 0 */ |
839 | if (!dd->wc_cookie) | ||
839 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | 840 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
840 | 841 | ||
841 | ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, | 842 | ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, |
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 0d2ba59af30a..4b927809d1a1 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd) | |||
3315 | qib_6120_config_ctxts(dd); | 3315 | qib_6120_config_ctxts(dd); |
3316 | qib_set_ctxtcnt(dd); | 3316 | qib_set_ctxtcnt(dd); |
3317 | 3317 | ||
3318 | if (qib_wc_pat) { | 3318 | ret = init_chip_wc_pat(dd, 0); |
3319 | ret = init_chip_wc_pat(dd, 0); | 3319 | if (ret) |
3320 | if (ret) | 3320 | goto bail; |
3321 | goto bail; | ||
3322 | } | ||
3323 | set_6120_baseaddrs(dd); /* set chip access pointers now */ | 3321 | set_6120_baseaddrs(dd); /* set chip access pointers now */ |
3324 | 3322 | ||
3325 | ret = 0; | 3323 | ret = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 22affda8af88..00b2af211157 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd) | |||
4126 | qib_7220_config_ctxts(dd); | 4126 | qib_7220_config_ctxts(dd); |
4127 | qib_set_ctxtcnt(dd); /* needed for PAT setup */ | 4127 | qib_set_ctxtcnt(dd); /* needed for PAT setup */ |
4128 | 4128 | ||
4129 | if (qib_wc_pat) { | 4129 | ret = init_chip_wc_pat(dd, 0); |
4130 | ret = init_chip_wc_pat(dd, 0); | 4130 | if (ret) |
4131 | if (ret) | 4131 | goto bail; |
4132 | goto bail; | ||
4133 | } | ||
4134 | set_7220_baseaddrs(dd); /* set chip access pointers now */ | 4132 | set_7220_baseaddrs(dd); /* set chip access pointers now */ |
4135 | 4133 | ||
4136 | ret = 0; | 4134 | ret = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index ef97b71c8f7d..f32b4628e991 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
6429 | unsigned features, pidx, sbufcnt; | 6429 | unsigned features, pidx, sbufcnt; |
6430 | int ret, mtu; | 6430 | int ret, mtu; |
6431 | u32 sbufs, updthresh; | 6431 | u32 sbufs, updthresh; |
6432 | resource_size_t vl15off; | ||
6432 | 6433 | ||
6433 | /* pport structs are contiguous, allocated after devdata */ | 6434 | /* pport structs are contiguous, allocated after devdata */ |
6434 | ppd = (struct qib_pportdata *)(dd + 1); | 6435 | ppd = (struct qib_pportdata *)(dd + 1); |
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
6677 | qib_7322_config_ctxts(dd); | 6678 | qib_7322_config_ctxts(dd); |
6678 | qib_set_ctxtcnt(dd); | 6679 | qib_set_ctxtcnt(dd); |
6679 | 6680 | ||
6680 | if (qib_wc_pat) { | 6681 | /* |
6681 | resource_size_t vl15off; | 6682 | * We do not set WC on the VL15 buffers to avoid |
6682 | /* | 6683 | * a rare problem with unaligned writes from |
6683 | * We do not set WC on the VL15 buffers to avoid | 6684 | * interrupt-flushed store buffers, so we need |
6684 | * a rare problem with unaligned writes from | 6685 | * to map those separately here. We can't solve |
6685 | * interrupt-flushed store buffers, so we need | 6686 | * this for the rarely used mtrr case. |
6686 | * to map those separately here. We can't solve | 6687 | */ |
6687 | * this for the rarely used mtrr case. | 6688 | ret = init_chip_wc_pat(dd, 0); |
6688 | */ | 6689 | if (ret) |
6689 | ret = init_chip_wc_pat(dd, 0); | 6690 | goto bail; |
6690 | if (ret) | ||
6691 | goto bail; | ||
6692 | 6691 | ||
6693 | /* vl15 buffers start just after the 4k buffers */ | 6692 | /* vl15 buffers start just after the 4k buffers */ |
6694 | vl15off = dd->physaddr + (dd->piobufbase >> 32) + | 6693 | vl15off = dd->physaddr + (dd->piobufbase >> 32) + |
6695 | dd->piobcnt4k * dd->align4k; | 6694 | dd->piobcnt4k * dd->align4k; |
6696 | dd->piovl15base = ioremap_nocache(vl15off, | 6695 | dd->piovl15base = ioremap_nocache(vl15off, |
6697 | NUM_VL15_BUFS * dd->align4k); | 6696 | NUM_VL15_BUFS * dd->align4k); |
6698 | if (!dd->piovl15base) { | 6697 | if (!dd->piovl15base) { |
6699 | ret = -ENOMEM; | 6698 | ret = -ENOMEM; |
6700 | goto bail; | 6699 | goto bail; |
6701 | } | ||
6702 | } | 6700 | } |
6701 | |||
6703 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ | 6702 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ |
6704 | 6703 | ||
6705 | ret = 0; | 6704 | ret = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 2ee36953e234..7e00470adc30 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); | |||
91 | unsigned qib_cc_table_size; | 91 | unsigned qib_cc_table_size; |
92 | module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); | 92 | module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); |
93 | MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); | 93 | MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); |
94 | /* | ||
95 | * qib_wc_pat parameter: | ||
96 | * 0 is WC via MTRR | ||
97 | * 1 is WC via PAT | ||
98 | * If PAT initialization fails, code reverts back to MTRR | ||
99 | */ | ||
100 | unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ | ||
101 | module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); | ||
102 | MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); | ||
103 | 94 | ||
104 | static void verify_interrupt(unsigned long); | 95 | static void verify_interrupt(unsigned long); |
105 | 96 | ||
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd) | |||
1377 | spin_unlock(&dd->pport[pidx].cc_shadow_lock); | 1368 | spin_unlock(&dd->pport[pidx].cc_shadow_lock); |
1378 | } | 1369 | } |
1379 | 1370 | ||
1380 | if (!qib_wc_pat) | 1371 | qib_disable_wc(dd); |
1381 | qib_disable_wc(dd); | ||
1382 | 1372 | ||
1383 | if (dd->pioavailregs_dma) { | 1373 | if (dd->pioavailregs_dma) { |
1384 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, | 1374 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, |
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1547 | goto bail; | 1537 | goto bail; |
1548 | } | 1538 | } |
1549 | 1539 | ||
1550 | if (!qib_wc_pat) { | 1540 | ret = qib_enable_wc(dd); |
1551 | ret = qib_enable_wc(dd); | 1541 | if (ret) { |
1552 | if (ret) { | 1542 | qib_dev_err(dd, |
1553 | qib_dev_err(dd, | 1543 | "Write combining not enabled (err %d): performance may be poor\n", |
1554 | "Write combining not enabled (err %d): performance may be poor\n", | 1544 | -ret); |
1555 | -ret); | 1545 | ret = 0; |
1556 | ret = 0; | ||
1557 | } | ||
1558 | } | 1546 | } |
1559 | 1547 | ||
1560 | qib_verify_pioperf(dd); | 1548 | qib_verify_pioperf(dd); |
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c index 81b225f2300a..6d61ef98721c 100644 --- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c +++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c | |||
@@ -116,21 +116,9 @@ int qib_enable_wc(struct qib_devdata *dd) | |||
116 | } | 116 | } |
117 | 117 | ||
118 | if (!ret) { | 118 | if (!ret) { |
119 | int cookie; | 119 | dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen); |
120 | 120 | if (dd->wc_cookie < 0) | |
121 | cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0); | 121 | ret = -EINVAL; |
122 | if (cookie < 0) { | ||
123 | { | ||
124 | qib_devinfo(dd->pcidev, | ||
125 | "mtrr_add() WC for PIO bufs failed (%d)\n", | ||
126 | cookie); | ||
127 | ret = -EINVAL; | ||
128 | } | ||
129 | } else { | ||
130 | dd->wc_cookie = cookie; | ||
131 | dd->wc_base = (unsigned long) pioaddr; | ||
132 | dd->wc_len = (unsigned long) piolen; | ||
133 | } | ||
134 | } | 122 | } |
135 | 123 | ||
136 | return ret; | 124 | return ret; |
@@ -142,18 +130,7 @@ int qib_enable_wc(struct qib_devdata *dd) | |||
142 | */ | 130 | */ |
143 | void qib_disable_wc(struct qib_devdata *dd) | 131 | void qib_disable_wc(struct qib_devdata *dd) |
144 | { | 132 | { |
145 | if (dd->wc_cookie) { | 133 | arch_phys_wc_del(dd->wc_cookie); |
146 | int r; | ||
147 | |||
148 | r = mtrr_del(dd->wc_cookie, dd->wc_base, | ||
149 | dd->wc_len); | ||
150 | if (r < 0) | ||
151 | qib_devinfo(dd->pcidev, | ||
152 | "mtrr_del(%lx, %lx, %lx) failed: %d\n", | ||
153 | dd->wc_cookie, dd->wc_base, | ||
154 | dd->wc_len, r); | ||
155 | dd->wc_cookie = 0; /* even on failure */ | ||
156 | } | ||
157 | } | 134 | } |
158 | 135 | ||
159 | /** | 136 | /** |
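The qib_wc_x86_64.c hunks above drop the hand-rolled mtrr_add()/mtrr_del() handling in favour of arch_phys_wc_add()/arch_phys_wc_del(), with the convention (also noted in the mmap_piobufs hunk) that a cookie of 0 means PAT did the work and a negative value means failure. A kernel-context sketch of that convention with illustrative names; it follows the hunks above rather than documenting the helpers themselves:

#include <linux/errno.h>
#include <linux/io.h>

struct example_dev {
        unsigned long piobase;
        unsigned long piolen;
        int wc_cookie;
};

static int example_enable_wc(struct example_dev *dd)
{
        /* 0 => PAT covered it, > 0 => MTRR handle, < 0 => failure. */
        dd->wc_cookie = arch_phys_wc_add(dd->piobase, dd->piolen);
        return dd->wc_cookie < 0 ? -EINVAL : 0;
}

static void example_disable_wc(struct example_dev *dd)
{
        /* Called unconditionally, as qib_disable_wc() now does. */
        arch_phys_wc_del(dd->wc_cookie);
}
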
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 56959adb6c7d..cf32a778e7d0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i | |||
386 | rx->rx_ring[i].mapping, | 386 | rx->rx_ring[i].mapping, |
387 | GFP_KERNEL)) { | 387 | GFP_KERNEL)) { |
388 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); | 388 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); |
389 | ret = -ENOMEM; | 389 | ret = -ENOMEM; |
390 | goto err_count; | 390 | goto err_count; |
391 | } | 391 | } |
392 | ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); | 392 | ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); |
393 | if (ret) { | 393 | if (ret) { |
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index ce55906b54a0..ac54c27a2bfd 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h | |||
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid) | |||
160 | } | 160 | } |
161 | 161 | ||
162 | /* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ | 162 | /* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ |
163 | static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) | 163 | static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) |
164 | { | 164 | { |
165 | if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { | 165 | if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { |
166 | struct sockaddr_in *out_in = (struct sockaddr_in *)out; | 166 | struct sockaddr_in *out_in = (struct sockaddr_in *)out; |
@@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) | |||
173 | out_in->sin6_family = AF_INET6; | 173 | out_in->sin6_family = AF_INET6; |
174 | memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16); | 174 | memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16); |
175 | } | 175 | } |
176 | return 0; | ||
177 | } | 176 | } |
178 | 177 | ||
179 | static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, | 178 | static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, |
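With rdma_gid2ip() above now returning void, callers no longer have an error path to check; they simply fill the sockaddr union the header comment asks for. A hedged usage sketch (kernel context; the union and function names are illustrative):

#include <rdma/ib_addr.h>

union example_gid_addr {
        struct sockaddr         _sockaddr;
        struct sockaddr_in      _sockaddr_in;
        struct sockaddr_in6     _sockaddr_in6;
};

static void example_gid_to_ip(union ib_gid *gid)
{
        union example_gid_addr addr;

        /* No return value to check: a v4-mapped GID fills the sockaddr_in
         * member, anything else fills the sockaddr_in6 member. */
        rdma_gid2ip(&addr._sockaddr, gid);
}
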
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 0e3ff30647d5..39ed2d2fbd51 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h | |||
@@ -105,7 +105,8 @@ enum ib_cm_data_size { | |||
105 | IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, | 105 | IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, |
106 | IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, | 106 | IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, |
107 | IB_CM_SIDR_REP_INFO_LENGTH = 72, | 107 | IB_CM_SIDR_REP_INFO_LENGTH = 72, |
108 | IB_CM_COMPARE_SIZE = 64 | 108 | /* compare done u32 at a time */ |
109 | IB_CM_COMPARE_SIZE = (64 / sizeof(u32)) | ||
109 | }; | 110 | }; |
110 | 111 | ||
111 | struct ib_cm_id; | 112 | struct ib_cm_id; |
@@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id); | |||
337 | #define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) | 338 | #define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) |
338 | 339 | ||
339 | struct ib_cm_compare_data { | 340 | struct ib_cm_compare_data { |
340 | u8 data[IB_CM_COMPARE_SIZE]; | 341 | u32 data[IB_CM_COMPARE_SIZE]; |
341 | u8 mask[IB_CM_COMPARE_SIZE]; | 342 | u32 mask[IB_CM_COMPARE_SIZE]; |
342 | }; | 343 | }; |
343 | 344 | ||
344 | /** | 345 | /** |
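The ib_cm.h hunk above turns the compare buffers into u32 arrays and redefines IB_CM_COMPARE_SIZE in u32 units, so masked private-data matching can proceed one word at a time. A standalone sketch of such a word-wise masked compare; it is not the core's routine, just an illustration of the layout the new types imply:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_COMPARE_WORDS (64 / sizeof(uint32_t))

/* Illustrative only: compare data against cmp under mask, one u32 at
 * a time, as the redefined IB_CM_COMPARE_SIZE now measures. */
static bool example_masked_match(const uint32_t *data, const uint32_t *cmp,
                                 const uint32_t *mask)
{
        size_t i;

        for (i = 0; i < EXAMPLE_COMPARE_WORDS; i++)
                if ((data[i] & mask[i]) != (cmp[i] & mask[i]))
                        return false;
        return true;
}
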
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h index 928b2775e992..fda31673a562 100644 --- a/include/rdma/iw_portmap.h +++ b/include/rdma/iw_portmap.h | |||
@@ -148,6 +148,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *); | |||
148 | int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *); | 148 | int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *); |
149 | 149 | ||
150 | /** | 150 | /** |
151 | * iwpm_remote_info_cb - Process remote connecting peer address info, which | ||
152 | * the port mapper has received from the connecting peer | ||
153 | * | ||
154 | * @cb: Contains the received message (payload and netlink header) | ||
155 | * | ||
156 | * Stores the IPv4/IPv6 address info in a hash table | ||
157 | */ | ||
158 | int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *); | ||
159 | |||
160 | /** | ||
151 | * iwpm_mapping_error_cb - Process port mapper notification for error | 161 | * iwpm_mapping_error_cb - Process port mapper notification for error |
152 | * | 162 | * |
153 | * @skb: | 163 | * @skb: |
@@ -175,6 +185,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *); | |||
175 | int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *); | 185 | int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *); |
176 | 186 | ||
177 | /** | 187 | /** |
188 | * iwpm_get_remote_info - Get the remote connecting peer address info | ||
189 | * | ||
190 | * @mapped_loc_addr: Mapped local address of the listening peer | ||
191 | * @mapped_rem_addr: Mapped remote address of the connecting peer | ||
192 | * @remote_addr: To store the remote address of the connecting peer | ||
193 | * @nl_client: The index of the netlink client | ||
194 | * | ||
195 | * The remote address info is retrieved and provided to the client in | ||
196 | * remote_addr. After that, the entry is removed from the hash table. ||
197 | */ | ||
198 | int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr, | ||
199 | struct sockaddr_storage *mapped_rem_addr, | ||
200 | struct sockaddr_storage *remote_addr, u8 nl_client); | ||
201 | |||
202 | /** | ||
178 | * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address | 203 | * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address |
179 | * info in a hash table | 204 | * info in a hash table |
180 | * @local_addr: Local ip/tcp address | 205 | * @local_addr: Local ip/tcp address |
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index de69170a30ce..6e4bb4270ca2 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h | |||
@@ -37,6 +37,7 @@ enum { | |||
37 | RDMA_NL_IWPM_ADD_MAPPING, | 37 | RDMA_NL_IWPM_ADD_MAPPING, |
38 | RDMA_NL_IWPM_QUERY_MAPPING, | 38 | RDMA_NL_IWPM_QUERY_MAPPING, |
39 | RDMA_NL_IWPM_REMOVE_MAPPING, | 39 | RDMA_NL_IWPM_REMOVE_MAPPING, |
40 | RDMA_NL_IWPM_REMOTE_INFO, | ||
40 | RDMA_NL_IWPM_HANDLE_ERR, | 41 | RDMA_NL_IWPM_HANDLE_ERR, |
41 | RDMA_NL_IWPM_MAPINFO, | 42 | RDMA_NL_IWPM_MAPINFO, |
42 | RDMA_NL_IWPM_MAPINFO_NUM, | 43 | RDMA_NL_IWPM_MAPINFO_NUM, |