Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/addr.c              |  13
-rw-r--r--  drivers/infiniband/core/cm.c                |  25
-rw-r--r--  drivers/infiniband/core/cm_msgs.h           |   4
-rw-r--r--  drivers/infiniband/core/cma.c               |  59
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c          |  75
-rw-r--r--  drivers/infiniband/core/iwpm_util.c         | 208
-rw-r--r--  drivers/infiniband/core/iwpm_util.h         |  15
-rw-r--r--  drivers/infiniband/core/umem_odp.c          |  14
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c            |  87
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c            |  22
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c        |  41
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h      |   7
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c           |   6
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c            |  10
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h            |   7
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h   |   4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mcast.c     |   4
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c           |   3
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c             |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes.c             |   1
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c          |  65
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h       |   4
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c    |  12
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c    |  83
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h   |   9
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c |  12
-rw-r--r--  drivers/infiniband/hw/qib/qib.h             |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c    |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba6120.c     |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c     |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c     |  41
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c        |  26
-rw-r--r--  drivers/infiniband/hw/qib/qib_wc_x86_64.c   |  32
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c     |   4
34 files changed, 643 insertions(+), 274 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f80da50d84a5..38339d220d7f 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
 	} sgid_addr, dgid_addr;
 
 
-	ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
-	if (ret)
-		return ret;
-
-	ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
-	if (ret)
-		return ret;
+	rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+	rdma_gid2ip(&dgid_addr._sockaddr, dgid);
 
 	memset(&dev_addr, 0, sizeof(dev_addr));
 
@@ -512,10 +507,8 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
 		struct sockaddr_in6 _sockaddr_in6;
 	} gid_addr;
 
-	ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+	rdma_gid2ip(&gid_addr._sockaddr, sgid);
 
-	if (ret)
-		return ret;
 	memset(&dev_addr, 0, sizeof(dev_addr));
 	ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
 	if (ret)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e28a494e2a3a..0271608a51c4 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
 	return cm_id_priv;
 }
 
-static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
+static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
 {
 	int i;
 
-	for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
-		((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
-					     ((unsigned long *) mask)[i];
+	for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
+		dst[i] = src[i] & mask[i];
 }
 
 static int cm_compare_data(struct ib_cm_compare_data *src_data,
 			   struct ib_cm_compare_data *dst_data)
 {
-	u8 src[IB_CM_COMPARE_SIZE];
-	u8 dst[IB_CM_COMPARE_SIZE];
+	u32 src[IB_CM_COMPARE_SIZE];
+	u32 dst[IB_CM_COMPARE_SIZE];
 
 	if (!src_data || !dst_data)
 		return 0;
 
 	cm_mask_copy(src, src_data->data, dst_data->mask);
 	cm_mask_copy(dst, dst_data->data, src_data->mask);
-	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
+	return memcmp(src, dst, sizeof(src));
 }
 
-static int cm_compare_private_data(u8 *private_data,
+static int cm_compare_private_data(u32 *private_data,
 				   struct ib_cm_compare_data *dst_data)
 {
-	u8 src[IB_CM_COMPARE_SIZE];
+	u32 src[IB_CM_COMPARE_SIZE];
 
 	if (!dst_data)
 		return 0;
 
 	cm_mask_copy(src, private_data, dst_data->mask);
-	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
+	return memcmp(src, dst_data->data, sizeof(src));
 }
 
 /*
@@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 
 static struct cm_id_private * cm_find_listen(struct ib_device *device,
 					     __be64 service_id,
-					     u8 *private_data)
+					     u32 *private_data)
 {
 	struct rb_node *node = cm.listen_service_table.rb_node;
 	struct cm_id_private *cm_id_priv;
@@ -862,6 +861,7 @@ retest:
 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
 		break;
 	case IB_CM_REQ_SENT:
+	case IB_CM_MRA_REQ_RCVD:
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 		spin_unlock_irq(&cm_id_priv->lock);
 		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
@@ -880,7 +880,6 @@ retest:
 			       NULL, 0, NULL, 0);
 		}
 		break;
-	case IB_CM_MRA_REQ_RCVD:
 	case IB_CM_REP_SENT:
 	case IB_CM_MRA_REP_RCVD:
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
@@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
 		cm_mask_copy(cm_id_priv->compare_data->data,
 			     compare_data->data, compare_data->mask);
 		memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
-		       IB_CM_COMPARE_SIZE);
+		       sizeof(compare_data->mask));
 	}
 
 	cm_id->state = IB_CM_LISTEN;
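
As a reading aid, the word-wise masked compare that the patched cm_mask_copy()/cm_compare_data() perform can be rendered as a stand-alone user-space sketch. COMPARE_WORDS below is an assumption standing in for the u32-based IB_CM_COMPARE_SIZE; it is not taken from the kernel headers.

#include <stdint.h>
#include <string.h>

#define COMPARE_WORDS 16	/* assumed: 64 bytes of private data as u32 words */

static void mask_copy(uint32_t *dst, const uint32_t *src, const uint32_t *mask)
{
	int i;

	/* keep only the bits the mask selects */
	for (i = 0; i < COMPARE_WORDS; i++)
		dst[i] = src[i] & mask[i];
}

/* returns 0 when a and b agree everywhere both masks care about,
 * the same shape as cm_compare_data() above */
static int compare_data(const uint32_t *a, const uint32_t *a_mask,
			const uint32_t *b, const uint32_t *b_mask)
{
	uint32_t am[COMPARE_WORDS], bm[COMPARE_WORDS];

	mask_copy(am, a, b_mask);
	mask_copy(bm, b, a_mask);
	return memcmp(am, bm, sizeof(am));
}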
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index be068f47e47e..8b76f0ef965e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -103,7 +103,7 @@ struct cm_req_msg {
 	/* local ACK timeout:5, rsvd:3 */
 	u8 alt_offset139;
 
-	u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+	u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
 
 } __attribute__ ((packed));
 
@@ -801,7 +801,7 @@ struct cm_sidr_req_msg {
 	__be16 rsvd;
 	__be64 service_id;
 
-	u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
+	u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
 } __attribute__ ((packed));
 
 struct cm_sidr_rep_msg {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d570030d899c..38ffe0981503 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -845,33 +845,49 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
 	ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
 	ib->sib_family = listen_ib->sib_family;
-	ib->sib_pkey = path->pkey;
-	ib->sib_flowinfo = path->flow_label;
-	memcpy(&ib->sib_addr, &path->sgid, 16);
+	if (path) {
+		ib->sib_pkey = path->pkey;
+		ib->sib_flowinfo = path->flow_label;
+		memcpy(&ib->sib_addr, &path->sgid, 16);
+	} else {
+		ib->sib_pkey = listen_ib->sib_pkey;
+		ib->sib_flowinfo = listen_ib->sib_flowinfo;
+		ib->sib_addr = listen_ib->sib_addr;
+	}
 	ib->sib_sid = listen_ib->sib_sid;
 	ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
 	ib->sib_scope_id = listen_ib->sib_scope_id;
 
-	ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
-	ib->sib_family = listen_ib->sib_family;
-	ib->sib_pkey = path->pkey;
-	ib->sib_flowinfo = path->flow_label;
-	memcpy(&ib->sib_addr, &path->dgid, 16);
+	if (path) {
+		ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
+		ib->sib_family = listen_ib->sib_family;
+		ib->sib_pkey = path->pkey;
+		ib->sib_flowinfo = path->flow_label;
+		memcpy(&ib->sib_addr, &path->dgid, 16);
+	}
+}
+
+static __be16 ss_get_port(const struct sockaddr_storage *ss)
+{
+	if (ss->ss_family == AF_INET)
+		return ((struct sockaddr_in *)ss)->sin_port;
+	else if (ss->ss_family == AF_INET6)
+		return ((struct sockaddr_in6 *)ss)->sin6_port;
+	BUG();
 }
 
 static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
 			      struct cma_hdr *hdr)
 {
-	struct sockaddr_in *listen4, *ip4;
+	struct sockaddr_in *ip4;
 
-	listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
 	ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
-	ip4->sin_family = listen4->sin_family;
+	ip4->sin_family = AF_INET;
 	ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
-	ip4->sin_port = listen4->sin_port;
+	ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
 
 	ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
-	ip4->sin_family = listen4->sin_family;
+	ip4->sin_family = AF_INET;
 	ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
 	ip4->sin_port = hdr->port;
 }
@@ -879,16 +895,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
 static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
 			      struct cma_hdr *hdr)
 {
-	struct sockaddr_in6 *listen6, *ip6;
+	struct sockaddr_in6 *ip6;
 
-	listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
 	ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
-	ip6->sin6_family = listen6->sin6_family;
+	ip6->sin6_family = AF_INET6;
 	ip6->sin6_addr = hdr->dst_addr.ip6;
-	ip6->sin6_port = listen6->sin6_port;
+	ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
 
 	ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
-	ip6->sin6_family = listen6->sin6_family;
+	ip6->sin6_family = AF_INET6;
 	ip6->sin6_addr = hdr->src_addr.ip6;
 	ip6->sin6_port = hdr->port;
 }
@@ -898,9 +913,11 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 {
 	struct cma_hdr *hdr;
 
-	if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
-	    (ib_event->event == IB_CM_REQ_RECEIVED)) {
+	if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
+		if (ib_event->event == IB_CM_REQ_RECEIVED)
 			cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
+		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
+			cma_save_ib_info(id, listen_id, NULL);
 		return 0;
 	}
 
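
For context, the family dispatch that the new ss_get_port() helper introduces can be exercised stand-alone. This is a hedged sketch, not kernel code: port_of() is an illustrative name, the returned port stays in network byte order as in the patch, and it fails soft where the kernel version calls BUG().

#include <netinet/in.h>
#include <sys/socket.h>

static in_port_t port_of(const struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET)
		return ((const struct sockaddr_in *)ss)->sin_port;
	if (ss->ss_family == AF_INET6)
		return ((const struct sockaddr_in6 *)ss)->sin6_port;
	return 0;	/* unknown family; the kernel helper BUG()s here */
}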
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index b85ddbc979e0..e6ffa2e66c1a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -33,7 +33,7 @@
 
 #include "iwpm_util.h"
 
-static const char iwpm_ulib_name[] = "iWarpPortMapperUser";
+static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
 static int iwpm_ulib_version = 3;
 static int iwpm_user_pid = IWPM_PID_UNDEFINED;
 static atomic_t echo_nlmsg_seq;
@@ -468,7 +468,8 @@ add_mapping_response_exit:
 }
 EXPORT_SYMBOL(iwpm_add_mapping_cb);
 
-/* netlink attribute policy for the response to add and query mapping request */
+/* netlink attribute policy for the response to add and query mapping request
+ * and response with remote address info */
 static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
 	[IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 },
 	[IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) },
@@ -559,6 +560,76 @@ query_mapping_response_exit:
 }
 EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
 
+/*
+ * iwpm_remote_info_cb - Process a port mapper message, containing
+ * the remote connecting peer address info
+ */
+int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
+	struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
+	struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
+	struct iwpm_remote_info *rem_info;
+	const char *msg_type;
+	u8 nl_client;
+	int ret = -EINVAL;
+
+	msg_type = "Remote Mapping info";
+	if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
+				resp_query_policy, nltb, msg_type))
+		return ret;
+
+	nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+	if (!iwpm_valid_client(nl_client)) {
+		pr_info("%s: Invalid port mapper client = %d\n",
+				__func__, nl_client);
+		return ret;
+	}
+	atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+
+	local_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+	remote_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+	mapped_loc_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
+	mapped_rem_sockaddr = (struct sockaddr_storage *)
+			nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);
+
+	if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
+		mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
+		pr_info("%s: Sockaddr family doesn't match the requested one\n",
+				__func__);
+		return ret;
+	}
+	rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
+	if (!rem_info) {
+		pr_err("%s: Unable to allocate a remote info\n", __func__);
+		ret = -ENOMEM;
+		return ret;
+	}
+	memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	memcpy(&rem_info->remote_sockaddr, remote_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr,
+	       sizeof(struct sockaddr_storage));
+	rem_info->nl_client = nl_client;
+
+	iwpm_add_remote_info(rem_info);
+
+	iwpm_print_sockaddr(local_sockaddr,
+			"remote_info: Local sockaddr:");
+	iwpm_print_sockaddr(mapped_loc_sockaddr,
+			"remote_info: Mapped local sockaddr:");
+	iwpm_print_sockaddr(remote_sockaddr,
+			"remote_info: Remote sockaddr:");
+	iwpm_print_sockaddr(mapped_rem_sockaddr,
+			"remote_info: Mapped remote sockaddr:");
+	return ret;
+}
+EXPORT_SYMBOL(iwpm_remote_info_cb);
+
 /* netlink attribute policy for the received request for mapping info */
 static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
 	[IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 69e9f84c1605..a626795bf9c7 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -33,8 +33,10 @@
 
 #include "iwpm_util.h"
 
-#define IWPM_HASH_BUCKET_SIZE	512
-#define IWPM_HASH_BUCKET_MASK	(IWPM_HASH_BUCKET_SIZE - 1)
+#define IWPM_MAPINFO_HASH_SIZE	512
+#define IWPM_MAPINFO_HASH_MASK	(IWPM_MAPINFO_HASH_SIZE - 1)
+#define IWPM_REMINFO_HASH_SIZE	64
+#define IWPM_REMINFO_HASH_MASK	(IWPM_REMINFO_HASH_SIZE - 1)
 
 static LIST_HEAD(iwpm_nlmsg_req_list);
 static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
 static struct hlist_head *iwpm_hash_bucket;
 static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
 
+static struct hlist_head *iwpm_reminfo_bucket;
+static DEFINE_SPINLOCK(iwpm_reminfo_lock);
+
 static DEFINE_MUTEX(iwpm_admin_lock);
 static struct iwpm_admin_data iwpm_admin;
 
 int iwpm_init(u8 nl_client)
 {
+	int ret = 0;
 	if (iwpm_valid_client(nl_client))
 		return -EINVAL;
 	mutex_lock(&iwpm_admin_lock);
 	if (atomic_read(&iwpm_admin.refcount) == 0) {
-		iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
+		iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
 					sizeof(struct hlist_head), GFP_KERNEL);
 		if (!iwpm_hash_bucket) {
-			mutex_unlock(&iwpm_admin_lock);
+			ret = -ENOMEM;
 			pr_err("%s Unable to create mapinfo hash table\n", __func__);
-			return -ENOMEM;
+			goto init_exit;
+		}
+		iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
+					sizeof(struct hlist_head), GFP_KERNEL);
+		if (!iwpm_reminfo_bucket) {
+			kfree(iwpm_hash_bucket);
+			ret = -ENOMEM;
+			pr_err("%s Unable to create reminfo hash table\n", __func__);
+			goto init_exit;
 		}
 	}
 	atomic_inc(&iwpm_admin.refcount);
+init_exit:
 	mutex_unlock(&iwpm_admin_lock);
-	iwpm_set_valid(nl_client, 1);
-	return 0;
+	if (!ret) {
+		iwpm_set_valid(nl_client, 1);
+		pr_debug("%s: Mapinfo and reminfo tables are created\n",
+				__func__);
+	}
+	return ret;
 }
 EXPORT_SYMBOL(iwpm_init);
 
 static void free_hash_bucket(void);
+static void free_reminfo_bucket(void);
 
 int iwpm_exit(u8 nl_client)
 {
@@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client)
 	}
 	if (atomic_dec_and_test(&iwpm_admin.refcount)) {
 		free_hash_bucket();
-		pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
+		free_reminfo_bucket();
+		pr_debug("%s: Resources are destroyed\n", __func__);
 	}
 	mutex_unlock(&iwpm_admin_lock);
 	iwpm_set_valid(nl_client, 0);
@@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client)
 }
 EXPORT_SYMBOL(iwpm_exit);
 
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
 					       struct sockaddr_storage *);
 
 int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
@@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 	struct hlist_head *hash_bucket_head;
 	struct iwpm_mapping_info *map_info;
 	unsigned long flags;
+	int ret = -EINVAL;
 
 	if (!iwpm_valid_client(nl_client))
-		return -EINVAL;
+		return ret;
 	map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
 	if (!map_info) {
 		pr_err("%s: Unable to allocate a mapping info\n", __func__);
@@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		hash_bucket_head = get_hash_bucket_head(
+		hash_bucket_head = get_mapinfo_hash_bucket(
 					&map_info->local_sockaddr,
 					&map_info->mapped_sockaddr);
-		hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+		if (hash_bucket_head) {
+			hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+			ret = 0;
+		}
 	}
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(iwpm_create_mapinfo);
 
@@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		hash_bucket_head = get_hash_bucket_head(
+		hash_bucket_head = get_mapinfo_hash_bucket(
 					local_sockaddr,
 					mapped_local_addr);
+		if (!hash_bucket_head)
+			goto remove_mapinfo_exit;
+
 		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
 					hash_bucket_head, hlist_node) {
 
@@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
 			}
 		}
 	}
+remove_mapinfo_exit:
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
 	return ret;
 }
@@ -166,7 +195,7 @@ static void free_hash_bucket(void)
 
 	/* remove all the mapinfo data from the list */
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
-	for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+	for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
 					&iwpm_hash_bucket[i], hlist_node) {
 
@@ -180,6 +209,96 @@ static void free_hash_bucket(void)
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
 }
 
+static void free_reminfo_bucket(void)
+{
+	struct hlist_node *tmp_hlist_node;
+	struct iwpm_remote_info *rem_info;
+	unsigned long flags;
+	int i;
+
+	/* remove all the remote info from the list */
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) {
+		hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+					&iwpm_reminfo_bucket[i], hlist_node) {
+
+			hlist_del_init(&rem_info->hlist_node);
+			kfree(rem_info);
+		}
+	}
+	/* free the hash list */
+	kfree(iwpm_reminfo_bucket);
+	iwpm_reminfo_bucket = NULL;
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *,
+						struct sockaddr_storage *);
+
+void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
+{
+	struct hlist_head *hash_bucket_head;
+	unsigned long flags;
+
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	if (iwpm_reminfo_bucket) {
+		hash_bucket_head = get_reminfo_hash_bucket(
+					&rem_info->mapped_loc_sockaddr,
+					&rem_info->mapped_rem_sockaddr);
+		if (hash_bucket_head)
+			hlist_add_head(&rem_info->hlist_node, hash_bucket_head);
+	}
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+			 struct sockaddr_storage *mapped_rem_addr,
+			 struct sockaddr_storage *remote_addr,
+			 u8 nl_client)
+{
+	struct hlist_node *tmp_hlist_node;
+	struct hlist_head *hash_bucket_head;
+	struct iwpm_remote_info *rem_info = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (!iwpm_valid_client(nl_client)) {
+		pr_info("%s: Invalid client = %d\n", __func__, nl_client);
+		return ret;
+	}
+	spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+	if (iwpm_reminfo_bucket) {
+		hash_bucket_head = get_reminfo_hash_bucket(
+					mapped_loc_addr,
+					mapped_rem_addr);
+		if (!hash_bucket_head)
+			goto get_remote_info_exit;
+		hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+					hash_bucket_head, hlist_node) {
+
+			if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr,
+						mapped_loc_addr) &&
+			    !iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr,
+						mapped_rem_addr)) {
+
+				memcpy(remote_addr, &rem_info->remote_sockaddr,
+				       sizeof(struct sockaddr_storage));
+				iwpm_print_sockaddr(remote_addr,
+						"get_remote_info: Remote sockaddr:");
+
+				hlist_del_init(&rem_info->hlist_node);
+				kfree(rem_info);
+				ret = 0;
+				break;
+			}
+		}
+	}
+get_remote_info_exit:
+	spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(iwpm_get_remote_info);
+
 struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
 				u8 nl_client, gfp_t gfp)
 {
@@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
 	return hash;
 }
 
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
-					       *local_sockaddr,
-					       struct sockaddr_storage
-					       *mapped_sockaddr)
+static int get_hash_bucket(struct sockaddr_storage *a_sockaddr,
+			   struct sockaddr_storage *b_sockaddr, u32 *hash)
 {
-	u32 local_hash, mapped_hash, hash;
+	u32 a_hash, b_hash;
 
-	if (local_sockaddr->ss_family == AF_INET) {
-		local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
-		mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);
+	if (a_sockaddr->ss_family == AF_INET) {
+		a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr);
+		b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr);
 
-	} else if (local_sockaddr->ss_family == AF_INET6) {
-		local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
-		mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
+	} else if (a_sockaddr->ss_family == AF_INET6) {
+		a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr);
+		b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr);
 	} else {
 		pr_err("%s: Invalid sockaddr family\n", __func__);
-		return NULL;
+		return -EINVAL;
 	}
 
-	if (local_hash == mapped_hash) /* if port mapper isn't available */
-		hash = local_hash;
+	if (a_hash == b_hash) /* if port mapper isn't available */
+		*hash = a_hash;
 	else
-		hash = jhash_2words(local_hash, mapped_hash, 0);
+		*hash = jhash_2words(a_hash, b_hash, 0);
+	return 0;
+}
+
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage
+				*local_sockaddr, struct sockaddr_storage
+				*mapped_sockaddr)
+{
+	u32 hash;
+	int ret;
 
-	return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
+	ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash);
+	if (ret)
+		return NULL;
+	return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK];
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage
+				*mapped_loc_sockaddr, struct sockaddr_storage
+				*mapped_rem_sockaddr)
+{
+	u32 hash;
+	int ret;
+
+	ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash);
+	if (ret)
+		return NULL;
+	return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK];
 }
 
 static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
@@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
 		}
 		skb_num++;
 		spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
-		for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+		for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 			hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
 					     hlist_node) {
 				if (map_info->nl_client != nl_client)
@@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void)
 
 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
 	if (iwpm_hash_bucket) {
-		for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+		for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
 			if (!hlist_empty(&iwpm_hash_bucket[i])) {
 				full_bucket = 1;
 				break;
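
The bucket selection that get_hash_bucket() factors out - one index derived from two address hashes - can be sketched in plain C. mix32() below is an illustrative stand-in for the kernel's jhash_2words(), and BUCKETS mirrors IWPM_MAPINFO_HASH_SIZE; neither is the kernel's implementation.

#include <stdint.h>

#define BUCKETS 512	/* must stay a power of two for the mask to work */

static uint32_t mix32(uint32_t a, uint32_t b)
{
	uint32_t h = a * 0x9e3779b1u;	/* illustrative mixer, not jhash */

	h ^= b + 0x9e3779b9u + (h << 6) + (h >> 2);
	return h;
}

static unsigned int bucket_of(uint32_t a_hash, uint32_t b_hash)
{
	/* equal hashes mean both addresses are identical (no port mapper
	 * in the path), so the mixing step is skipped, as in the patch */
	uint32_t hash = (a_hash == b_hash) ? a_hash : mix32(a_hash, b_hash);

	return hash & (BUCKETS - 1);
}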
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 9777c869a140..ee2d9ff095be 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -76,6 +76,14 @@ struct iwpm_mapping_info {
 	u8     nl_client;
 };
 
+struct iwpm_remote_info {
+	struct hlist_node hlist_node;
+	struct sockaddr_storage remote_sockaddr;
+	struct sockaddr_storage mapped_loc_sockaddr;
+	struct sockaddr_storage mapped_rem_sockaddr;
+	u8     nl_client;
+};
+
 struct iwpm_admin_data {
 	atomic_t refcount;
 	atomic_t nlmsg_seq;
@@ -128,6 +136,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
 int iwpm_get_nlmsg_seq(void);
 
 /**
+ * iwpm_add_remote_info - Add remote address info of the connecting peer
+ *                        to the remote info hash table
+ * @reminfo: The remote info to be added
+ */
+void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
+
+/**
  * iwpm_valid_client - Check if the port mapper client is valid
  * @nl_client: The index of the netlink client
  *
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8b8cc6fa0ab0..40becdb3196e 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
 	int remove_existing_mapping = 0;
 	int ret = 0;
 
-	mutex_lock(&umem->odp_data->umem_mutex);
 	/*
 	 * Note: we avoid writing if seq is different from the initial seq, to
 	 * handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	mutex_unlock(&umem->odp_data->umem_mutex);
-
 	/* On Demand Paging - avoid pinning the page */
 	if (umem->context->invalidate_range || !stored_page)
 		put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 
 		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
 		user_virt += npages << PAGE_SHIFT;
+		mutex_lock(&umem->odp_data->umem_mutex);
 		for (j = 0; j < npages; ++j) {
 			ret = ib_umem_odp_map_dma_single_page(
 				umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 				break;
 			k++;
 		}
+		mutex_unlock(&umem->odp_data->umem_mutex);
 
 		if (ret < 0) {
 			/* Release left over pages when handling errors. */
@@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 	 * faults from completion. We might be racing with other
 	 * invalidations, so we must make sure we free each page only
 	 * once. */
+	mutex_lock(&umem->odp_data->umem_mutex);
 	for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
 		idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
-		mutex_lock(&umem->odp_data->umem_mutex);
 		if (umem->odp_data->page_list[idx]) {
 			struct page *page = umem->odp_data->page_list[idx];
-			struct page *head_page = compound_head(page);
 			dma_addr_t dma = umem->odp_data->dma_list[idx];
 			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
 
@@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 
 			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
 					  DMA_BIDIRECTIONAL);
-			if (dma & ODP_WRITE_ALLOWED_BIT)
+			if (dma & ODP_WRITE_ALLOWED_BIT) {
+				struct page *head_page = compound_head(page);
 				/*
 				 * set_page_dirty prefers being called with
 				 * the page lock. However, MMU notifiers are
@@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 				 * be removed.
 				 */
 				set_page_dirty(head_page);
+			}
 			/* on demand pinning support */
 			if (!umem->context->invalidate_range)
 				put_page(page);
 			umem->odp_data->page_list[idx] = NULL;
 			umem->odp_data->dma_list[idx] = 0;
 		}
-		mutex_unlock(&umem->odp_data->umem_mutex);
 	}
+	mutex_unlock(&umem->odp_data->umem_mutex);
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
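
The shape of this locking change - one critical section around the whole page walk instead of a lock/unlock per page - is sketched below with pthreads. Types and names are illustrative, not the kernel's, and this is a minimal model of the pattern, not the ODP code itself.

#include <pthread.h>
#include <stddef.h>

struct page_table {
	pthread_mutex_t lock;
	void *page[4096];
};

static void clear_range(struct page_table *t, size_t start, size_t end)
{
	size_t i;

	pthread_mutex_lock(&t->lock);	/* was taken inside the loop before */
	for (i = start; i < end; i++)
		t->page[i] = NULL;	/* whole walk is now one atomic unit */
	pthread_mutex_unlock(&t->lock);
}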
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 57176ddd4c50..3ad8dc798f52 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
 	       sizeof(ep->com.mapped_remote_addr));
 }
 
+static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
+{
+	int ret;
+
+	print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
+	print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");
+
+	ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
+				   &child_ep->com.mapped_remote_addr,
+				   &child_ep->com.remote_addr, RDMA_NL_C4IW);
+	if (ret)
+		PDBG("Unable to find remote peer addr info - err %d\n", ret);
+
+	return ret;
+}
+
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
 		     unsigned int *idx, int use_ts, int ipv6)
 {
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep)
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
 		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		opt2 |= T5_ISS_F;
 	}
 	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
 
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	     status, status2errno(status));
 
 	if (is_neg_adv(status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Connection problems for atid %u status %u (%s)\n",
-			 atid, status, neg_adv_str(status));
+		PDBG("%s Connection problems for atid %u status %u (%s)\n",
+		     __func__, atid, status, neg_adv_str(status));
+		ep->stats.connect_neg_adv++;
+		mutex_lock(&dev->rdev.stats.lock);
+		dev->rdev.stats.neg_adv++;
+		mutex_unlock(&dev->rdev.stats.lock);
 		return 0;
 	}
 
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		u32 isn = (prandom_u32() & ~7UL) - 1;
 		opt2 |= T5_OPT_2_VALID_F;
 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		opt2 |= T5_ISS_F;
 		rpl5 = (void *)rpl;
 		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
 		if (peer2peer)
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
+
+	/*
+	 * The mapped_local and mapped_remote addresses get setup with
+	 * the actual 4-tuple.  The local address will be based on the
+	 * actual local address of the connection, but on the port number
+	 * of the parent listening endpoint.  The remote address is
+	 * setup based on a query to the IWPM since we don't know what it
+	 * originally was before mapping.  If no mapping was done, then
+	 * mapped_remote == remote, and mapped_local == local.
+	 */
 	if (iptype == 4) {
 		struct sockaddr_in *sin = (struct sockaddr_in *)
-			&child_ep->com.local_addr;
+			&child_ep->com.mapped_local_addr;
+
 		sin->sin_family = PF_INET;
 		sin->sin_port = local_port;
 		sin->sin_addr.s_addr = *(__be32 *)local_ip;
-		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+
+		sin = (struct sockaddr_in *)&child_ep->com.local_addr;
+		sin->sin_family = PF_INET;
+		sin->sin_port = ((struct sockaddr_in *)
+				 &parent_ep->com.local_addr)->sin_port;
+		sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+		sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
 		sin->sin_family = PF_INET;
 		sin->sin_port = peer_port;
 		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
 	} else {
 		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
-			&child_ep->com.local_addr;
+			&child_ep->com.mapped_local_addr;
+
 		sin6->sin6_family = PF_INET6;
 		sin6->sin6_port = local_port;
 		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
-		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+
+		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
+		sin6->sin6_family = PF_INET6;
+		sin6->sin6_port = ((struct sockaddr_in6 *)
+				   &parent_ep->com.local_addr)->sin6_port;
+		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+
+		sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
 		sin6->sin6_family = PF_INET6;
 		sin6->sin6_port = peer_port;
 		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
 	}
+	memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
+	       sizeof(child_ep->com.remote_addr));
+	get_remote_addr(parent_ep, child_ep);
+
 	c4iw_get_ep(&parent_ep->com);
 	child_ep->parent_ep = parent_ep;
 	child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	ep = lookup_tid(t, tid);
 	if (is_neg_adv(req->status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Negative advice on abort - tid %u status %d (%s)\n",
-			 ep->hwtid, req->status, neg_adv_str(req->status));
+		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+		     __func__, ep->hwtid, req->status,
+		     neg_adv_str(req->status));
+		ep->stats.abort_neg_adv++;
+		mutex_lock(&dev->rdev.stats.lock);
+		dev->rdev.stats.neg_adv++;
+		mutex_unlock(&dev->rdev.stats.lock);
 		return 0;
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
 	 * TP will ignore any value > 0 for MSS index.
 	 */
 	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
-	req->cookie = (unsigned long)skb;
+	req->cookie = (uintptr_t)skb;
 
 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
 	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}
 	if (is_neg_adv(req->status)) {
-		dev_warn(&dev->rdev.lldi.pdev->dev,
-			 "Negative advice on abort - tid %u status %d (%s)\n",
-			 ep->hwtid, req->status, neg_adv_str(req->status));
+		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+		     __func__, ep->hwtid, req->status,
+		     neg_adv_str(req->status));
+		ep->stats.abort_neg_adv++;
+		dev->rdev.stats.neg_adv++;
 		kfree_skb(skb);
 		return 0;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ab7692ac2044..68ddb3710215 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (unsigned long) &wr_wait;
+	res_wr->cookie = (uintptr_t)&wr_wait;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			FW_RI_RES_WR_NRES_V(1) |
 			FW_WR_COMPL_F);
 	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-	res_wr->cookie = (unsigned long) &wr_wait;
+	res_wr->cookie = (uintptr_t)&wr_wait;
 	res = res_wr->res;
 	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
 	res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 		goto err4;
 
 	cq->gen = 1;
-	cq->gts = rdev->lldi.gts_reg;
 	cq->rdev = rdev;
 	if (user) {
-		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
-					(cq->cqid << rdev->cqshift);
-		cq->ugts &= PAGE_MASK;
+		u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;
+
+		cq->ugts = (u64)rdev->bar2_pa + off;
+	} else if (is_t4(rdev->lldi.adapter_type)) {
+		cq->gts = rdev->lldi.gts_reg;
+		cq->qid_mask = -1U;
+	} else {
+		u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;
+
+		cq->gts = rdev->bar2_kva + off;
+		cq->qid_mask = rdev->qpmask;
 	}
 	return 0;
 err4:
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	}
 	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
 	     __func__, chp->cq.cqid, chp, chp->cq.size,
-	     chp->cq.memsize,
-	     (unsigned long long) chp->cq.dma_addr);
+	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
 	return &chp->ibcq;
 err5:
 	kfree(mm2);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8fb295e4a9ab..7e895d714b19 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
 	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
 	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
 	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
 	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
 	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
 };
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
 	int prev_ts_set = 0;
 	int idx, end;
 
-#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
 
 	idx = atomic_read(&dev->rdev.wr_log_idx) &
 		(dev->rdev.wr_log_size - 1);
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v)
 		   dev->rdev.stats.act_ofld_conn_fails);
 	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
 		   dev->rdev.stats.pas_ofld_conn_fails);
+	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
 	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
 	return 0;
 }
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data)
 		cc = snprintf(epd->buf + epd->pos, space,
 			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
 			      "history 0x%lx hwtid %d atid %d "
+			      "conn_na %u abort_na %u "
 			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
 			      ep, ep->com.cm_id, ep->com.qp,
 			      (int)ep->com.state, ep->com.flags,
 			      ep->com.history, ep->hwtid, ep->atid,
+			      ep->stats.connect_neg_adv,
+			      ep->stats.abort_neg_adv,
 			      &lsin->sin_addr, ntohs(lsin->sin_port),
 			      ntohs(mapped_lsin->sin_port),
 			      &rsin->sin_addr, ntohs(rsin->sin_port),
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data)
 		cc = snprintf(epd->buf + epd->pos, space,
 			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
 			      "history 0x%lx hwtid %d atid %d "
+			      "conn_na %u abort_na %u "
 			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
 			      ep, ep->com.cm_id, ep->com.qp,
 			      (int)ep->com.state, ep->com.flags,
 			      ep->com.history, ep->hwtid, ep->atid,
+			      ep->stats.connect_neg_adv,
+			      ep->stats.abort_neg_adv,
 			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
 			      ntohs(mapped_lsin6->sin6_port),
 			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
@@ -765,6 +773,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	c4iw_init_dev_ucontext(rdev, &rdev->uctx);
 
 	/*
+	 * This implementation assumes udb_density == ucq_density!  Eventually
+	 * we might need to support this but for now fail the open. Also the
+	 * cqid and qpid range must match for now.
+	 */
+	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
+		pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
+		       rdev->lldi.ucq_density);
+		err = -EINVAL;
+		goto err1;
+	}
+	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
+	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
+		pr_err(MOD "%s: unsupported qp and cq id ranges "
+		       "qp start %u size %u cq start %u size %u\n",
+		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
+		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
+		       rdev->lldi.vr->cq.size);
+		err = -EINVAL;
+		goto err1;
+	}
+
+	/*
 	 * qpshift is the number of bits to shift the qpid left in order
 	 * to get the correct address of the doorbell for that qp.
 	 */
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	     rdev->lldi.vr->qp.size,
 	     rdev->lldi.vr->cq.start,
 	     rdev->lldi.vr->cq.size);
-	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
+	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
 	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
 	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
-	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
+	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
 	     rdev->lldi.db_reg,
 	     rdev->lldi.gts_reg,
 	     rdev->qpshift, rdev->qpmask,
@@ -1355,7 +1386,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 					  t4_sq_host_wq_pidx(&qp->wq),
 					  t4_sq_wq_size(&qp->wq));
 		if (ret) {
-			pr_err(KERN_ERR MOD "%s: Fatal error - "
+			pr_err(MOD "%s: Fatal error - "
 			       "DB overflow recovery failed - "
 			       "error syncing SQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
@@ -1371,7 +1402,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
 					  t4_rq_wq_size(&qp->wq));
 
 		if (ret) {
-			pr_err(KERN_ERR MOD "%s: Fatal error - "
+			pr_err(MOD "%s: Fatal error - "
 			       "DB overflow recovery failed - "
 			       "error syncing RQ qid %u\n",
 			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d87e1650f643..97bb5550a6cf 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -137,6 +137,7 @@ struct c4iw_stats {
 	u64  tcam_full;
 	u64  act_ofld_conn_fails;
 	u64  pas_ofld_conn_fails;
+	u64  neg_adv;
 };
 
 struct c4iw_hw_queue {
@@ -814,6 +815,11 @@ struct c4iw_listen_ep {
 	int backlog;
 };
 
+struct c4iw_ep_stats {
+	unsigned connect_neg_adv;
+	unsigned abort_neg_adv;
+};
+
 struct c4iw_ep {
 	struct c4iw_ep_common com;
 	struct c4iw_ep *parent_ep;
@@ -846,6 +852,7 @@ struct c4iw_ep {
 	unsigned int retry_count;
 	int snd_win;
 	int rcv_win;
+	struct c4iw_ep_stats stats;
 };
 
 static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 3ef0cf9f5c44..cff815b91707 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
144 if (i == (num_wqe-1)) { 144 if (i == (num_wqe-1)) {
145 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | 145 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
146 FW_WR_COMPL_F); 146 FW_WR_COMPL_F);
147 req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; 147 req->wr.wr_lo = (__force __be64)&wr_wait;
148 } else 148 } else
149 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); 149 req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
150 req->wr.wr_mid = cpu_to_be32( 150 req->wr.wr_mid = cpu_to_be32(
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
676 mhp->attr.zbva = 0; 676 mhp->attr.zbva = 0;
677 mhp->attr.va_fbo = 0; 677 mhp->attr.va_fbo = 0;
678 mhp->attr.page_size = 0; 678 mhp->attr.page_size = 0;
679 mhp->attr.len = ~0UL; 679 mhp->attr.len = ~0ULL;
680 mhp->attr.pbl_size = 0; 680 mhp->attr.pbl_size = 0;
681 681
682 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, 682 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
683 FW_RI_STAG_NSMR, mhp->attr.perms, 683 FW_RI_STAG_NSMR, mhp->attr.perms,
684 mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0); 684 mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
685 if (ret) 685 if (ret)
686 goto err1; 686 goto err1;
687 687
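The ~0UL -> ~0ULL change above matters only on 32-bit builds: mhp->attr.len is a 64-bit field, and on a 32-bit kernel ~0UL is a 32-bit all-ones value that zero-extends when widened into it. A standalone C illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t len_ul  = ~0UL;	/* 0x00000000ffffffff on a 32-bit build */
	uint64_t len_ull = ~0ULL;	/* all ones on every build */

	printf("~0UL  widened to u64: 0x%016llx\n", (unsigned long long)len_ul);
	printf("~0ULL widened to u64: 0x%016llx\n", (unsigned long long)len_ull);
	return 0;
}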
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 15cae5a31018..389ced335bc5 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
275 FW_RI_RES_WR_NRES_V(2) | 275 FW_RI_RES_WR_NRES_V(2) |
276 FW_WR_COMPL_F); 276 FW_WR_COMPL_F);
277 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); 277 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
278 res_wr->cookie = (unsigned long) &wr_wait; 278 res_wr->cookie = (uintptr_t)&wr_wait;
279 res = res_wr->res; 279 res = res_wr->res;
280 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; 280 res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
281 res->u.sqrq.op = FW_RI_RES_OP_WRITE; 281 res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1209 wqe->flowid_len16 = cpu_to_be32( 1209 wqe->flowid_len16 = cpu_to_be32(
1210 FW_WR_FLOWID_V(ep->hwtid) | 1210 FW_WR_FLOWID_V(ep->hwtid) |
1211 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); 1211 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1212 wqe->cookie = (unsigned long) &ep->com.wr_wait; 1212 wqe->cookie = (uintptr_t)&ep->com.wr_wait;
1213 1213
1214 wqe->u.fini.type = FW_RI_TYPE_FINI; 1214 wqe->u.fini.type = FW_RI_TYPE_FINI;
1215 ret = c4iw_ofld_send(&rhp->rdev, skb); 1215 ret = c4iw_ofld_send(&rhp->rdev, skb);
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1279 FW_WR_FLOWID_V(qhp->ep->hwtid) | 1279 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1280 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); 1280 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
1281 1281
1282 wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait; 1282 wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
1283 1283
1284 wqe->u.init.type = FW_RI_TYPE_INIT; 1284 wqe->u.init.type = FW_RI_TYPE_INIT;
1285 wqe->u.init.mpareqbit_p2ptype = 1285 wqe->u.init.mpareqbit_p2ptype =
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1766 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); 1766 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
1767 insert_mmap(ucontext, mm2); 1767 insert_mmap(ucontext, mm2);
1768 mm3->key = uresp.sq_db_gts_key; 1768 mm3->key = uresp.sq_db_gts_key;
1769 mm3->addr = (__force unsigned long) qhp->wq.sq.udb; 1769 mm3->addr = (__force unsigned long)qhp->wq.sq.udb;
1770 mm3->len = PAGE_SIZE; 1770 mm3->len = PAGE_SIZE;
1771 insert_mmap(ucontext, mm3); 1771 insert_mmap(ucontext, mm3);
1772 mm4->key = uresp.rq_db_gts_key; 1772 mm4->key = uresp.rq_db_gts_key;
1773 mm4->addr = (__force unsigned long) qhp->wq.rq.udb; 1773 mm4->addr = (__force unsigned long)qhp->wq.rq.udb;
1774 mm4->len = PAGE_SIZE; 1774 mm4->len = PAGE_SIZE;
1775 insert_mmap(ucontext, mm4); 1775 insert_mmap(ucontext, mm4);
1776 if (mm5) { 1776 if (mm5) {
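The qp.c hunks above replace (unsigned long) casts with (uintptr_t) when stashing a wait-object pointer in a firmware cookie; uintptr_t is the integer type defined to round-trip a pointer, so the intent survives 32- and 64-bit builds alike. A self-contained sketch of the round trip (the struct is a stand-in, not the driver's c4iw_wr_wait):

#include <stdio.h>
#include <stdint.h>

struct wr_wait { int done; };	/* stand-in for the driver's wait object */

int main(void)
{
	struct wr_wait w = { 0 };

	/* post side: pack the pointer into a 64-bit cookie */
	uint64_t cookie = (uintptr_t)&w;

	/* completion side: recover the pointer and complete the wait */
	struct wr_wait *wp = (struct wr_wait *)(uintptr_t)cookie;
	wp->done = 1;

	printf("round-trip ok: %d\n", w.done);
	return 0;
}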
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 871cdcac7be2..7f2a6c244d25 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -539,6 +539,7 @@ struct t4_cq {
539 size_t memsize; 539 size_t memsize;
540 __be64 bits_type_ts; 540 __be64 bits_type_ts;
541 u32 cqid; 541 u32 cqid;
542 u32 qid_mask;
542 int vector; 543 int vector;
543 u16 size; /* including status page */ 544 u16 size; /* including status page */
544 u16 cidx; 545 u16 cidx;
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
563 set_bit(CQ_ARMED, &cq->flags); 564 set_bit(CQ_ARMED, &cq->flags);
564 while (cq->cidx_inc > CIDXINC_M) { 565 while (cq->cidx_inc > CIDXINC_M) {
565 val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | 566 val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
566 INGRESSQID_V(cq->cqid); 567 INGRESSQID_V(cq->cqid & cq->qid_mask);
567 writel(val, cq->gts); 568 writel(val, cq->gts);
568 cq->cidx_inc -= CIDXINC_M; 569 cq->cidx_inc -= CIDXINC_M;
569 } 570 }
570 val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | 571 val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
571 INGRESSQID_V(cq->cqid); 572 INGRESSQID_V(cq->cqid & cq->qid_mask);
572 writel(val, cq->gts); 573 writel(val, cq->gts);
573 cq->cidx_inc = 0; 574 cq->cidx_inc = 0;
574 return 0; 575 return 0;
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq)
601 u32 val; 602 u32 val;
602 603
603 val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | 604 val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
604 INGRESSQID_V(cq->cqid); 605 INGRESSQID_V(cq->cqid & cq->qid_mask);
605 writel(val, cq->gts); 606 writel(val, cq->gts);
606 cq->cidx_inc = 0; 607 cq->cidx_inc = 0;
607 } 608 }
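The new qid_mask use above sits inside the existing credit-return loop: t4_arm_cq hands back cidx_inc credits in chunks no larger than the CIDXINC field can hold, masking the cqid before it goes into the INGRESSQID field. A userspace sketch of that loop; the field positions and widths here are illustrative stand-ins, not the hardware definitions:

#include <stdio.h>

/* toy stand-ins for the CIDXINC_V/INGRESSQID_V register helpers */
#define TOY_CIDXINC_M		0x7ffU
#define TOY_CIDXINC_V(x)	((x) << 0)
#define TOY_INGRESSQID_V(x)	((x) << 16)

int main(void)
{
	unsigned int cidx_inc = 3000;	/* pending credits, toy value */
	unsigned int cqid = 0x1234, qid_mask = 0x3ff;
	unsigned int val;

	/* return credits in field-sized chunks, as t4_arm_cq does */
	while (cidx_inc > TOY_CIDXINC_M) {
		val = TOY_CIDXINC_V(TOY_CIDXINC_M) |
		      TOY_INGRESSQID_V(cqid & qid_mask);
		printf("writel 0x%08x\n", val);	/* stands in for writel(val, cq->gts) */
		cidx_inc -= TOY_CIDXINC_M;
	}
	val = TOY_CIDXINC_V(cidx_inc) | TOY_INGRESSQID_V(cqid & qid_mask);
	printf("writel 0x%08x\n", val);
	return 0;
}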
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 5e53327fc647..343e8daf2270 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -848,6 +848,8 @@ enum { /* TCP congestion control algorithms */
848#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) 848#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
849#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) 849#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
850 850
851#define CONG_CNTRL_VALID (1 << 18) 851#define T5_ISS_S 18
852#define T5_ISS_V(x) ((x) << T5_ISS_S)
853#define T5_ISS_F T5_ISS_V(1U)
852 854
853#endif /* _T4FW_RI_API_H_ */ 855#endif /* _T4FW_RI_API_H_ */
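The hunk above converts an ad-hoc bit define into the _S/_V/_F naming scheme used throughout these headers: _S is the shift, _V(x) places a value at that shift, and _F is the single-bit flag form. A small demonstration using the macros exactly as defined above:

#include <stdio.h>

#define T5_ISS_S	18
#define T5_ISS_V(x)	((x) << T5_ISS_S)
#define T5_ISS_F	T5_ISS_V(1U)

int main(void)
{
	unsigned int opt2 = 0;

	opt2 |= T5_ISS_F;	/* set the ISS bit */
	printf("opt2 = 0x%08x, iss = %u\n",
	       opt2, (opt2 >> T5_ISS_S) & 1U);	/* read it back */
	return 0;
}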
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/infiniband/hw/ehca/ehca_mcast.c
index 120aedf9f989..cec181532924 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/infiniband/hw/ehca/ehca_mcast.c
@@ -77,7 +77,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
77 return -EINVAL; 77 return -EINVAL;
78 } 78 }
79 79
80 memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); 80 memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
81 81
82 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); 82 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
83 interface_id = be64_to_cpu(my_gid.global.interface_id); 83 interface_id = be64_to_cpu(my_gid.global.interface_id);
@@ -114,7 +114,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
114 return -EINVAL; 114 return -EINVAL;
115 } 115 }
116 116
117 memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); 117 memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
118 118
119 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); 119 subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
120 interface_id = be64_to_cpu(my_gid.global.interface_id); 120 interface_id = be64_to_cpu(my_gid.global.interface_id);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 57070c529dfb..cc64400d41ac 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1569,8 +1569,7 @@ static void reset_gids_task(struct work_struct *work)
1569 MLX4_CMD_TIME_CLASS_B, 1569 MLX4_CMD_TIME_CLASS_B,
1570 MLX4_CMD_WRAPPED); 1570 MLX4_CMD_WRAPPED);
1571 if (err) 1571 if (err)
1572 pr_warn(KERN_WARNING 1572 pr_warn("set port %d command failed\n", gw->port);
1573 "set port %d command failed\n", gw->port);
1574 } 1573 }
1575 1574
1576 mlx4_free_cmd_mailbox(dev, mailbox); 1575 mlx4_free_cmd_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 4d7024b899cb..d35f62d4f4c5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1392,7 +1392,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1392 1392
1393 if (ah->ah_flags & IB_AH_GRH) { 1393 if (ah->ah_flags & IB_AH_GRH) {
1394 if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { 1394 if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
1395 pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n", 1395 pr_err("sgid_index (%u) too large. max is %d\n",
1396 ah->grh.sgid_index, gen->port[port - 1].gid_table_len); 1396 ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
1397 return -EINVAL; 1397 return -EINVAL;
1398 } 1398 }
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b2a6dc8ea99..9f9d5c563a61 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = {
116 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, 116 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
117 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, 117 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
118 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, 118 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
119 [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
119 [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, 120 [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
120 [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, 121 [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
121 [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} 122 [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6f09a72e78d7..72b43417cbe3 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic,
596 memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); 596 memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
597} 597}
598 598
599static void record_sockaddr_info(struct sockaddr_storage *addr_info,
600 nes_addr_t *ip_addr, u16 *port_num)
601{
602 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
603
604 if (in_addr->sin_family == AF_INET) {
605 *ip_addr = ntohl(in_addr->sin_addr.s_addr);
606 *port_num = ntohs(in_addr->sin_port);
607 }
608}
609
599/* 610/*
600 * nes_record_pm_msg - Save the received mapping info 611 * nes_record_pm_msg - Save the received mapping info
601 */ 612 */
602static void nes_record_pm_msg(struct nes_cm_info *cm_info, 613static void nes_record_pm_msg(struct nes_cm_info *cm_info,
603 struct iwpm_sa_data *pm_msg) 614 struct iwpm_sa_data *pm_msg)
604{ 615{
605 struct sockaddr_in *mapped_loc_addr = 616 record_sockaddr_info(&pm_msg->mapped_loc_addr,
606 (struct sockaddr_in *)&pm_msg->mapped_loc_addr; 617 &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
607 struct sockaddr_in *mapped_rem_addr = 618
608 (struct sockaddr_in *)&pm_msg->mapped_rem_addr; 619 record_sockaddr_info(&pm_msg->mapped_rem_addr,
609 620 &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
610 if (mapped_loc_addr->sin_family == AF_INET) { 621}
611 cm_info->mapped_loc_addr = 622
612 ntohl(mapped_loc_addr->sin_addr.s_addr); 623/*
 613 cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port); 624 * nes_get_remote_addr - Get the address info of the remote connecting peer
614 } 625 */
615 if (mapped_rem_addr->sin_family == AF_INET) { 626static int nes_get_remote_addr(struct nes_cm_node *cm_node)
616 cm_info->mapped_rem_addr = 627{
617 ntohl(mapped_rem_addr->sin_addr.s_addr); 628 struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
618 cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port); 629 struct sockaddr_storage remote_addr;
619 } 630 int ret;
631
632 nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
633 htons(cm_node->mapped_loc_port), &mapped_loc_addr);
634 nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
635 htons(cm_node->mapped_rem_port), &mapped_rem_addr);
636
637 ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
638 &remote_addr, RDMA_NL_NES);
639 if (ret)
640 nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
641 else
642 record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
643 &cm_node->rem_port);
644 return ret;
620} 645}
621 646
622/** 647/**
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
1566 return NULL; 1591 return NULL;
1567 1592
1568 /* set our node specific transport info */ 1593 /* set our node specific transport info */
1569 cm_node->loc_addr = cm_info->loc_addr; 1594 if (listener) {
1595 cm_node->loc_addr = listener->loc_addr;
1596 cm_node->loc_port = listener->loc_port;
1597 } else {
1598 cm_node->loc_addr = cm_info->loc_addr;
1599 cm_node->loc_port = cm_info->loc_port;
1600 }
1570 cm_node->rem_addr = cm_info->rem_addr; 1601 cm_node->rem_addr = cm_info->rem_addr;
1571 cm_node->loc_port = cm_info->loc_port;
1572 cm_node->rem_port = cm_info->rem_port; 1602 cm_node->rem_port = cm_info->rem_port;
1573 1603
1574 cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; 1604 cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
2151 cm_node->state = NES_CM_STATE_ESTABLISHED; 2181 cm_node->state = NES_CM_STATE_ESTABLISHED;
2152 if (datasize) { 2182 if (datasize) {
2153 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; 2183 cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
2184 nes_get_remote_addr(cm_node);
2154 handle_rcv_mpa(cm_node, skb); 2185 handle_rcv_mpa(cm_node, skb);
2155 } else { /* rcvd ACK only */ 2186 } else { /* rcvd ACK only */
2156 dev_kfree_skb_any(skb); 2187 dev_kfree_skb_any(skb);
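The record_sockaddr_info() helper factored out above is plain sockaddr plumbing, so it can be exercised outside the driver. The sketch below reuses its body verbatim; nes_addr_t is typedef'd here as a guess at a host-order IPv4 address, and the address/port values are arbitrary:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

typedef unsigned int nes_addr_t;	/* assumed: host-order IPv4 address */

static void record_sockaddr_info(struct sockaddr_storage *addr_info,
				 nes_addr_t *ip_addr, unsigned short *port_num)
{
	struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;

	if (in_addr->sin_family == AF_INET) {
		*ip_addr = ntohl(in_addr->sin_addr.s_addr);
		*port_num = ntohs(in_addr->sin_port);
	}
}

int main(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *in = (struct sockaddr_in *)&ss;
	nes_addr_t ip;
	unsigned short port;

	memset(&ss, 0, sizeof(ss));
	in->sin_family = AF_INET;
	in->sin_port = htons(4791);
	inet_pton(AF_INET, "192.0.2.1", &in->sin_addr);

	record_sockaddr_info(&ss, &ip, &port);
	printf("ip 0x%08x port %u\n", ip, (unsigned)port);
	return 0;
}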
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index c9780d919769..b396344fae16 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
40#include <be_roce.h> 40#include <be_roce.h>
41#include "ocrdma_sli.h" 41#include "ocrdma_sli.h"
42 42
43#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u" 43#define OCRDMA_ROCE_DRV_VERSION "10.6.0.0"
44 44
45#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" 45#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
46#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" 46#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -515,6 +515,8 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
515 memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); 515 memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
516 if (rdma_is_multicast_addr(&in6)) 516 if (rdma_is_multicast_addr(&in6))
517 rdma_get_mcast_mac(&in6, mac_addr); 517 rdma_get_mcast_mac(&in6, mac_addr);
518 else if (rdma_link_local_addr(&in6))
519 rdma_get_ll_mac(&in6, mac_addr);
518 else 520 else
519 memcpy(mac_addr, ah_attr->dmac, ETH_ALEN); 521 memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
520 return 0; 522 return 0;
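The new rdma_link_local_addr branch above completes the dmac resolution cases: multicast GIDs map to a derived Ethernet MAC, link-local GIDs embed the MAC, and everything else uses the resolved ah_attr->dmac. The multicast mapping is mechanical; the sketch below mirrors what rdma_get_mcast_mac() computes (33:33 followed by the low four bytes of the group address), verifiable from userspace:

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* same mapping rdma_get_mcast_mac() applies to an IPv6 multicast GID */
static void mcast_mac_from_gid(const struct in6_addr *in6, unsigned char *mac)
{
	int i;

	mac[0] = 0x33;
	mac[1] = 0x33;
	for (i = 2; i < 6; i++)
		mac[i] = in6->s6_addr[i + 10];
}

int main(void)
{
	struct in6_addr in6;
	unsigned char mac[6];
	int i;

	inet_pton(AF_INET6, "ff0e::1:ff00:1234", &in6);
	mcast_mac_from_gid(&in6, mac);
	for (i = 0; i < 6; i++)
		printf("%02x%c", mac[i], i < 5 ? ':' : '\n');
	return 0;
}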
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index d812904f3984..f5a5ea836dbd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -56,7 +56,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
56 vlan_tag = attr->vlan_id; 56 vlan_tag = attr->vlan_id;
57 if (!vlan_tag || (vlan_tag > 0xFFF)) 57 if (!vlan_tag || (vlan_tag > 0xFFF))
58 vlan_tag = dev->pvid; 58 vlan_tag = dev->pvid;
59 if (vlan_tag && (vlan_tag < 0x1000)) { 59 if (vlan_tag || dev->pfc_state) {
60 if (!vlan_tag) {
61 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
62 dev->id);
63 pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
64 dev->id);
65 }
60 eth.eth_type = cpu_to_be16(0x8100); 66 eth.eth_type = cpu_to_be16(0x8100);
61 eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); 67 eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
62 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; 68 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
@@ -121,7 +127,9 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
121 goto av_conf_err; 127 goto av_conf_err;
122 } 128 }
123 129
124 if (pd->uctx) { 130 if ((pd->uctx) &&
131 (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
132 (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
125 status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid, 133 status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
126 attr->dmac, &attr->vlan_id); 134 attr->dmac, &attr->vlan_id);
127 if (status) { 135 if (status) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0c9e95909a64..47615ff33bc6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -933,12 +933,18 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
933 struct ocrdma_eqe eqe; 933 struct ocrdma_eqe eqe;
934 struct ocrdma_eqe *ptr; 934 struct ocrdma_eqe *ptr;
935 u16 cq_id; 935 u16 cq_id;
936 u8 mcode;
936 int budget = eq->cq_cnt; 937 int budget = eq->cq_cnt;
937 938
938 do { 939 do {
939 ptr = ocrdma_get_eqe(eq); 940 ptr = ocrdma_get_eqe(eq);
940 eqe = *ptr; 941 eqe = *ptr;
941 ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); 942 ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
943 mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
944 >> OCRDMA_EQE_MAJOR_CODE_SHIFT;
945 if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
946 pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
947 eq->q.id, eqe.id_valid);
942 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) 948 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
943 break; 949 break;
944 950
@@ -1434,27 +1440,30 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1434 struct ocrdma_alloc_pd_range_rsp *rsp; 1440 struct ocrdma_alloc_pd_range_rsp *rsp;
1435 1441
1436 /* Pre allocate the DPP PDs */ 1442 /* Pre allocate the DPP PDs */
1437 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); 1443 if (dev->attr.max_dpp_pds) {
1438 if (!cmd) 1444 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
1439 return -ENOMEM; 1445 sizeof(*cmd));
1440 cmd->pd_count = dev->attr.max_dpp_pds; 1446 if (!cmd)
1441 cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; 1447 return -ENOMEM;
1442 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1448 cmd->pd_count = dev->attr.max_dpp_pds;
1443 if (status) 1449 cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
1444 goto mbx_err; 1450 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1445 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1451 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1446 1452
1447 if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) { 1453 if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
1448 dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >> 1454 rsp->pd_count) {
1449 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; 1455 dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
1450 dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & 1456 OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
1451 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1457 dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
1452 dev->pd_mgr->max_dpp_pd = rsp->pd_count; 1458 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1453 pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); 1459 dev->pd_mgr->max_dpp_pd = rsp->pd_count;
1454 dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, 1460 pd_bitmap_size =
1455 GFP_KERNEL); 1461 BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
1462 dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
1463 GFP_KERNEL);
1464 }
1465 kfree(cmd);
1456 } 1466 }
1457 kfree(cmd);
1458 1467
1459 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); 1468 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
1460 if (!cmd) 1469 if (!cmd)
@@ -1462,10 +1471,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1462 1471
1463 cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds; 1472 cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
1464 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1473 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1465 if (status)
1466 goto mbx_err;
1467 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1474 rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
1468 if (rsp->pd_count) { 1475 if (!status && rsp->pd_count) {
1469 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & 1476 dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
1470 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1477 OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
1471 dev->pd_mgr->max_normal_pd = rsp->pd_count; 1478 dev->pd_mgr->max_normal_pd = rsp->pd_count;
@@ -1473,15 +1480,13 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
1473 dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, 1480 dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
1474 GFP_KERNEL); 1481 GFP_KERNEL);
1475 } 1482 }
1483 kfree(cmd);
1476 1484
1477 if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) { 1485 if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
1478 /* Enable PD resource manager */ 1486 /* Enable PD resource manager */
1479 dev->pd_mgr->pd_prealloc_valid = true; 1487 dev->pd_mgr->pd_prealloc_valid = true;
1480 } else { 1488 return 0;
1481 return -ENOMEM;
1482 } 1489 }
1483mbx_err:
1484 kfree(cmd);
1485 return status; 1490 return status;
1486} 1491}
1487 1492
@@ -2406,7 +2411,7 @@ int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2406 struct ocrdma_query_qp *cmd; 2411 struct ocrdma_query_qp *cmd;
2407 struct ocrdma_query_qp_rsp *rsp; 2412 struct ocrdma_query_qp_rsp *rsp;
2408 2413
2409 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd)); 2414 cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
2410 if (!cmd) 2415 if (!cmd)
2411 return status; 2416 return status;
2412 cmd->qp_id = qp->id; 2417 cmd->qp_id = qp->id;
@@ -2428,7 +2433,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2428 int status; 2433 int status;
2429 struct ib_ah_attr *ah_attr = &attrs->ah_attr; 2434 struct ib_ah_attr *ah_attr = &attrs->ah_attr;
2430 union ib_gid sgid, zgid; 2435 union ib_gid sgid, zgid;
2431 u32 vlan_id; 2436 u32 vlan_id = 0xFFFF;
2432 u8 mac_addr[6]; 2437 u8 mac_addr[6];
2433 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2438 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2434 2439
@@ -2468,12 +2473,22 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
2468 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8); 2473 cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2469 if (attr_mask & IB_QP_VID) { 2474 if (attr_mask & IB_QP_VID) {
2470 vlan_id = attrs->vlan_id; 2475 vlan_id = attrs->vlan_id;
2476 } else if (dev->pfc_state) {
2477 vlan_id = 0;
2478 pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
2479 dev->id);
2480 pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
2481 dev->id);
2482 }
2483
2484 if (vlan_id < 0x1000) {
2471 cmd->params.vlan_dmac_b4_to_b5 |= 2485 cmd->params.vlan_dmac_b4_to_b5 |=
2472 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; 2486 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2473 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; 2487 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2474 cmd->params.rnt_rc_sl_fl |= 2488 cmd->params.rnt_rc_sl_fl |=
2475 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; 2489 (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
2476 } 2490 }
2491
2477 return 0; 2492 return 0;
2478} 2493}
2479 2494
@@ -2519,8 +2534,10 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2519 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID; 2534 cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2520 } 2535 }
2521 if (attr_mask & IB_QP_PATH_MTU) { 2536 if (attr_mask & IB_QP_PATH_MTU) {
2522 if (attrs->path_mtu < IB_MTU_256 || 2537 if (attrs->path_mtu < IB_MTU_512 ||
2523 attrs->path_mtu > IB_MTU_4096) { 2538 attrs->path_mtu > IB_MTU_4096) {
2539 pr_err("ocrdma%d: IB MTU %d is not supported\n",
2540 dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
2524 status = -EINVAL; 2541 status = -EINVAL;
2525 goto pmtu_err; 2542 goto pmtu_err;
2526 } 2543 }
@@ -3147,9 +3164,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
3147 ocrdma_free_pd_pool(dev); 3164 ocrdma_free_pd_pool(dev);
3148 ocrdma_mbx_delete_ah_tbl(dev); 3165 ocrdma_mbx_delete_ah_tbl(dev);
3149 3166
3150 /* cleanup the eqs */
3151 ocrdma_destroy_eqs(dev);
3152
3153 /* cleanup the control path */ 3167 /* cleanup the control path */
3154 ocrdma_destroy_mq(dev); 3168 ocrdma_destroy_mq(dev);
3169
3170 /* cleanup the eqs */
3171 ocrdma_destroy_eqs(dev);
3155} 3172}
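The EQ-full detection added to the irq handler above decodes a major-code field out of the same word that carries the valid bit. Using the masks from the ocrdma_sli.h hunk below, the decode can be checked standalone (the sample id_valid word is made up):

#include <stdio.h>

/* field definitions from the ocrdma_sli.h change below */
#define OCRDMA_EQE_VALID_MASK		0x01U
#define OCRDMA_EQE_MAJOR_CODE_MASK	0x0EU
#define OCRDMA_EQE_MAJOR_CODE_SHIFT	0x01U
#define OCRDMA_MAJOR_CODE_SENTINAL	0x01U

int main(void)
{
	unsigned int id_valid = 0x00010003;	/* illustrative EQE word */
	unsigned int mcode = (id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
			      >> OCRDMA_EQE_MAJOR_CODE_SHIFT;

	if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
		printf("EQ full, eqe = 0x%x\n", id_valid);	/* stands in for pr_err() */
	if (!(id_valid & OCRDMA_EQE_VALID_MASK))
		printf("invalid entry, stop polling\n");
	return 0;
}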
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 243c87c8bd65..02ad0aee99af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1176,6 +1176,8 @@ struct ocrdma_query_qp_rsp {
1176 struct ocrdma_mqe_hdr hdr; 1176 struct ocrdma_mqe_hdr hdr;
1177 struct ocrdma_mbx_rsp rsp; 1177 struct ocrdma_mbx_rsp rsp;
1178 struct ocrdma_qp_params params; 1178 struct ocrdma_qp_params params;
1179 u32 dpp_credits_cqid;
1180 u32 rbq_id;
1179}; 1181};
1180 1182
1181enum { 1183enum {
@@ -1624,12 +1626,19 @@ struct ocrdma_delete_ah_tbl_rsp {
1624enum { 1626enum {
1625 OCRDMA_EQE_VALID_SHIFT = 0, 1627 OCRDMA_EQE_VALID_SHIFT = 0,
1626 OCRDMA_EQE_VALID_MASK = BIT(0), 1628 OCRDMA_EQE_VALID_MASK = BIT(0),
1629 OCRDMA_EQE_MAJOR_CODE_MASK = 0x0E,
1630 OCRDMA_EQE_MAJOR_CODE_SHIFT = 0x01,
1627 OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE, 1631 OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE,
1628 OCRDMA_EQE_RESOURCE_ID_SHIFT = 16, 1632 OCRDMA_EQE_RESOURCE_ID_SHIFT = 16,
1629 OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF << 1633 OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF <<
1630 OCRDMA_EQE_RESOURCE_ID_SHIFT, 1634 OCRDMA_EQE_RESOURCE_ID_SHIFT,
1631}; 1635};
1632 1636
1637enum major_code {
1638 OCRDMA_MAJOR_CODE_COMPLETION = 0x00,
1639 OCRDMA_MAJOR_CODE_SENTINAL = 0x01
1640};
1641
1633struct ocrdma_eqe { 1642struct ocrdma_eqe {
1634 u32 id_valid; 1643 u32 id_valid;
1635}; 1644};
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 877175563634..9dcb66077d6c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -365,7 +365,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
365 if (!pd) 365 if (!pd)
366 return ERR_PTR(-ENOMEM); 366 return ERR_PTR(-ENOMEM);
367 367
368 if (udata && uctx) { 368 if (udata && uctx && dev->attr.max_dpp_pds) {
369 pd->dpp_enabled = 369 pd->dpp_enabled =
370 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; 370 ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
371 pd->num_dpp_qp = 371 pd->num_dpp_qp =
@@ -1721,18 +1721,20 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
1721 struct ocrdma_qp *qp; 1721 struct ocrdma_qp *qp;
1722 struct ocrdma_dev *dev; 1722 struct ocrdma_dev *dev;
1723 struct ib_qp_attr attrs; 1723 struct ib_qp_attr attrs;
1724 int attr_mask = IB_QP_STATE; 1724 int attr_mask;
1725 unsigned long flags; 1725 unsigned long flags;
1726 1726
1727 qp = get_ocrdma_qp(ibqp); 1727 qp = get_ocrdma_qp(ibqp);
1728 dev = get_ocrdma_dev(ibqp->device); 1728 dev = get_ocrdma_dev(ibqp->device);
1729 1729
1730 attrs.qp_state = IB_QPS_ERR;
1731 pd = qp->pd; 1730 pd = qp->pd;
1732 1731
1733 /* change the QP state to ERROR */ 1732 /* change the QP state to ERROR */
1734 _ocrdma_modify_qp(ibqp, &attrs, attr_mask); 1733 if (qp->state != OCRDMA_QPS_RST) {
1735 1734 attrs.qp_state = IB_QPS_ERR;
1735 attr_mask = IB_QP_STATE;
1736 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1737 }
 1736 /* ensure that CQEs for a newly created QP (whose id may be the same 1738 /* ensure that CQEs for a newly created QP (whose id may be the same
 1737 * as one just getting destroyed) don't get 1739 * as one just getting destroyed) don't get
1738 * discarded until the old CQEs are discarded. 1740 * discarded until the old CQEs are discarded.
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index ffd48bfc4923..7df16f74bb45 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -903,7 +903,7 @@ struct qib_devdata {
903 /* PCI Device ID (here for NodeInfo) */ 903 /* PCI Device ID (here for NodeInfo) */
904 u16 deviceid; 904 u16 deviceid;
905 /* for write combining settings */ 905 /* for write combining settings */
906 unsigned long wc_cookie; 906 int wc_cookie;
907 unsigned long wc_base; 907 unsigned long wc_base;
908 unsigned long wc_len; 908 unsigned long wc_len;
909 909
@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit);
1136extern u32 qib_cpulist_count; 1136extern u32 qib_cpulist_count;
1137extern unsigned long *qib_cpulist; 1137extern unsigned long *qib_cpulist;
1138 1138
1139extern unsigned qib_wc_pat;
1140extern unsigned qib_cc_table_size; 1139extern unsigned qib_cc_table_size;
1141int qib_init(struct qib_devdata *, int); 1140int qib_init(struct qib_devdata *, int);
1142int init_chip_wc_pat(struct qib_devdata *dd, u32); 1141int init_chip_wc_pat(struct qib_devdata *dd, u32);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 9ea6c440a00c..725881890c4a 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
835 vma->vm_flags &= ~VM_MAYREAD; 835 vma->vm_flags &= ~VM_MAYREAD;
836 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; 836 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
837 837
838 if (qib_wc_pat) 838 /* We used PAT if wc_cookie == 0 */
839 if (!dd->wc_cookie)
839 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 840 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
840 841
841 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, 842 ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
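Per the new comment above, wc_cookie now encodes how write-combining was arranged: 0 means PAT handled it (so the mmap path applies pgprot_writecombine), a positive value is an MTRR handle from arch_phys_wc_add(), and a negative value is the setup error. A trivial decoder of that convention:

#include <stdio.h>

/* the wc_cookie convention the qib changes adopt */
static void describe_wc_cookie(int wc_cookie)
{
	if (wc_cookie == 0)
		printf("WC via PAT; mmap gets pgprot_writecombine()\n");
	else if (wc_cookie > 0)
		printf("WC via MTRR, handle %d\n", wc_cookie);
	else
		printf("WC setup failed: %d\n", wc_cookie);
}

int main(void)
{
	describe_wc_cookie(0);
	describe_wc_cookie(4);
	describe_wc_cookie(-22);	/* -EINVAL */
	return 0;
}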
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 0d2ba59af30a..4b927809d1a1 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd)
3315 qib_6120_config_ctxts(dd); 3315 qib_6120_config_ctxts(dd);
3316 qib_set_ctxtcnt(dd); 3316 qib_set_ctxtcnt(dd);
3317 3317
3318 if (qib_wc_pat) { 3318 ret = init_chip_wc_pat(dd, 0);
3319 ret = init_chip_wc_pat(dd, 0); 3319 if (ret)
3320 if (ret) 3320 goto bail;
3321 goto bail;
3322 }
3323 set_6120_baseaddrs(dd); /* set chip access pointers now */ 3321 set_6120_baseaddrs(dd); /* set chip access pointers now */
3324 3322
3325 ret = 0; 3323 ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 22affda8af88..00b2af211157 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
4126 qib_7220_config_ctxts(dd); 4126 qib_7220_config_ctxts(dd);
4127 qib_set_ctxtcnt(dd); /* needed for PAT setup */ 4127 qib_set_ctxtcnt(dd); /* needed for PAT setup */
4128 4128
4129 if (qib_wc_pat) { 4129 ret = init_chip_wc_pat(dd, 0);
4130 ret = init_chip_wc_pat(dd, 0); 4130 if (ret)
4131 if (ret) 4131 goto bail;
4132 goto bail;
4133 }
4134 set_7220_baseaddrs(dd); /* set chip access pointers now */ 4132 set_7220_baseaddrs(dd); /* set chip access pointers now */
4135 4133
4136 ret = 0; 4134 ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ef97b71c8f7d..f32b4628e991 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6429 unsigned features, pidx, sbufcnt; 6429 unsigned features, pidx, sbufcnt;
6430 int ret, mtu; 6430 int ret, mtu;
6431 u32 sbufs, updthresh; 6431 u32 sbufs, updthresh;
6432 resource_size_t vl15off;
6432 6433
6433 /* pport structs are contiguous, allocated after devdata */ 6434 /* pport structs are contiguous, allocated after devdata */
6434 ppd = (struct qib_pportdata *)(dd + 1); 6435 ppd = (struct qib_pportdata *)(dd + 1);
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
6677 qib_7322_config_ctxts(dd); 6678 qib_7322_config_ctxts(dd);
6678 qib_set_ctxtcnt(dd); 6679 qib_set_ctxtcnt(dd);
6679 6680
6680 if (qib_wc_pat) { 6681 /*
6681 resource_size_t vl15off; 6682 * We do not set WC on the VL15 buffers to avoid
6682 /* 6683 * a rare problem with unaligned writes from
6683 * We do not set WC on the VL15 buffers to avoid 6684 * interrupt-flushed store buffers, so we need
6684 * a rare problem with unaligned writes from 6685 * to map those separately here. We can't solve
6685 * interrupt-flushed store buffers, so we need 6686 * this for the rarely used mtrr case.
6686 * to map those separately here. We can't solve 6687 */
6687 * this for the rarely used mtrr case. 6688 ret = init_chip_wc_pat(dd, 0);
6688 */ 6689 if (ret)
6689 ret = init_chip_wc_pat(dd, 0); 6690 goto bail;
6690 if (ret)
6691 goto bail;
6692 6691
6693 /* vl15 buffers start just after the 4k buffers */ 6692 /* vl15 buffers start just after the 4k buffers */
6694 vl15off = dd->physaddr + (dd->piobufbase >> 32) + 6693 vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6695 dd->piobcnt4k * dd->align4k; 6694 dd->piobcnt4k * dd->align4k;
6696 dd->piovl15base = ioremap_nocache(vl15off, 6695 dd->piovl15base = ioremap_nocache(vl15off,
6697 NUM_VL15_BUFS * dd->align4k); 6696 NUM_VL15_BUFS * dd->align4k);
6698 if (!dd->piovl15base) { 6697 if (!dd->piovl15base) {
6699 ret = -ENOMEM; 6698 ret = -ENOMEM;
6700 goto bail; 6699 goto bail;
6701 }
6702 } 6700 }
6701
6703 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ 6702 qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6704 6703
6705 ret = 0; 6704 ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 2ee36953e234..7e00470adc30 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
91unsigned qib_cc_table_size; 91unsigned qib_cc_table_size;
92module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); 92module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
93MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); 93MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
94/*
95 * qib_wc_pat parameter:
96 * 0 is WC via MTRR
97 * 1 is WC via PAT
98 * If PAT initialization fails, code reverts back to MTRR
99 */
100unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
101module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
102MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
103 94
104static void verify_interrupt(unsigned long); 95static void verify_interrupt(unsigned long);
105 96
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
1377 spin_unlock(&dd->pport[pidx].cc_shadow_lock); 1368 spin_unlock(&dd->pport[pidx].cc_shadow_lock);
1378 } 1369 }
1379 1370
1380 if (!qib_wc_pat) 1371 qib_disable_wc(dd);
1381 qib_disable_wc(dd);
1382 1372
1383 if (dd->pioavailregs_dma) { 1373 if (dd->pioavailregs_dma) {
1384 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, 1374 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1547 goto bail; 1537 goto bail;
1548 } 1538 }
1549 1539
1550 if (!qib_wc_pat) { 1540 ret = qib_enable_wc(dd);
1551 ret = qib_enable_wc(dd); 1541 if (ret) {
1552 if (ret) { 1542 qib_dev_err(dd,
1553 qib_dev_err(dd, 1543 "Write combining not enabled (err %d): performance may be poor\n",
1554 "Write combining not enabled (err %d): performance may be poor\n", 1544 -ret);
1555 -ret); 1545 ret = 0;
1556 ret = 0;
1557 }
1558 } 1546 }
1559 1547
1560 qib_verify_pioperf(dd); 1548 qib_verify_pioperf(dd);
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 81b225f2300a..edd0ddbd4481 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -116,21 +116,10 @@ int qib_enable_wc(struct qib_devdata *dd)
116 } 116 }
117 117
118 if (!ret) { 118 if (!ret) {
119 int cookie; 119 dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
120 120 if (dd->wc_cookie < 0)
121 cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0); 121 /* use error from routine */
122 if (cookie < 0) { 122 ret = dd->wc_cookie;
123 {
124 qib_devinfo(dd->pcidev,
125 "mtrr_add() WC for PIO bufs failed (%d)\n",
126 cookie);
127 ret = -EINVAL;
128 }
129 } else {
130 dd->wc_cookie = cookie;
131 dd->wc_base = (unsigned long) pioaddr;
132 dd->wc_len = (unsigned long) piolen;
133 }
134 } 123 }
135 124
136 return ret; 125 return ret;
@@ -142,18 +131,7 @@ int qib_enable_wc(struct qib_devdata *dd)
142 */ 131 */
143void qib_disable_wc(struct qib_devdata *dd) 132void qib_disable_wc(struct qib_devdata *dd)
144{ 133{
145 if (dd->wc_cookie) { 134 arch_phys_wc_del(dd->wc_cookie);
146 int r;
147
148 r = mtrr_del(dd->wc_cookie, dd->wc_base,
149 dd->wc_len);
150 if (r < 0)
151 qib_devinfo(dd->pcidev,
152 "mtrr_del(%lx, %lx, %lx) failed: %d\n",
153 dd->wc_cookie, dd->wc_base,
154 dd->wc_len, r);
155 dd->wc_cookie = 0; /* even on failure */
156 }
157} 135}
158 136
159/** 137/**
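The rewrite above leans on the arch_phys_wc_add()/arch_phys_wc_del() contract: add returns 0 when PAT makes an MTRR unnecessary, a positive handle when it registered one, and a negative errno on failure, while del is a no-op for handles that are zero or negative, so cleanup can be unconditional. A sketch of that contract with fake_* stand-ins (not the kernel API):

#include <stdio.h>

static int fake_wc_add(unsigned long base, unsigned long size)
{
	(void)base;
	(void)size;
	return 0;	/* pretend PAT is available: nothing to register */
}

static void fake_wc_del(int handle)
{
	if (handle > 0)	/* mirrors del being a no-op for <= 0 */
		printf("released MTRR handle %d\n", handle);
}

int main(void)
{
	int cookie = fake_wc_add(0xd0000000UL, 0x100000UL);

	if (cookie < 0)
		printf("WC setup failed: %d\n", cookie);
	fake_wc_del(cookie);	/* safe whether or not an MTRR was used */
	printf("cookie = %d\n", cookie);
	return 0;
}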
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959adb6c7d..cf32a778e7d0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
386 rx->rx_ring[i].mapping, 386 rx->rx_ring[i].mapping,
387 GFP_KERNEL)) { 387 GFP_KERNEL)) {
388 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); 388 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
389 ret = -ENOMEM; 389 ret = -ENOMEM;
390 goto err_count; 390 goto err_count;
391 } 391 }
392 ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); 392 ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
393 if (ret) { 393 if (ret) {