about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/infiniband/core/cma.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--  drivers/infiniband/core/cma.c | 100 ++++++++++++++++++++++++++++++++++------
1 file changed, 93 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f0c91ba3178a..0451307bea18 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -68,6 +68,7 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
68MODULE_LICENSE("Dual BSD/GPL"); 68MODULE_LICENSE("Dual BSD/GPL");
69 69
70#define CMA_CM_RESPONSE_TIMEOUT 20 70#define CMA_CM_RESPONSE_TIMEOUT 20
71#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
71#define CMA_MAX_CM_RETRIES 15 72#define CMA_MAX_CM_RETRIES 15
72#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) 73#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
73#define CMA_IBOE_PACKET_LIFETIME 18 74#define CMA_IBOE_PACKET_LIFETIME 18
@@ -162,6 +163,14 @@ struct rdma_bind_list {
162 unsigned short port; 163 unsigned short port;
163}; 164};
164 165
/*
 * Context handed to the asynchronous SA ClassPortInfo query so that its
 * completion callback can deliver the result back to the thread waiting
 * in cma_query_sa_classport_info().
 */
struct class_port_info_context {
	struct ib_class_port_info *class_port_info;	/* caller-owned result buffer */
	struct ib_device	*device;		/* device the query was issued on */
	struct completion	done;			/* signalled by the query callback */
	struct ib_sa_query	*sa_query;		/* handle returned by the SA query */
	u8			port_num;		/* port the query was issued on */
};
173
165static int cma_ps_alloc(struct net *net, enum rdma_port_space ps, 174static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
166 struct rdma_bind_list *bind_list, int snum) 175 struct rdma_bind_list *bind_list, int snum)
167{ 176{
@@ -306,6 +315,7 @@ struct cma_multicast {
306 struct sockaddr_storage addr; 315 struct sockaddr_storage addr;
307 struct kref mcref; 316 struct kref mcref;
308 bool igmp_joined; 317 bool igmp_joined;
318 u8 join_state;
309}; 319};
310 320
311struct cma_work { 321struct cma_work {
@@ -3754,10 +3764,63 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
3754 } 3764 }
3755} 3765}
3756 3766
3767static void cma_query_sa_classport_info_cb(int status,
3768 struct ib_class_port_info *rec,
3769 void *context)
3770{
3771 struct class_port_info_context *cb_ctx = context;
3772
3773 WARN_ON(!context);
3774
3775 if (status || !rec) {
3776 pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
3777 cb_ctx->device->name, cb_ctx->port_num, status);
3778 goto out;
3779 }
3780
3781 memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));
3782
3783out:
3784 complete(&cb_ctx->done);
3785}
3786
3787static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
3788 struct ib_class_port_info *class_port_info)
3789{
3790 struct class_port_info_context *cb_ctx;
3791 int ret;
3792
3793 cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
3794 if (!cb_ctx)
3795 return -ENOMEM;
3796
3797 cb_ctx->device = device;
3798 cb_ctx->class_port_info = class_port_info;
3799 cb_ctx->port_num = port_num;
3800 init_completion(&cb_ctx->done);
3801
3802 ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
3803 CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
3804 GFP_KERNEL, cma_query_sa_classport_info_cb,
3805 cb_ctx, &cb_ctx->sa_query);
3806 if (ret < 0) {
3807 pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
3808 device->name, port_num, ret);
3809 goto out;
3810 }
3811
3812 wait_for_completion(&cb_ctx->done);
3813
3814out:
3815 kfree(cb_ctx);
3816 return ret;
3817}
3818
3757static int cma_join_ib_multicast(struct rdma_id_private *id_priv, 3819static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
3758 struct cma_multicast *mc) 3820 struct cma_multicast *mc)
3759{ 3821{
3760 struct ib_sa_mcmember_rec rec; 3822 struct ib_sa_mcmember_rec rec;
3823 struct ib_class_port_info class_port_info;
3761 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; 3824 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
3762 ib_sa_comp_mask comp_mask; 3825 ib_sa_comp_mask comp_mask;
3763 int ret; 3826 int ret;
@@ -3776,7 +3839,24 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
3776 rec.qkey = cpu_to_be32(id_priv->qkey); 3839 rec.qkey = cpu_to_be32(id_priv->qkey);
3777 rdma_addr_get_sgid(dev_addr, &rec.port_gid); 3840 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
3778 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 3841 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
3779 rec.join_state = 1; 3842 rec.join_state = mc->join_state;
3843
3844 if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
3845 ret = cma_query_sa_classport_info(id_priv->id.device,
3846 id_priv->id.port_num,
3847 &class_port_info);
3848
3849 if (ret)
3850 return ret;
3851
3852 if (!(ib_get_cpi_capmask2(&class_port_info) &
3853 IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
3854 pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
3855 "RDMA CM: SM doesn't support Send Only Full Member option\n",
3856 id_priv->id.device->name, id_priv->id.port_num);
3857 return -EOPNOTSUPP;
3858 }
3859 }
3780 3860
3781 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID | 3861 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
3782 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE | 3862 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
@@ -3845,6 +3925,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
3845 struct sockaddr *addr = (struct sockaddr *)&mc->addr; 3925 struct sockaddr *addr = (struct sockaddr *)&mc->addr;
3846 struct net_device *ndev = NULL; 3926 struct net_device *ndev = NULL;
3847 enum ib_gid_type gid_type; 3927 enum ib_gid_type gid_type;
3928 bool send_only;
3929
3930 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
3848 3931
3849 if (cma_zero_addr((struct sockaddr *)&mc->addr)) 3932 if (cma_zero_addr((struct sockaddr *)&mc->addr))
3850 return -EINVAL; 3933 return -EINVAL;
@@ -3878,12 +3961,14 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
3878 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - 3961 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
3879 rdma_start_port(id_priv->cma_dev->device)]; 3962 rdma_start_port(id_priv->cma_dev->device)];
3880 if (addr->sa_family == AF_INET) { 3963 if (addr->sa_family == AF_INET) {
3881 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3964 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
3882 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
3883 true);
3884 if (!err) {
3885 mc->igmp_joined = true;
3886 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; 3965 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
3966 if (!send_only) {
3967 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
3968 true);
3969 if (!err)
3970 mc->igmp_joined = true;
3971 }
3887 } 3972 }
3888 } else { 3973 } else {
3889 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) 3974 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@ -3913,7 +3998,7 @@ out1:
3913} 3998}
3914 3999
3915int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, 4000int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3916 void *context) 4001 u8 join_state, void *context)
3917{ 4002{
3918 struct rdma_id_private *id_priv; 4003 struct rdma_id_private *id_priv;
3919 struct cma_multicast *mc; 4004 struct cma_multicast *mc;
@@ -3932,6 +4017,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
3932 mc->context = context; 4017 mc->context = context;
3933 mc->id_priv = id_priv; 4018 mc->id_priv = id_priv;
3934 mc->igmp_joined = false; 4019 mc->igmp_joined = false;
4020 mc->join_state = join_state;
3935 spin_lock(&id_priv->lock); 4021 spin_lock(&id_priv->lock);
3936 list_add(&mc->list, &id_priv->mc_list); 4022 list_add(&mc->list, &id_priv->mc_list);
3937 spin_unlock(&id_priv->lock); 4023 spin_unlock(&id_priv->lock);