about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMatan Barak <matanb@mellanox.com>2015-06-11 09:35:21 -0400
committerDoug Ledford <dledford@redhat.com>2015-06-12 14:49:10 -0400
commit8e37210b38fb7d6aa06aebde763316ee955d44c0 (patch)
tree2ef8854af886906e1901e7c6f0b93a28ae515c6e
parentbcf4c1ea583cd213f0bafdbeb11d80f83c5f10e6 (diff)
IB/core: Change ib_create_cq to use struct ib_cq_init_attr
Currently, ib_create_cq uses cqe and comp_vector instead of the extendible ib_cq_init_attr struct. Earlier patches already changed the vendors to work with ib_cq_init_attr. This patch changes the consumers too.
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--drivers/infiniband/core/mad.c4
-rw-r--r--drivers/infiniband/core/verbs.c6
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c5
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c8
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c5
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c5
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c9
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c6
-rw-r--r--include/rdma/ib_verbs.h7
-rw-r--r--net/9p/trans_rdma.c4
-rw-r--r--net/rds/ib_cm.c7
-rw-r--r--net/rds/iw_cm.c7
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c9
-rw-r--r--net/sunrpc/xprtrdma/verbs.c9
18 files changed, 74 insertions, 35 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 600af266838c..533c0b2e7a63 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2923,6 +2923,7 @@ static int ib_mad_port_open(struct ib_device *device,
2923 unsigned long flags; 2923 unsigned long flags;
2924 char name[sizeof "ib_mad123"]; 2924 char name[sizeof "ib_mad123"];
2925 int has_smi; 2925 int has_smi;
2926 struct ib_cq_init_attr cq_attr = {};
2926 2927
2927 /* Create new device info */ 2928 /* Create new device info */
2928 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL); 2929 port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2943,9 +2944,10 @@ static int ib_mad_port_open(struct ib_device *device,
2943 if (has_smi) 2944 if (has_smi)
2944 cq_size *= 2; 2945 cq_size *= 2;
2945 2946
2947 cq_attr.cqe = cq_size;
2946 port_priv->cq = ib_create_cq(port_priv->device, 2948 port_priv->cq = ib_create_cq(port_priv->device,
2947 ib_mad_thread_completion_handler, 2949 ib_mad_thread_completion_handler,
2948 NULL, port_priv, cq_size, 0); 2950 NULL, port_priv, &cq_attr);
2949 if (IS_ERR(port_priv->cq)) { 2951 if (IS_ERR(port_priv->cq)) {
2950 dev_err(&device->dev, "Couldn't create ib_mad CQ\n"); 2952 dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
2951 ret = PTR_ERR(port_priv->cq); 2953 ret = PTR_ERR(port_priv->cq);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 7bffdbe6afe9..bac3fb406a74 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1076,12 +1076,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
1076struct ib_cq *ib_create_cq(struct ib_device *device, 1076struct ib_cq *ib_create_cq(struct ib_device *device,
1077 ib_comp_handler comp_handler, 1077 ib_comp_handler comp_handler,
1078 void (*event_handler)(struct ib_event *, void *), 1078 void (*event_handler)(struct ib_event *, void *),
1079 void *cq_context, int cqe, int comp_vector) 1079 void *cq_context,
1080 const struct ib_cq_init_attr *cq_attr)
1080{ 1081{
1081 struct ib_cq *cq; 1082 struct ib_cq *cq;
1082 struct ib_cq_init_attr attr = {.cqe = cqe, .comp_vector = comp_vector};
1083 1083
1084 cq = device->create_cq(device, &attr, NULL, NULL); 1084 cq = device->create_cq(device, cq_attr, NULL, NULL);
1085 1085
1086 if (!IS_ERR(cq)) { 1086 if (!IS_ERR(cq)) {
1087 cq->device = device; 1087 cq->device = device;
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 5e30b72d3677..c0e45a46504b 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -552,6 +552,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
552 struct ib_cq *ibcq; 552 struct ib_cq *ibcq;
553 struct ib_qp *ibqp; 553 struct ib_qp *ibqp;
554 struct ib_qp_init_attr qp_init_attr; 554 struct ib_qp_init_attr qp_init_attr;
555 struct ib_cq_init_attr cq_attr = {};
555 int ret; 556 int ret;
556 557
557 if (sport->ibcq_aqp1) { 558 if (sport->ibcq_aqp1) {
@@ -559,7 +560,9 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
559 return -EPERM; 560 return -EPERM;
560 } 561 }
561 562
562 ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1), 10, 0); 563 cq_attr.cqe = 10;
564 ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
565 &cq_attr);
563 if (IS_ERR(ibcq)) { 566 if (IS_ERR(ibcq)) {
564 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ."); 567 ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
565 return PTR_ERR(ibcq); 568 return PTR_ERR(ibcq);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 614ac6f07ae1..a790be5a7423 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1774,6 +1774,7 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1774 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) 1774 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1775{ 1775{
1776 int ret, cq_size; 1776 int ret, cq_size;
1777 struct ib_cq_init_attr cq_attr = {};
1777 1778
1778 if (ctx->state != DEMUX_PV_STATE_DOWN) 1779 if (ctx->state != DEMUX_PV_STATE_DOWN)
1779 return -EEXIST; 1780 return -EEXIST;
@@ -1802,8 +1803,9 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1802 if (ctx->has_smi) 1803 if (ctx->has_smi)
1803 cq_size *= 2; 1804 cq_size *= 2;
1804 1805
1806 cq_attr.cqe = cq_size;
1805 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, 1807 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
1806 NULL, ctx, cq_size, 0); 1808 NULL, ctx, &cq_attr);
1807 if (IS_ERR(ctx->cq)) { 1809 if (IS_ERR(ctx->cq)) {
1808 ret = PTR_ERR(ctx->cq); 1810 ret = PTR_ERR(ctx->cq);
1809 pr_err("Couldn't create tunnel CQ (%d)\n", ret); 1811 pr_err("Couldn't create tunnel CQ (%d)\n", ret);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 86c0c27120f7..af2071ed1437 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -758,6 +758,7 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
758 struct ib_udata *udata) 758 struct ib_udata *udata)
759{ 759{
760 struct mlx4_ib_xrcd *xrcd; 760 struct mlx4_ib_xrcd *xrcd;
761 struct ib_cq_init_attr cq_attr = {};
761 int err; 762 int err;
762 763
763 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)) 764 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
@@ -777,7 +778,8 @@ static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
777 goto err2; 778 goto err2;
778 } 779 }
779 780
780 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0); 781 cq_attr.cqe = 1;
782 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
781 if (IS_ERR(xrcd->cq)) { 783 if (IS_ERR(xrcd->cq)) {
782 err = PTR_ERR(xrcd->cq); 784 err = PTR_ERR(xrcd->cq);
783 goto err3; 785 goto err3;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9565c203a497..06b023855a33 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -971,6 +971,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
971 struct ib_cq *cq; 971 struct ib_cq *cq;
972 struct ib_qp *qp; 972 struct ib_qp *qp;
973 struct ib_mr *mr; 973 struct ib_mr *mr;
974 struct ib_cq_init_attr cq_attr = {};
974 int ret; 975 int ret;
975 976
976 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 977 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
@@ -994,8 +995,9 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
994 goto error_1; 995 goto error_1;
995 } 996 }
996 997
997 cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128, 998 cq_attr.cqe = 128;
998 0); 999 cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
1000 &cq_attr);
999 if (IS_ERR(cq)) { 1001 if (IS_ERR(cq)) {
1000 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 1002 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
1001 ret = PTR_ERR(cq); 1003 ret = PTR_ERR(cq);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index e5cc43074196..9e6ee82a8fd7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -141,6 +141,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
141 .sq_sig_type = IB_SIGNAL_ALL_WR, 141 .sq_sig_type = IB_SIGNAL_ALL_WR,
142 .qp_type = IB_QPT_UD 142 .qp_type = IB_QPT_UD
143 }; 143 };
144 struct ib_cq_init_attr cq_attr = {};
144 145
145 int ret, size; 146 int ret, size;
146 int i; 147 int i;
@@ -178,14 +179,17 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
178 } else 179 } else
179 goto out_free_wq; 180 goto out_free_wq;
180 181
181 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); 182 cq_attr.cqe = size;
183 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
184 dev, &cq_attr);
182 if (IS_ERR(priv->recv_cq)) { 185 if (IS_ERR(priv->recv_cq)) {
183 printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name); 186 printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
184 goto out_cm_dev_cleanup; 187 goto out_cm_dev_cleanup;
185 } 188 }
186 189
190 cq_attr.cqe = ipoib_sendq_size;
187 priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL, 191 priv->send_cq = ib_create_cq(priv->ca, ipoib_send_comp_handler, NULL,
188 dev, ipoib_sendq_size, 0); 192 dev, &cq_attr);
189 if (IS_ERR(priv->send_cq)) { 193 if (IS_ERR(priv->send_cq)) {
190 printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name); 194 printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
191 goto out_free_recv_cq; 195 goto out_free_recv_cq;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index d33c5c000f9c..5c9f565ea0e8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -126,14 +126,17 @@ static int iser_create_device_ib_res(struct iser_device *device)
126 goto pd_err; 126 goto pd_err;
127 127
128 for (i = 0; i < device->comps_used; i++) { 128 for (i = 0; i < device->comps_used; i++) {
129 struct ib_cq_init_attr cq_attr = {};
129 struct iser_comp *comp = &device->comps[i]; 130 struct iser_comp *comp = &device->comps[i];
130 131
131 comp->device = device; 132 comp->device = device;
133 cq_attr.cqe = max_cqe;
134 cq_attr.comp_vector = i;
132 comp->cq = ib_create_cq(device->ib_device, 135 comp->cq = ib_create_cq(device->ib_device,
133 iser_cq_callback, 136 iser_cq_callback,
134 iser_cq_event_callback, 137 iser_cq_event_callback,
135 (void *)comp, 138 (void *)comp,
136 max_cqe, i); 139 &cq_attr);
137 if (IS_ERR(comp->cq)) { 140 if (IS_ERR(comp->cq)) {
138 comp->cq = NULL; 141 comp->cq = NULL;
139 goto cq_err; 142 goto cq_err;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index d99a0c8f14a4..9e7b4927265c 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -318,15 +318,18 @@ isert_alloc_comps(struct isert_device *device,
318 max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe); 318 max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);
319 319
320 for (i = 0; i < device->comps_used; i++) { 320 for (i = 0; i < device->comps_used; i++) {
321 struct ib_cq_init_attr cq_attr = {};
321 struct isert_comp *comp = &device->comps[i]; 322 struct isert_comp *comp = &device->comps[i];
322 323
323 comp->device = device; 324 comp->device = device;
324 INIT_WORK(&comp->work, isert_cq_work); 325 INIT_WORK(&comp->work, isert_cq_work);
326 cq_attr.cqe = max_cqe;
327 cq_attr.comp_vector = i;
325 comp->cq = ib_create_cq(device->ib_device, 328 comp->cq = ib_create_cq(device->ib_device,
326 isert_cq_callback, 329 isert_cq_callback,
327 isert_cq_event_callback, 330 isert_cq_event_callback,
328 (void *)comp, 331 (void *)comp,
329 max_cqe, i); 332 &cq_attr);
330 if (IS_ERR(comp->cq)) { 333 if (IS_ERR(comp->cq)) {
331 isert_err("Unable to allocate cq\n"); 334 isert_err("Unable to allocate cq\n");
332 ret = PTR_ERR(comp->cq); 335 ret = PTR_ERR(comp->cq);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index c3f654d20038..eada8f758ad4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -500,6 +500,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
500 struct ib_fmr_pool *fmr_pool = NULL; 500 struct ib_fmr_pool *fmr_pool = NULL;
501 struct srp_fr_pool *fr_pool = NULL; 501 struct srp_fr_pool *fr_pool = NULL;
502 const int m = 1 + dev->use_fast_reg; 502 const int m = 1 + dev->use_fast_reg;
503 struct ib_cq_init_attr cq_attr = {};
503 int ret; 504 int ret;
504 505
505 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL); 506 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
@@ -507,15 +508,19 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
507 return -ENOMEM; 508 return -ENOMEM;
508 509
509 /* + 1 for SRP_LAST_WR_ID */ 510 /* + 1 for SRP_LAST_WR_ID */
511 cq_attr.cqe = target->queue_size + 1;
512 cq_attr.comp_vector = ch->comp_vector;
510 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch, 513 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
511 target->queue_size + 1, ch->comp_vector); 514 &cq_attr);
512 if (IS_ERR(recv_cq)) { 515 if (IS_ERR(recv_cq)) {
513 ret = PTR_ERR(recv_cq); 516 ret = PTR_ERR(recv_cq);
514 goto err; 517 goto err;
515 } 518 }
516 519
520 cq_attr.cqe = m * target->queue_size;
521 cq_attr.comp_vector = ch->comp_vector;
517 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch, 522 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
518 m * target->queue_size, ch->comp_vector); 523 &cq_attr);
519 if (IS_ERR(send_cq)) { 524 if (IS_ERR(send_cq)) {
520 ret = PTR_ERR(send_cq); 525 ret = PTR_ERR(send_cq);
521 goto err_recv_cq; 526 goto err_recv_cq;
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 9b84b4c0a000..783efe1a3a28 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2080,6 +2080,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2080 struct srpt_port *sport = ch->sport; 2080 struct srpt_port *sport = ch->sport;
2081 struct srpt_device *sdev = sport->sdev; 2081 struct srpt_device *sdev = sport->sdev;
2082 u32 srp_sq_size = sport->port_attrib.srp_sq_size; 2082 u32 srp_sq_size = sport->port_attrib.srp_sq_size;
2083 struct ib_cq_init_attr cq_attr = {};
2083 int ret; 2084 int ret;
2084 2085
2085 WARN_ON(ch->rq_size < 1); 2086 WARN_ON(ch->rq_size < 1);
@@ -2090,8 +2091,9 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2090 goto out; 2091 goto out;
2091 2092
2092retry: 2093retry:
2094 cq_attr.cqe = ch->rq_size + srp_sq_size;
2093 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, 2095 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
2094 ch->rq_size + srp_sq_size, 0); 2096 &cq_attr);
2095 if (IS_ERR(ch->cq)) { 2097 if (IS_ERR(ch->cq)) {
2096 ret = PTR_ERR(ch->cq); 2098 ret = PTR_ERR(ch->cq);
2097 pr_err("failed to create CQ cqe= %d ret= %d\n", 2099 pr_err("failed to create CQ cqe= %d ret= %d\n",
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 3bad441de8dc..c41b5575df05 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -647,6 +647,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
647 kib_dev_t *dev; 647 kib_dev_t *dev;
648 struct ib_qp_init_attr *init_qp_attr; 648 struct ib_qp_init_attr *init_qp_attr;
649 struct kib_sched_info *sched; 649 struct kib_sched_info *sched;
650 struct ib_cq_init_attr cq_attr = {};
650 kib_conn_t *conn; 651 kib_conn_t *conn;
651 struct ib_cq *cq; 652 struct ib_cq *cq;
652 unsigned long flags; 653 unsigned long flags;
@@ -742,10 +743,11 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
742 743
743 kiblnd_map_rx_descs(conn); 744 kiblnd_map_rx_descs(conn);
744 745
746 cq_attr.cqe = IBLND_CQ_ENTRIES(version);
747 cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
745 cq = ib_create_cq(cmid->device, 748 cq = ib_create_cq(cmid->device,
746 kiblnd_cq_completion, kiblnd_cq_event, conn, 749 kiblnd_cq_completion, kiblnd_cq_event, conn,
747 IBLND_CQ_ENTRIES(version), 750 &cq_attr);
748 kiblnd_get_completion_vector(conn, cpt));
749 if (IS_ERR(cq)) { 751 if (IS_ERR(cq)) {
750 CERROR("Can't create CQ: %ld, cqe: %d\n", 752 CERROR("Can't create CQ: %ld, cqe: %d\n",
751 PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); 753 PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index b25ffa05e338..ea01e9953ec7 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2314,16 +2314,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
2314 * asynchronous event not associated with a completion occurs on the CQ. 2314 * asynchronous event not associated with a completion occurs on the CQ.
2315 * @cq_context: Context associated with the CQ returned to the user via 2315 * @cq_context: Context associated with the CQ returned to the user via
2316 * the associated completion and event handlers. 2316 * the associated completion and event handlers.
2317 * @cqe: The minimum size of the CQ. 2317 * @cq_attr: The attributes the CQ should be created upon.
2318 * @comp_vector - Completion vector used to signal completion events.
2319 * Must be >= 0 and < context->num_comp_vectors.
2320 * 2318 *
2321 * Users can examine the cq structure to determine the actual CQ size. 2319 * Users can examine the cq structure to determine the actual CQ size.
2322 */ 2320 */
2323struct ib_cq *ib_create_cq(struct ib_device *device, 2321struct ib_cq *ib_create_cq(struct ib_device *device,
2324 ib_comp_handler comp_handler, 2322 ib_comp_handler comp_handler,
2325 void (*event_handler)(struct ib_event *, void *), 2323 void (*event_handler)(struct ib_event *, void *),
2326 void *cq_context, int cqe, int comp_vector); 2324 void *cq_context,
2325 const struct ib_cq_init_attr *cq_attr);
2327 2326
2328/** 2327/**
2329 * ib_resize_cq - Modifies the capacity of the CQ. 2328 * ib_resize_cq - Modifies the capacity of the CQ.
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 3533d2a53ab6..37a78d20c0f6 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -648,6 +648,7 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
648 struct rdma_conn_param conn_param; 648 struct rdma_conn_param conn_param;
649 struct ib_qp_init_attr qp_attr; 649 struct ib_qp_init_attr qp_attr;
650 struct ib_device_attr devattr; 650 struct ib_device_attr devattr;
651 struct ib_cq_init_attr cq_attr = {};
651 652
652 /* Parse the transport specific mount options */ 653 /* Parse the transport specific mount options */
653 err = parse_opts(args, &opts); 654 err = parse_opts(args, &opts);
@@ -705,9 +706,10 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
705 goto error; 706 goto error;
706 707
707 /* Create the Completion Queue */ 708 /* Create the Completion Queue */
709 cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
708 rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler, 710 rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
709 cq_event_handler, client, 711 cq_event_handler, client,
710 opts.sq_depth + opts.rq_depth + 1, 0); 712 &cq_attr);
711 if (IS_ERR(rdma->cq)) 713 if (IS_ERR(rdma->cq))
712 goto error; 714 goto error;
713 ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); 715 ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index b8d1bdae8a2a..0da2a45b33bd 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -247,6 +247,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
247 struct rds_ib_connection *ic = conn->c_transport_data; 247 struct rds_ib_connection *ic = conn->c_transport_data;
248 struct ib_device *dev = ic->i_cm_id->device; 248 struct ib_device *dev = ic->i_cm_id->device;
249 struct ib_qp_init_attr attr; 249 struct ib_qp_init_attr attr;
250 struct ib_cq_init_attr cq_attr = {};
250 struct rds_ib_device *rds_ibdev; 251 struct rds_ib_device *rds_ibdev;
251 int ret; 252 int ret;
252 253
@@ -270,9 +271,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
270 ic->i_pd = rds_ibdev->pd; 271 ic->i_pd = rds_ibdev->pd;
271 ic->i_mr = rds_ibdev->mr; 272 ic->i_mr = rds_ibdev->mr;
272 273
274 cq_attr.cqe = ic->i_send_ring.w_nr + 1;
273 ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler, 275 ic->i_send_cq = ib_create_cq(dev, rds_ib_send_cq_comp_handler,
274 rds_ib_cq_event_handler, conn, 276 rds_ib_cq_event_handler, conn,
275 ic->i_send_ring.w_nr + 1, 0); 277 &cq_attr);
276 if (IS_ERR(ic->i_send_cq)) { 278 if (IS_ERR(ic->i_send_cq)) {
277 ret = PTR_ERR(ic->i_send_cq); 279 ret = PTR_ERR(ic->i_send_cq);
278 ic->i_send_cq = NULL; 280 ic->i_send_cq = NULL;
@@ -280,9 +282,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
280 goto out; 282 goto out;
281 } 283 }
282 284
285 cq_attr.cqe = ic->i_recv_ring.w_nr;
283 ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler, 286 ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
284 rds_ib_cq_event_handler, conn, 287 rds_ib_cq_event_handler, conn,
285 ic->i_recv_ring.w_nr, 0); 288 &cq_attr);
286 if (IS_ERR(ic->i_recv_cq)) { 289 if (IS_ERR(ic->i_recv_cq)) {
287 ret = PTR_ERR(ic->i_recv_cq); 290 ret = PTR_ERR(ic->i_recv_cq);
288 ic->i_recv_cq = NULL; 291 ic->i_recv_cq = NULL;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a6c2bea9f8f9..8f486fa32079 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -179,6 +179,7 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
179 void *context) 179 void *context)
180{ 180{
181 struct ib_device *dev = rds_iwdev->dev; 181 struct ib_device *dev = rds_iwdev->dev;
182 struct ib_cq_init_attr cq_attr = {};
182 unsigned int send_size, recv_size; 183 unsigned int send_size, recv_size;
183 int ret; 184 int ret;
184 185
@@ -198,9 +199,10 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
198 attr->sq_sig_type = IB_SIGNAL_REQ_WR; 199 attr->sq_sig_type = IB_SIGNAL_REQ_WR;
199 attr->qp_type = IB_QPT_RC; 200 attr->qp_type = IB_QPT_RC;
200 201
202 cq_attr.cqe = send_size;
201 attr->send_cq = ib_create_cq(dev, send_cq_handler, 203 attr->send_cq = ib_create_cq(dev, send_cq_handler,
202 rds_iw_cq_event_handler, 204 rds_iw_cq_event_handler,
203 context, send_size, 0); 205 context, &cq_attr);
204 if (IS_ERR(attr->send_cq)) { 206 if (IS_ERR(attr->send_cq)) {
205 ret = PTR_ERR(attr->send_cq); 207 ret = PTR_ERR(attr->send_cq);
206 attr->send_cq = NULL; 208 attr->send_cq = NULL;
@@ -208,9 +210,10 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
208 goto out; 210 goto out;
209 } 211 }
210 212
213 cq_attr.cqe = recv_size;
211 attr->recv_cq = ib_create_cq(dev, recv_cq_handler, 214 attr->recv_cq = ib_create_cq(dev, recv_cq_handler,
212 rds_iw_cq_event_handler, 215 rds_iw_cq_event_handler,
213 context, recv_size, 0); 216 context, &cq_attr);
214 if (IS_ERR(attr->recv_cq)) { 217 if (IS_ERR(attr->recv_cq)) {
215 ret = PTR_ERR(attr->recv_cq); 218 ret = PTR_ERR(attr->recv_cq);
216 attr->recv_cq = NULL; 219 attr->recv_cq = NULL;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 88eb994edd36..f4cfa764d76f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -855,6 +855,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
855 struct svcxprt_rdma *listen_rdma; 855 struct svcxprt_rdma *listen_rdma;
856 struct svcxprt_rdma *newxprt = NULL; 856 struct svcxprt_rdma *newxprt = NULL;
857 struct rdma_conn_param conn_param; 857 struct rdma_conn_param conn_param;
858 struct ib_cq_init_attr cq_attr = {};
858 struct ib_qp_init_attr qp_attr; 859 struct ib_qp_init_attr qp_attr;
859 struct ib_device_attr devattr; 860 struct ib_device_attr devattr;
860 int uninitialized_var(dma_mr_acc); 861 int uninitialized_var(dma_mr_acc);
@@ -907,22 +908,22 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
907 dprintk("svcrdma: error creating PD for connect request\n"); 908 dprintk("svcrdma: error creating PD for connect request\n");
908 goto errout; 909 goto errout;
909 } 910 }
911 cq_attr.cqe = newxprt->sc_sq_depth;
910 newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device, 912 newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
911 sq_comp_handler, 913 sq_comp_handler,
912 cq_event_handler, 914 cq_event_handler,
913 newxprt, 915 newxprt,
914 newxprt->sc_sq_depth, 916 &cq_attr);
915 0);
916 if (IS_ERR(newxprt->sc_sq_cq)) { 917 if (IS_ERR(newxprt->sc_sq_cq)) {
917 dprintk("svcrdma: error creating SQ CQ for connect request\n"); 918 dprintk("svcrdma: error creating SQ CQ for connect request\n");
918 goto errout; 919 goto errout;
919 } 920 }
921 cq_attr.cqe = newxprt->sc_max_requests;
920 newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device, 922 newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
921 rq_comp_handler, 923 rq_comp_handler,
922 cq_event_handler, 924 cq_event_handler,
923 newxprt, 925 newxprt,
924 newxprt->sc_max_requests, 926 &cq_attr);
925 0);
926 if (IS_ERR(newxprt->sc_rq_cq)) { 927 if (IS_ERR(newxprt->sc_rq_cq)) {
927 dprintk("svcrdma: error creating RQ CQ for connect request\n"); 928 dprintk("svcrdma: error creating RQ CQ for connect request\n");
928 goto errout; 929 goto errout;
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 6f6b8a56212a..52df265b472a 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -644,6 +644,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
644{ 644{
645 struct ib_device_attr *devattr = &ia->ri_devattr; 645 struct ib_device_attr *devattr = &ia->ri_devattr;
646 struct ib_cq *sendcq, *recvcq; 646 struct ib_cq *sendcq, *recvcq;
647 struct ib_cq_init_attr cq_attr = {};
647 int rc, err; 648 int rc, err;
648 649
649 /* check provider's send/recv wr limits */ 650 /* check provider's send/recv wr limits */
@@ -691,9 +692,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
691 init_waitqueue_head(&ep->rep_connect_wait); 692 init_waitqueue_head(&ep->rep_connect_wait);
692 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); 693 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
693 694
695 cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1;
694 sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall, 696 sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
695 rpcrdma_cq_async_error_upcall, ep, 697 rpcrdma_cq_async_error_upcall, ep, &cq_attr);
696 ep->rep_attr.cap.max_send_wr + 1, 0);
697 if (IS_ERR(sendcq)) { 698 if (IS_ERR(sendcq)) {
698 rc = PTR_ERR(sendcq); 699 rc = PTR_ERR(sendcq);
699 dprintk("RPC: %s: failed to create send CQ: %i\n", 700 dprintk("RPC: %s: failed to create send CQ: %i\n",
@@ -708,9 +709,9 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
708 goto out2; 709 goto out2;
709 } 710 }
710 711
712 cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1;
711 recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall, 713 recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
712 rpcrdma_cq_async_error_upcall, ep, 714 rpcrdma_cq_async_error_upcall, ep, &cq_attr);
713 ep->rep_attr.cap.max_recv_wr + 1, 0);
714 if (IS_ERR(recvcq)) { 715 if (IS_ERR(recvcq)) {
715 rc = PTR_ERR(recvcq); 716 rc = PTR_ERR(recvcq);
716 dprintk("RPC: %s: failed to create recv CQ: %i\n", 717 dprintk("RPC: %s: failed to create recv CQ: %i\n",