author     Roland Dreier <rolandd@cisco.com>    2007-06-08 02:24:39 -0400
committer  Roland Dreier <rolandd@cisco.com>    2007-06-08 02:24:39 -0400
commit     a4cd7ed86ff511aebcc97675937039f2321d6987 (patch)
tree       1fdcec754e2086a0f67022c967d55fab12f7aec4 /drivers
parent     bf2944bd56c7a48cc3962a860dbc4ceee6b1ace8 (diff)
IB/mlx4: Make sure RQ allocation is always valid
QPs attached to an SRQ must never have their own RQ, and QPs not
attached to SRQs must have an RQ with at least 1 entry.  Enforce
all of this in set_rq_size().

Based on a patch by Eli Cohen <eli@mellanox.co.il>.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
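For context, a minimal sketch of how a kernel consumer might request an RC QP
that hangs off an SRQ under the rule this patch enforces; the function name and
the SQ sizing numbers here are illustrative, not taken from the patch:

#include <rdma/ib_verbs.h>

/* Illustrative only: create an RC QP whose receives all come from an SRQ. */
static struct ib_qp *create_rc_qp_on_srq(struct ib_pd *pd, struct ib_cq *cq,
					 struct ib_srq *srq)
{
	struct ib_qp_init_attr attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.srq	     = srq,	/* receives are posted to the SRQ...   */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_RC,
		.cap	     = {
			.max_send_wr  = 64,	/* illustrative SQ sizing       */
			.max_send_sge = 1,
			.max_recv_wr  = 0,	/* ...so the QP must have no RQ */
			.max_recv_sge = 0,
		},
	};

	/* With this patch, a nonzero max_recv_wr together with an SRQ makes
	 * set_rq_size() fail with -EINVAL instead of silently misbehaving. */
	return ib_create_qp(pd, &attr);
}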
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cd2297586980..5c6d05427a0f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -189,18 +189,28 @@ static int send_wqe_overhead(enum ib_qp_type type)
 }
 
 static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
-		       struct mlx4_ib_qp *qp)
+		       int is_user, int has_srq, struct mlx4_ib_qp *qp)
 {
 	/* Sanity check RQ size before proceeding */
 	if (cap->max_recv_wr  > dev->dev->caps.max_wqes  ||
 	    cap->max_recv_sge > dev->dev->caps.max_rq_sg)
 		return -EINVAL;
 
-	qp->rq.max = cap->max_recv_wr ? roundup_pow_of_two(cap->max_recv_wr) : 0;
+	if (has_srq) {
+		/* QPs attached to an SRQ should have no RQ */
+		if (cap->max_recv_wr)
+			return -EINVAL;
 
-	qp->rq.wqe_shift = ilog2(roundup_pow_of_two(cap->max_recv_sge *
-						    sizeof (struct mlx4_wqe_data_seg)));
-	qp->rq.max_gs    = (1 << qp->rq.wqe_shift) / sizeof (struct mlx4_wqe_data_seg);
+		qp->rq.max = qp->rq.max_gs = 0;
+	} else {
+		/* HW requires >= 1 RQ entry with >= 1 gather entry */
+		if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge))
+			return -EINVAL;
+
+		qp->rq.max       = roundup_pow_of_two(max(1, cap->max_recv_wr));
+		qp->rq.max_gs    = roundup_pow_of_two(max(1, cap->max_recv_sge));
+		qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
+	}
 
 	cap->max_recv_wr  = qp->rq.max;
 	cap->max_recv_sge = qp->rq.max_gs;
@@ -285,7 +295,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	qp->sq.head = 0;
 	qp->sq.tail = 0;
 
-	err = set_rq_size(dev, &init_attr->cap, qp);
+	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
 	if (err)
 		goto err;
 
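For illustration, a small stand-alone sketch of the new RQ sizing arithmetic in
the non-SRQ branch; the helper re-implementations and the 16-byte size assumed
for struct mlx4_wqe_data_seg are userspace approximations for demonstration,
not the kernel code itself:

#include <stdio.h>

/* Userspace stand-ins for the kernel helpers used in set_rq_size(). */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int l = 0;

	while (n > 1) {
		n >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	unsigned int max_recv_wr  = 100;	/* requested RQ depth            */
	unsigned int max_recv_sge = 3;		/* requested scatter/gather SGEs */
	unsigned int seg_size     = 16;		/* assumed sizeof(struct mlx4_wqe_data_seg) */

	unsigned int rq_max    = roundup_pow_of_two(max_recv_wr  > 1 ? max_recv_wr  : 1);
	unsigned int rq_max_gs = roundup_pow_of_two(max_recv_sge > 1 ? max_recv_sge : 1);
	unsigned int wqe_shift = ilog2(rq_max_gs * seg_size);

	/* 100 WRs round up to 128 entries, 3 SGEs round up to 4, so each RQ
	 * WQE is 4 * 16 = 64 bytes and wqe_shift comes out as 6. */
	printf("rq.max = %u, rq.max_gs = %u, wqe_shift = %u (%u-byte stride)\n",
	       rq_max, rq_max_gs, wqe_shift, 1u << wqe_shift);
	return 0;
}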