author    Bryan O'Sullivan <bos@pathscale.com>  2006-09-28 12:00:07 -0400
committer Roland Dreier <rolandd@cisco.com>     2006-09-28 14:16:41 -0400
commit    6a553af286653818bb5831f1b351eefdc8a93b61
tree      f37156843f767f7609b266dd42675c812767966a /drivers/infiniband/hw/ipath/ipath_ruc.c
parent    10aeb0e6d8823c1cccf9edc8401c848745c128be
IB/ipath: Ensure that PD of MR matches PD of QP when checking the Rkey
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_ruc.c')
 drivers/infiniband/hw/ipath/ipath_ruc.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 5c1da2d25e03..17ae23fb1e40 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -108,7 +108,6 @@ void ipath_insert_rnr_queue(struct ipath_qp *qp)
 
 static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
 {
-	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
 	int user = to_ipd(qp->ibqp.pd)->user;
 	int i, j, ret;
 	struct ib_wc wc;
@@ -119,8 +118,7 @@ static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
 			continue;
 		/* Check LKEY */
 		if ((user && wqe->sg_list[i].lkey == 0) ||
-		    !ipath_lkey_ok(&dev->lk_table,
-				   &qp->r_sg_list[j], &wqe->sg_list[i],
+		    !ipath_lkey_ok(qp, &qp->r_sg_list[j], &wqe->sg_list[i],
 				   IB_ACCESS_LOCAL_WRITE))
 			goto bad_lkey;
 		qp->r_len += wqe->sg_list[i].length;
@@ -326,7 +324,7 @@ again:
 	case IB_WR_RDMA_WRITE:
 		if (wqe->length == 0)
 			break;
-		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
+		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
 					    wqe->wr.wr.rdma.remote_addr,
 					    wqe->wr.wr.rdma.rkey,
 					    IB_ACCESS_REMOTE_WRITE))) {
@@ -350,7 +348,7 @@ again:
 		break;
 
 	case IB_WR_RDMA_READ:
-		if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
+		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
 					    wqe->wr.wr.rdma.remote_addr,
 					    wqe->wr.wr.rdma.rkey,
 					    IB_ACCESS_REMOTE_READ)))
@@ -365,7 +363,7 @@ again:
 
 	case IB_WR_ATOMIC_CMP_AND_SWP:
 	case IB_WR_ATOMIC_FETCH_AND_ADD:
-		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
+		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
 					    wqe->wr.wr.rdma.remote_addr,
 					    wqe->wr.wr.rdma.rkey,
 					    IB_ACCESS_REMOTE_ATOMIC)))
@@ -575,8 +573,7 @@ int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
 		}
 		if (wr->sg_list[i].length == 0)
 			continue;
-		if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
-				   &wqe->sg_list[j], &wr->sg_list[i],
+		if (!ipath_lkey_ok(qp, &wqe->sg_list[j], &wr->sg_list[i],
 				   acc)) {
 			spin_unlock_irqrestore(&qp->s_lock, flags);
 			ret = -EINVAL;
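Note on the change: the diff above threads the QP into ipath_lkey_ok()/ipath_rkey_ok() in place of the device-wide key table, so the key-check code (defined outside this file) can compare the protection domain of the looked-up memory region against the QP's own PD, as the subject line describes. The sketch below is only an illustration of that comparison, not the driver's actual implementation; the names mr_sketch, qp_sketch, and rkey_pd_ok are hypothetical stand-ins, and the body is an assumption about the shape of the check.

#include <stdint.h>
#include <stddef.h>

struct ib_pd;                         /* protection domain, opaque here */

struct mr_sketch {
	struct ib_pd *pd;             /* PD the region was registered under */
	uint32_t      lkey;           /* key the caller's rkey must match */
};

struct qp_sketch {
	struct ib_pd *pd;             /* stands in for qp->ibqp.pd */
};

/*
 * Illustrative only: a remote key is acceptable only if it resolves to a
 * region and that region's PD is the same PD the QP was created on --
 * the check made possible by passing the QP into ipath_rkey_ok().
 */
static int rkey_pd_ok(const struct qp_sketch *qp,
		      const struct mr_sketch *mr, uint32_t rkey)
{
	if (mr == NULL || mr->lkey != rkey)
		return 0;             /* stale or unknown key */
	if (mr->pd != qp->pd)
		return 0;             /* MR registered on a different PD */
	return 1;
}

With the old signatures only the key table was available at lookup time, so a request could pass the key check even if the region had been registered under a different protection domain; handing the QP to the check makes the PD comparison possible.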