author	Shiraz, Saleem <shiraz.saleem@intel.com>	2019-02-12 11:52:24 -0500
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-02-13 11:00:43 -0500
commit	36d577089d207c4832c4f24902167dcd47635ebe (patch)
tree	4cebc8583920876b731c1a4f31c87c7f254e0286
parent	e8ac9389f0d7c74fd988a5ce77765588f74dacd2 (diff)
RDMA/rdmavt: Adapt to handle non-uniform sizes on umem SGEs
rdmavt expects a uniform size on all umem SGEs, which is currently
PAGE_SIZE. Adapt to a umem API change which could return non-uniform
sized SGEs due to combining contiguous PAGE_SIZE regions into an SGE.
Use the for_each_sg_page variant to unfold the larger SGEs into a list
of PAGE_SIZE elements.

Additionally, purge umem->page_shift usage in the driver, as it is only
relevant for ODP MRs. Use the system page size and shift instead.

Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
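For context, for_each_sg_page() walks a scatterlist one PAGE_SIZE page
at a time regardless of how large any individual SGE is, so an SGE that
the umem core built by merging contiguous pages is still unfolded into
per-page entries. A minimal sketch of the pattern this patch adopts
(consume_page() is a hypothetical placeholder, not a kernel or driver
API):

	struct sg_page_iter sg_iter;

	/*
	 * Yields one struct page per PAGE_SIZE unit, even when the
	 * backing SGE spans several contiguous pages.
	 */
	for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		void *vaddr = page_address(sg_page_iter_page(&sg_iter));

		consume_page(vaddr, PAGE_SIZE); /* hypothetical consumer */
	}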
-rw-r--r--	drivers/infiniband/sw/rdmavt/mr.c	18
1 file changed, 8 insertions, 10 deletions
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 8b1c1e8dd7ef..728795043496 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -381,8 +381,8 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
 	struct rvt_mr *mr;
 	struct ib_umem *umem;
-	struct scatterlist *sg;
-	int n, m, entry;
+	struct sg_page_iter sg_iter;
+	int n, m;
 	struct ib_mr *ret;
 
 	if (length == 0)
@@ -407,23 +407,21 @@ struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->mr.access_flags = mr_access_flags;
 	mr->umem = umem;
 
-	mr->mr.page_shift = umem->page_shift;
+	mr->mr.page_shift = PAGE_SHIFT;
 	m = 0;
 	n = 0;
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+	for_each_sg_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
 		void *vaddr;
 
-		vaddr = page_address(sg_page(sg));
+		vaddr = page_address(sg_page_iter_page(&sg_iter));
 		if (!vaddr) {
 			ret = ERR_PTR(-EINVAL);
 			goto bail_inval;
 		}
 		mr->mr.map[m]->segs[n].vaddr = vaddr;
-		mr->mr.map[m]->segs[n].length = BIT(umem->page_shift);
-		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr,
-				      BIT(umem->page_shift));
-		n++;
-		if (n == RVT_SEGSZ) {
+		mr->mr.map[m]->segs[n].length = PAGE_SIZE;
+		trace_rvt_mr_user_seg(&mr->mr, m, n, vaddr, PAGE_SIZE);
+		if (++n == RVT_SEGSZ) {
 			m++;
 			n = 0;
 		}