Diffstat (limited to 'net')
-rw-r--r--  net/rds/iw.c       |  2
-rw-r--r--  net/rds/iw.h       |  1
-rw-r--r--  net/rds/iw_rdma.c  | 28
-rw-r--r--  net/rds/iw_send.c  |  2
4 files changed, 11 insertions(+), 22 deletions(-)
diff --git a/net/rds/iw.c b/net/rds/iw.c
index d16e1cbc8e83..2e6b495502c1 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -83,8 +83,6 @@ void rds_iw_add_one(struct ib_device *device)
 	rds_iwdev->max_wrs = dev_attr->max_qp_wr;
 	rds_iwdev->max_sge = min(dev_attr->max_sge, RDS_IW_MAX_SGE);
 
-	rds_iwdev->page_shift = max(PAGE_SHIFT, ffs(dev_attr->page_size_cap) - 1);
-
 	rds_iwdev->dev = device;
 	rds_iwdev->pd = ib_alloc_pd(device);
 	if (IS_ERR(rds_iwdev->pd))
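
The removed line above derived a per-device page shift from the adapter's page_size_cap bitmask via ffs(). A minimal userspace sketch of that derivation, assuming 4 KiB pages; the SKETCH_PAGE_SHIFT macro and the example capability value are illustrative, not code from this patch:

/*
 * Hedged sketch: deriving a page shift from a device capability bitmask.
 * ffs() returns the 1-based index of the lowest set bit, so the smallest
 * supported page size maps to ffs(page_size_cap) - 1.
 */
#include <strings.h>	/* ffs() in userspace; the kernel provides its own */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12	/* stand-in for PAGE_SHIFT, assuming 4 KiB pages */

int main(void)
{
	unsigned int page_size_cap = 0x1000;	/* pretend the HCA supports only 4 KiB pages */
	int dev_shift = ffs(page_size_cap) - 1;	/* lowest set bit is bit 12 -> shift 12 */
	int page_shift = dev_shift > SKETCH_PAGE_SHIFT ? dev_shift : SKETCH_PAGE_SHIFT;

	printf("derived page shift: %d\n", page_shift);	/* prints 12 */
	return 0;
}

After the patch the transport drops this derivation and simply uses PAGE_SHIFT everywhere, as the remaining hunks show.
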
diff --git a/net/rds/iw.h b/net/rds/iw.h
index 0715dde323e7..dd72b62bd506 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -181,7 +181,6 @@ struct rds_iw_device {
 	struct ib_pd *pd;
 	struct ib_mr *mr;
 	struct rds_iw_mr_pool *mr_pool;
-	int page_shift;
 	int max_sge;
 	unsigned int max_wrs;
 	unsigned int dma_local_lkey:1;
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index dcdb37da80f2..de4a1b16bf7b 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -263,18 +263,12 @@ static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
 }
 
 static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
-			struct rds_iw_scatterlist *sg,
-			unsigned int dma_page_shift)
+			struct rds_iw_scatterlist *sg)
 {
 	struct ib_device *dev = rds_iwdev->dev;
 	u64 *dma_pages = NULL;
-	u64 dma_mask;
-	unsigned int dma_page_size;
 	int i, j, ret;
 
-	dma_page_size = 1 << dma_page_shift;
-	dma_mask = dma_page_size - 1;
-
 	WARN_ON(sg->dma_len);
 
 	sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
@@ -295,18 +289,18 @@ static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
 		sg->bytes += dma_len;
 
 		end_addr = dma_addr + dma_len;
-		if (dma_addr & dma_mask) {
+		if (dma_addr & PAGE_MASK) {
 			if (i > 0)
 				goto out_unmap;
-			dma_addr &= ~dma_mask;
+			dma_addr &= ~PAGE_MASK;
 		}
-		if (end_addr & dma_mask) {
+		if (end_addr & PAGE_MASK) {
 			if (i < sg->dma_len - 1)
 				goto out_unmap;
-			end_addr = (end_addr + dma_mask) & ~dma_mask;
+			end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK;
 		}
 
-		sg->dma_npages += (end_addr - dma_addr) >> dma_page_shift;
+		sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
 	}
 
 	/* Now gather the dma addrs into one list */
@@ -325,8 +319,8 @@ static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
 		u64 end_addr;
 
 		end_addr = dma_addr + dma_len;
-		dma_addr &= ~dma_mask;
-		for (; dma_addr < end_addr; dma_addr += dma_page_size)
+		dma_addr &= ~PAGE_MASK;
+		for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
 			dma_pages[j++] = dma_addr;
 		BUG_ON(j > sg->dma_npages);
 	}
@@ -727,7 +721,7 @@ static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
 	f_wr.wr.fast_reg.rkey = mapping->m_rkey;
 	f_wr.wr.fast_reg.page_list = ibmr->page_list;
 	f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
-	f_wr.wr.fast_reg.page_shift = ibmr->device->page_shift;
+	f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 	f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
 				IB_ACCESS_REMOTE_READ |
 				IB_ACCESS_REMOTE_WRITE;
@@ -780,9 +774,7 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
 
 	rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);
 
-	dma_pages = rds_iw_map_scatterlist(rds_iwdev,
-			&mapping->m_sg,
-			rds_iwdev->page_shift);
+	dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
 	if (IS_ERR(dma_pages)) {
 		ret = PTR_ERR(dma_pages);
 		dma_pages = NULL;
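
With the per-call dma_page_shift parameter gone, rds_iw_map_scatterlist counts the pages spanned by each mapped segment at CPU-page granularity. A minimal sketch of that page-counting arithmetic, assuming 4 KiB pages; SKETCH_PAGE_SHIFT and sketch_dma_npages() are illustrative stand-ins, not kernel code:

/*
 * Hedged sketch: one common way to count how many fixed-size pages a
 * DMA range [addr, addr + len) touches, using only a page shift.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12	/* assume 4 KiB pages, like PAGE_SHIFT on most arches */

static uint64_t sketch_dma_npages(uint64_t addr, uint64_t len)
{
	uint64_t first, last;

	if (!len)
		return 0;
	first = addr >> SKETCH_PAGE_SHIFT;		/* page frame of the first byte */
	last = (addr + len - 1) >> SKETCH_PAGE_SHIFT;	/* page frame of the last byte */
	return last - first + 1;
}

int main(void)
{
	/* 5000 bytes starting 100 bytes into a page touch two 4 KiB pages. */
	printf("%llu\n", (unsigned long long)sketch_dma_npages(0x1000 + 100, 5000));
	return 0;
}

Each counted page contributes one entry to the dma_pages array, which is why the fast_reg.page_shift fields elsewhere in this patch can simply be set to PAGE_SHIFT.
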
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 44a6a0551f28..1f5abe3cf2b4 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -779,7 +779,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
 	send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey;
 	send->s_wr.wr.fast_reg.page_list = send->s_page_list;
 	send->s_wr.wr.fast_reg.page_list_len = nent;
-	send->s_wr.wr.fast_reg.page_shift = rds_iwdev->page_shift;
+	send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
 	send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE;
 	send->s_wr.wr.fast_reg.iova_start = sg_addr;
 