author	Andy Grover <andy.grover@oracle.com>	2009-07-17 09:13:33 -0400
committer	David S. Miller <davem@davemloft.net>	2009-07-20 11:03:13 -0400
commit	a870d62726721785c34fa73d852bd35e5d1b295b (patch)
tree	bab3e33c8030e296477d4719817dfbe75303f04f /net/rds/ib_rdma.c
parent	edacaeae52ade6cbb3a0704db32a9fb4a219dee3 (diff)
RDS/IB: Always use PAGE_SIZE for FMR page size
While FMRs allow significant flexibility in what size of pages they can use, we really just want FMR pages to match CPU page size. Roland says we can count on this always being supported, so this simplifies things.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
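For context (not part of the patch): PAGE_SIZE and PAGE_MASK are both derived from PAGE_SHIFT, so pointing the FMR attributes at PAGE_SHIFT makes every shift and mask in the diff below operate in CPU-page units. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12) purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for the kernel macros; 4 KiB pages assumed for illustration. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)   /* 4096 */
    #define PAGE_MASK  (~(PAGE_SIZE - 1))    /* low PAGE_SHIFT bits cleared */

    int main(void)
    {
    	uint64_t dma_addr = 0x12345678;

    	/* Page-aligned base and the in-page offset of an address. */
    	uint64_t page_base = dma_addr & PAGE_MASK;
    	uint64_t offset    = dma_addr & ~PAGE_MASK;

    	/* Number of whole pages covered by a byte length. */
    	uint64_t len   = 3 * PAGE_SIZE;
    	uint64_t pages = len >> PAGE_SHIFT;

    	printf("base=%#llx offset=%#llx pages=%llu\n",
    	       (unsigned long long)page_base,
    	       (unsigned long long)offset,
    	       (unsigned long long)pages);
    	return 0;
    }

An address is page-aligned exactly when (addr & ~PAGE_MASK) == 0, which is the test the patch applies to each DMA segment below.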
Diffstat (limited to 'net/rds/ib_rdma.c')
-rw-r--r--	net/rds/ib_rdma.c	12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 81033af93020..ef3ab5b7283e 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -211,7 +211,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
 
 	pool->fmr_attr.max_pages = fmr_message_size;
 	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
-	pool->fmr_attr.page_shift = rds_ibdev->fmr_page_shift;
+	pool->fmr_attr.page_shift = PAGE_SHIFT;
 	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;
 
 	/* We never allow more than max_items MRs to be allocated.
@@ -349,13 +349,13 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
 		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
 		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
-		if (dma_addr & ~rds_ibdev->fmr_page_mask) {
+		if (dma_addr & ~PAGE_MASK) {
 			if (i > 0)
 				return -EINVAL;
 			else
 				++page_cnt;
 		}
-		if ((dma_addr + dma_len) & ~rds_ibdev->fmr_page_mask) {
+		if ((dma_addr + dma_len) & ~PAGE_MASK) {
 			if (i < sg_dma_len - 1)
 				return -EINVAL;
 			else
@@ -365,7 +365,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
 		len += dma_len;
 	}
 
-	page_cnt += len >> rds_ibdev->fmr_page_shift;
+	page_cnt += len >> PAGE_SHIFT;
 	if (page_cnt > fmr_message_size)
 		return -EINVAL;
 
@@ -378,9 +378,9 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
 		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
 		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
-		for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size)
+		for (j = 0; j < dma_len; j += PAGE_SIZE)
 			dma_pages[page_cnt++] =
-				(dma_addr & rds_ibdev->fmr_page_mask) + j;
+				(dma_addr & PAGE_MASK) + j;
 	}
 
 	ret = ib_map_phys_fmr(ibmr->fmr,
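To make the alignment rules in rds_ib_map_fmr concrete outside the kernel tree, here is a self-contained sketch of the same checks; struct seg and the sample segment values are invented for illustration, and -1 stands in for -EINVAL. Because an FMR maps whole pages, only the first scatterlist segment may start off a page boundary and only the last may end off one; any other unaligned segment is rejected.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                  /* 4 KiB pages assumed */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    struct seg { uint64_t dma_addr; unsigned int dma_len; };

    /* Returns the page count on success, -1 if the list cannot be mapped. */
    static int count_fmr_pages(const struct seg *sg, int n)
    {
    	uint64_t len = 0;
    	int page_cnt = 0, i;

    	for (i = 0; i < n; i++) {
    		/* Only the first segment may start mid-page... */
    		if (sg[i].dma_addr & ~PAGE_MASK) {
    			if (i > 0)
    				return -1;
    			++page_cnt;	/* partial leading page */
    		}
    		/* ...and only the last segment may end mid-page. */
    		if ((sg[i].dma_addr + sg[i].dma_len) & ~PAGE_MASK) {
    			if (i < n - 1)
    				return -1;
    			++page_cnt;	/* partial trailing page */
    		}
    		len += sg[i].dma_len;
    	}
    	return page_cnt + (int)(len >> PAGE_SHIFT);
    }

    int main(void)
    {
    	struct seg ok[2] = {
    		{ 0x1000, 4096 },	/* page-aligned throughout */
    		{ 0x2000, 4096 },
    	};
    	struct seg bad[2] = {
    		{ 0x1800, 2048 },	/* unaligned start on first seg: allowed */
    		{ 0x2800, 4096 },	/* second seg starts mid-page: rejected */
    	};

    	printf("ok:  %d pages\n", count_fmr_pages(ok, 2));
    	printf("bad: %d\n", count_fmr_pages(bad, 2));
    	return 0;
    }

The "bad" list fails because its second segment starts mid-page, which would leave a hole inside the pages the FMR maps; with PAGE_MASK now taken from the CPU page size, this check no longer depends on per-device FMR page geometry.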