author		Joachim Fenkes <fenkes@de.ibm.com>	2007-10-16 11:31:14 -0400
committer	Roland Dreier <rolandd@cisco.com>	2007-10-18 00:47:24 -0400
commit		abc39d3672d8af4bf6c943faf85fa8877caccf7e
tree		c90456ddae53b1ce39cd604404d3810e3b499a27	/drivers/infiniband/hw/ehca/ehca_mrmw.c
parent		8c08d50d4fc52a9367c356ebbeb194c30fbc7ac8
IB/ehca: Change meaning of hca_cap_mr_pgsize
ehca_shca.hca_cap_mr_pgsize now contains all supported page sizes ORed
together. This makes some checks easier to code and understand, plus
we can return this value verbatim in query_hca(), fixing a problem
with SRP (reported by Anton Blanchard -- thanks!).
Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
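
[Editorial note: as a rough illustration of what "all supported page sizes ORed together" buys, here is a minimal user-space C sketch. The PGSIZE_* macros and the mask value are assumptions made up for this example, not the driver's capability-parsing code; only the shape of the checks mirrors the patch.]

#include <stdint.h>
#include <stdio.h>

/* Hypothetical page-size bits; the real driver derives its mask from
 * firmware capability bits, which this commit does not show. */
#define PGSIZE_4K	0x1000UL
#define PGSIZE_64K	0x10000UL
#define PGSIZE_1M	0x100000UL
#define PGSIZE_16M	0x1000000UL

int main(void)
{
	/* hca_cap_mr_pgsize-style mask: every supported size ORed together */
	uint64_t cap_mr_pgsize = PGSIZE_4K | PGSIZE_64K | PGSIZE_1M | PGSIZE_16M;

	/* "is this page size supported?" becomes a single bit test,
	 * the same style of test the patch uses in ehca_alloc_fmr() */
	uint64_t wanted = 1UL << 16;	/* 64K */
	printf("64K supported: %d\n", !!(wanted & cap_mr_pgsize));

	/* largest supported size = highest set bit of the mask, which is
	 * what the reworked ehca_get_max_hwpage_size() computes via ilog2() */
	uint64_t max = cap_mr_pgsize;
	while (max & (max - 1))
		max &= max - 1;		/* clear lower bits until one remains */
	printf("max hw page size: 0x%llx\n", (unsigned long long)max);
	return 0;
}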
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_mrmw.c')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_mrmw.c	38
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index b9a788c4fdd1..bb9791555f49 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -79,9 +79,7 @@ static u32 ehca_encode_hwpage_size(u32 pgsize)
 
 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 {
-	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
-		return EHCA_MR_PGSIZE16M;
-	return EHCA_MR_PGSIZE4K;
+	return 1UL << ilog2(shca->hca_cap_mr_pgsize);
 }
 
 static struct ehca_mr *ehca_mr_new(void)
@@ -288,7 +286,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		container_of(pd->device, struct ehca_shca, ib_device);
 	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
 	struct ehca_mr_pginfo pginfo;
-	int ret;
+	int ret, page_shift;
 	u32 num_kpages;
 	u32 num_hwpages;
 	u64 hwpage_size;
@@ -343,19 +341,20 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	/* determine number of MR pages */
 	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
 	/* select proper hw_pgsize */
-	if (ehca_mr_largepage &&
-	    (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)) {
-		int page_shift = PAGE_SHIFT;
-		if (e_mr->umem->hugetlb) {
-			/* determine page_shift, clamp between 4K and 16M */
-			page_shift = (fls64(length - 1) + 3) & ~3;
-			page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
-					 EHCA_MR_PGSHIFT16M);
-		}
-		hwpage_size = 1UL << page_shift;
-	} else
-		hwpage_size = EHCA_MR_PGSIZE4K; /* ehca1 only supports 4k */
-	ehca_dbg(pd->device, "hwpage_size=%lx", hwpage_size);
+	page_shift = PAGE_SHIFT;
+	if (e_mr->umem->hugetlb) {
+		/* determine page_shift, clamp between 4K and 16M */
+		page_shift = (fls64(length - 1) + 3) & ~3;
+		page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
+				 EHCA_MR_PGSHIFT16M);
+	}
+	hwpage_size = 1UL << page_shift;
+
+	/* now that we have the desired page size, shift until it's
+	 * supported, too. 4K is always supported, so this terminates.
+	 */
+	while (!(hwpage_size & shca->hca_cap_mr_pgsize))
+		hwpage_size >>= 4;
 
 reg_user_mr_fallback:
 	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
@@ -801,8 +800,9 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 		ib_fmr = ERR_PTR(-EINVAL);
 		goto alloc_fmr_exit0;
 	}
-	hw_pgsize = ehca_get_max_hwpage_size(shca);
-	if ((1 << fmr_attr->page_shift) != hw_pgsize) {
+
+	hw_pgsize = 1 << fmr_attr->page_shift;
+	if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
 		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
 			 fmr_attr->page_shift);
 		ib_fmr = ERR_PTR(-EINVAL);
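
[Editorial note: the core of the ehca_reg_user_mr() change is the fall-back loop: pick the page size the mapping would like (clamped between 4K and 16M for hugetlb regions), then shift it down by a factor of 16 until it lands on a size the adapter advertises in hca_cap_mr_pgsize. Below is a small stand-alone C sketch of that selection logic under the 4K/64K/1M/16M size ladder; the constants, the select_hwpage_size() helper, and the rounding loop are illustrative assumptions, not the driver code.]

#include <stdint.h>
#include <stdio.h>

#define MR_PGSHIFT4K	12
#define MR_PGSHIFT16M	24

/* Pick a hardware page size for a region of "length" bytes: round up to
 * the smallest power-of-16 page that covers it (clamped 4K..16M), then
 * step down 16M -> 1M -> 64K -> 4K until the capability mask advertises
 * the size.  4K is assumed always supported, so the loop terminates. */
static uint64_t select_hwpage_size(uint64_t length, uint64_t cap_mask)
{
	int page_shift = MR_PGSHIFT4K;
	uint64_t hwpage_size;

	/* roughly mirrors the fls64()/min()/max() arithmetic in the patch */
	while ((1ULL << page_shift) < length && page_shift < MR_PGSHIFT16M)
		page_shift += 4;

	hwpage_size = 1ULL << page_shift;
	while (!(hwpage_size & cap_mask))
		hwpage_size >>= 4;
	return hwpage_size;
}

int main(void)
{
	/* adapter supports 4K and 64K only */
	uint64_t cap = (1ULL << 12) | (1ULL << 16);

	/* a 16M hugetlb region would like 16M pages, but settles for 64K */
	printf("selected hw page size: 0x%llx\n",
	       (unsigned long long)select_hwpage_size(16ULL << 20, cap));
	return 0;
}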