author		Hoang-Nam Nguyen <hnguyen@de.ibm.com>	2007-07-20 10:01:51 -0400
committer	Roland Dreier <rolandd@cisco.com>	2007-07-21 00:19:43 -0400
commit		5bb7d9290cd23a55906e4fe7a7fedecf29468c81 (patch)
tree		a22071c38078848296c554281f3c0d6acdfa2bf3 /drivers/infiniband/hw/ehca/ehca_mrmw.c
parent		23f1b38481596ad77e5f51562977b12c8418eee3 (diff)
IB/ehca: Support large page MRs
Add support for MR pages larger than 4K on eHCA2. This reduces firmware
memory consumption. If enabled via the mr_largepage module parameter, the
MR page size will be determined based on the MR length and the hardware
capabilities -- if the MR is >= 16M, 16M pages are used, for example.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_mrmw.c')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_mrmw.c	371
1 file changed, 311 insertions(+), 60 deletions(-)
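In short, the patch picks the largest supported hardware page size that the MR length allows. A minimal standalone sketch of that selection policy (illustration only, not driver code; select_hwpage_size is a hypothetical name, and the constants mirror the enum ehca_mr_pgsize added in the diff below):

#include <stdint.h>

/* Values match the enum ehca_mr_pgsize introduced by the patch. */
#define EHCA_MR_PGSIZE4K   0x1000UL
#define EHCA_MR_PGSIZE64K  0x10000UL
#define EHCA_MR_PGSIZE1M   0x100000UL
#define EHCA_MR_PGSIZE16M  0x1000000UL

/*
 * Pick the largest hardware page size that does not exceed the MR
 * length, falling back to 4K when large pages are disabled or the
 * HCA cannot do 16M pages.
 */
static uint64_t select_hwpage_size(uint64_t length, int largepage_enabled,
				   int hca_has_16m_pages, uint64_t kpage_size)
{
	if (!largepage_enabled || !hca_has_16m_pages)
		return EHCA_MR_PGSIZE4K;
	if (length <= EHCA_MR_PGSIZE4K && kpage_size == EHCA_MR_PGSIZE4K)
		return EHCA_MR_PGSIZE4K;
	if (length <= EHCA_MR_PGSIZE64K)
		return EHCA_MR_PGSIZE64K;
	if (length <= EHCA_MR_PGSIZE1M)
		return EHCA_MR_PGSIZE1M;
	return EHCA_MR_PGSIZE16M;	/* MRs of 16M and up use 16M pages */
}

Note that the user-MR registration path in the diff also falls back to the kernel page size if the firmware rejects a large-page mapping, which happens when the kernel pages backing a hardware page turn out not to be physically contiguous.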
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 9f4c9d46e8ef..c1b868b79d67 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -5,6 +5,7 @@
  *
  *  Authors: Dietmar Decker <ddecker@de.ibm.com>
  *           Christoph Raisch <raisch@de.ibm.com>
+ *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
  *
  *  Copyright (c) 2005 IBM Corporation
  *
@@ -56,6 +57,37 @@
 static struct kmem_cache *mr_cache;
 static struct kmem_cache *mw_cache;
 
+enum ehca_mr_pgsize {
+	EHCA_MR_PGSIZE4K  = 0x1000L,
+	EHCA_MR_PGSIZE64K = 0x10000L,
+	EHCA_MR_PGSIZE1M  = 0x100000L,
+	EHCA_MR_PGSIZE16M = 0x1000000L
+};
+
+extern int ehca_mr_largepage;
+
+static u32 ehca_encode_hwpage_size(u32 pgsize)
+{
+	u32 idx = 0;
+	pgsize >>= 12;
+	/*
+	 * map mr page size into hw code:
+	 * 0, 1, 2, 3 for 4K, 64K, 1M, 16M
+	 */
+	while (!(pgsize & 1)) {
+		idx++;
+		pgsize >>= 4;
+	}
+	return idx;
+}
+
+static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
+{
+	if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
+		return EHCA_MR_PGSIZE16M;
+	return EHCA_MR_PGSIZE4K;
+}
+
 static struct ehca_mr *ehca_mr_new(void)
 {
 	struct ehca_mr *me;
@@ -207,19 +239,23 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
 	struct ehca_mr_pginfo pginfo;
 	u32 num_kpages;
 	u32 num_hwpages;
+	u64 hw_pgsize;
 
 	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
 				PAGE_SIZE);
-	num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) +
-				 size, EHCA_PAGESIZE);
+	/* for kernel space we try most possible pgsize */
+	hw_pgsize = ehca_get_max_hwpage_size(shca);
+	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
+				 hw_pgsize);
 	memset(&pginfo, 0, sizeof(pginfo));
 	pginfo.type = EHCA_MR_PGI_PHYS;
 	pginfo.num_kpages = num_kpages;
+	pginfo.hwpage_size = hw_pgsize;
 	pginfo.num_hwpages = num_hwpages;
 	pginfo.u.phy.num_phys_buf = num_phys_buf;
 	pginfo.u.phy.phys_buf_array = phys_buf_array;
-	pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
-			      EHCA_PAGESIZE);
+	pginfo.next_hwpage =
+		((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
 			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
@@ -259,6 +295,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	int ret;
 	u32 num_kpages;
 	u32 num_hwpages;
+	u64 hwpage_size;
 
 	if (!pd) {
 		ehca_gen_err("bad pd=%p", pd);
@@ -309,16 +346,32 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	/* determine number of MR pages */
 	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
-	num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
-				 EHCA_PAGESIZE);
+	/* select proper hw_pgsize */
+	if (ehca_mr_largepage &&
+	    (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)) {
+		if (length <= EHCA_MR_PGSIZE4K
+		    && PAGE_SIZE == EHCA_MR_PGSIZE4K)
+			hwpage_size = EHCA_MR_PGSIZE4K;
+		else if (length <= EHCA_MR_PGSIZE64K)
+			hwpage_size = EHCA_MR_PGSIZE64K;
+		else if (length <= EHCA_MR_PGSIZE1M)
+			hwpage_size = EHCA_MR_PGSIZE1M;
+		else
+			hwpage_size = EHCA_MR_PGSIZE16M;
+	} else
+		hwpage_size = EHCA_MR_PGSIZE4K;
+	ehca_dbg(pd->device, "hwpage_size=%lx", hwpage_size);
 
+reg_user_mr_fallback:
+	num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
 	/* register MR on HCA */
 	memset(&pginfo, 0, sizeof(pginfo));
 	pginfo.type = EHCA_MR_PGI_USER;
+	pginfo.hwpage_size = hwpage_size;
 	pginfo.num_kpages = num_kpages;
 	pginfo.num_hwpages = num_hwpages;
 	pginfo.u.usr.region = e_mr->umem;
-	pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE;
+	pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
 	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
 						     (&e_mr->umem->chunk_list),
 						     list);
@@ -326,6 +379,18 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
 			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
 			  &e_mr->ib.ib_mr.rkey);
+	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
+		ehca_warn(pd->device, "failed to register mr "
+			  "with hwpage_size=%lx", hwpage_size);
+		ehca_info(pd->device, "try to register mr with "
+			  "kpage_size=%lx", PAGE_SIZE);
+		/*
+		 * this means kpages are not contiguous for a hw page
+		 * try kernel page size as fallback solution
+		 */
+		hwpage_size = PAGE_SIZE;
+		goto reg_user_mr_fallback;
+	}
 	if (ret) {
 		ib_mr = ERR_PTR(ret);
 		goto reg_user_mr_exit2;
@@ -452,6 +517,8 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
 
 	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+		u64 hw_pgsize = ehca_get_max_hwpage_size(shca);
+
 		new_start = iova_start;	/* change address */
 		/* check physical buffer list and calculate size */
 		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
@@ -468,16 +535,17 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
 		}
 		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
 					new_size, PAGE_SIZE);
-		num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
-					 new_size, EHCA_PAGESIZE);
+		num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
+					 new_size, hw_pgsize);
 		memset(&pginfo, 0, sizeof(pginfo));
 		pginfo.type = EHCA_MR_PGI_PHYS;
 		pginfo.num_kpages = num_kpages;
+		pginfo.hwpage_size = hw_pgsize;
 		pginfo.num_hwpages = num_hwpages;
 		pginfo.u.phy.num_phys_buf = num_phys_buf;
 		pginfo.u.phy.phys_buf_array = phys_buf_array;
-		pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
-				      EHCA_PAGESIZE);
+		pginfo.next_hwpage =
+			((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
 	}
 	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
 		new_acl = mr_access_flags;
@@ -709,6 +777,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 	int ret;
 	u32 tmp_lkey, tmp_rkey;
 	struct ehca_mr_pginfo pginfo;
+	u64 hw_pgsize;
 
 	/* check other parameters */
 	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
@@ -738,8 +807,8 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 		ib_fmr = ERR_PTR(-EINVAL);
 		goto alloc_fmr_exit0;
 	}
-	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
-	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
+	hw_pgsize = ehca_get_max_hwpage_size(shca);
+	if ((1 << fmr_attr->page_shift) != hw_pgsize) {
 		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
 			 fmr_attr->page_shift);
 		ib_fmr = ERR_PTR(-EINVAL);
@@ -755,6 +824,10 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 
 	/* register MR on HCA */
 	memset(&pginfo, 0, sizeof(pginfo));
+	/*
+	 * pginfo.num_hwpages==0, ie register_rpages() will not be called
+	 * but deferred to map_phys_fmr()
+	 */
 	ret = ehca_reg_mr(shca, e_fmr, NULL,
 			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
 			  mr_access_flags, e_pd, &pginfo,
@@ -765,6 +838,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 	}
 
 	/* successful */
+	e_fmr->hwpage_size = hw_pgsize;
 	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
 	e_fmr->fmr_max_pages = fmr_attr->max_pages;
 	e_fmr->fmr_max_maps = fmr_attr->max_maps;
@@ -822,10 +896,12 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
 	memset(&pginfo, 0, sizeof(pginfo));
 	pginfo.type = EHCA_MR_PGI_FMR;
 	pginfo.num_kpages = list_len;
-	pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
+	pginfo.hwpage_size = e_fmr->hwpage_size;
+	pginfo.num_hwpages =
+		list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
 	pginfo.u.fmr.page_list = page_list;
-	pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) /
-			      EHCA_PAGESIZE);
+	pginfo.next_hwpage =
+		(iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
 	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
 
 	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
@@ -964,7 +1040,7 @@ int ehca_reg_mr(struct ehca_shca *shca,
 	struct ehca_mr_hipzout_parms hipzout;
 
 	ehca_mrmw_map_acl(acl, &hipz_acl);
-	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
 	if (ehca_use_hp_mr == 1)
 		hipz_acl |= 0x00000001;
 
@@ -987,6 +1063,7 @@ int ehca_reg_mr(struct ehca_shca *shca,
 	/* successful registration */
 	e_mr->num_kpages = pginfo->num_kpages;
 	e_mr->num_hwpages = pginfo->num_hwpages;
+	e_mr->hwpage_size = pginfo->hwpage_size;
 	e_mr->start = iova_start;
 	e_mr->size = size;
 	e_mr->acl = acl;
@@ -1029,6 +1106,9 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
 	u32 i;
 	u64 *kpage;
 
+	if (!pginfo->num_hwpages) /* in case of fmr */
+		return 0;
+
 	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!kpage) {
 		ehca_err(&shca->ib_device, "kpage alloc failed");
@@ -1036,7 +1116,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
 		goto ehca_reg_mr_rpages_exit0;
 	}
 
-	/* max 512 pages per shot */
+	/* max MAX_RPAGES ehca mr pages per register call */
 	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
 
 		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
@@ -1049,8 +1129,8 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
 		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
 		if (ret) {
 			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
-					"bad rc, ret=%x rnum=%x kpage=%p",
-					ret, rnum, kpage);
+				 "bad rc, ret=%x rnum=%x kpage=%p",
+				 ret, rnum, kpage);
 			goto ehca_reg_mr_rpages_exit1;
 		}
 
@@ -1065,9 +1145,10 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
 		} else
 			rpage = *kpage;
 
-		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
-						 0, /* pagesize 4k */
-						 0, rpage, rnum);
+		h_ret = hipz_h_register_rpage_mr(
+			shca->ipz_hca_handle, e_mr,
+			ehca_encode_hwpage_size(pginfo->hwpage_size),
+			0, rpage, rnum);
 
 		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
 			/*
@@ -1131,7 +1212,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
 	struct ehca_mr_hipzout_parms hipzout;
 
 	ehca_mrmw_map_acl(acl, &hipz_acl);
-	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+	ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
 
 	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 	if (!kpage) {
@@ -1182,6 +1263,7 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
 	 */
 	e_mr->num_kpages = pginfo->num_kpages;
 	e_mr->num_hwpages = pginfo->num_hwpages;
+	e_mr->hwpage_size = pginfo->hwpage_size;
 	e_mr->start = iova_start;
 	e_mr->size = size;
 	e_mr->acl = acl;
@@ -1268,13 +1350,14 @@ int ehca_rereg_mr(struct ehca_shca *shca,
 
 	/* set some MR values */
 	e_mr->flags = save_mr.flags;
+	e_mr->hwpage_size = save_mr.hwpage_size;
 	e_mr->fmr_page_size = save_mr.fmr_page_size;
 	e_mr->fmr_max_pages = save_mr.fmr_max_pages;
 	e_mr->fmr_max_maps = save_mr.fmr_max_maps;
 	e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
 			  e_pd, pginfo, lkey, rkey);
 	if (ret) {
 		u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
 		memcpy(&e_mr->flags, &(save_mr.flags),
@@ -1355,6 +1438,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
 
 	/* set some MR values */
 	e_fmr->flags = save_fmr.flags;
+	e_fmr->hwpage_size = save_fmr.hwpage_size;
 	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
 	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
 	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
@@ -1363,8 +1447,6 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
 
 	memset(&pginfo, 0, sizeof(pginfo));
 	pginfo.type = EHCA_MR_PGI_FMR;
-	pginfo.num_kpages = 0;
-	pginfo.num_hwpages = 0;
 	ret = ehca_reg_mr(shca, e_fmr, NULL,
 			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
 			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
@@ -1373,7 +1455,6 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
 		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
 		memcpy(&e_fmr->flags, &(save_mr.flags),
 		       sizeof(struct ehca_mr) - offset);
-		goto ehca_unmap_one_fmr_exit0;
 	}
 
 ehca_unmap_one_fmr_exit0:
@@ -1401,7 +1482,7 @@ int ehca_reg_smr(struct ehca_shca *shca,
 	struct ehca_mr_hipzout_parms hipzout;
 
 	ehca_mrmw_map_acl(acl, &hipz_acl);
-	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
 
 	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
 				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
@@ -1420,6 +1501,7 @@ int ehca_reg_smr(struct ehca_shca *shca,
 	/* successful registration */
 	e_newmr->num_kpages = e_origmr->num_kpages;
 	e_newmr->num_hwpages = e_origmr->num_hwpages;
+	e_newmr->hwpage_size = e_origmr->hwpage_size;
 	e_newmr->start = iova_start;
 	e_newmr->size = e_origmr->size;
 	e_newmr->acl = acl;
@@ -1452,6 +1534,7 @@ int ehca_reg_internal_maxmr(
 	struct ib_phys_buf ib_pbuf;
 	u32 num_kpages;
 	u32 num_hwpages;
+	u64 hw_pgsize;
 
 	e_mr = ehca_mr_new();
 	if (!e_mr) {
@@ -1468,13 +1551,15 @@ int ehca_reg_internal_maxmr(
 	ib_pbuf.size = size_maxmr;
 	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
 				PAGE_SIZE);
-	num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr,
-				 EHCA_PAGESIZE);
+	hw_pgsize = ehca_get_max_hwpage_size(shca);
+	num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
+				 hw_pgsize);
 
 	memset(&pginfo, 0, sizeof(pginfo));
 	pginfo.type = EHCA_MR_PGI_PHYS;
 	pginfo.num_kpages = num_kpages;
 	pginfo.num_hwpages = num_hwpages;
+	pginfo.hwpage_size = hw_pgsize;
 	pginfo.u.phy.num_phys_buf = 1;
 	pginfo.u.phy.phys_buf_array = &ib_pbuf;
 
@@ -1523,7 +1608,7 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
 	struct ehca_mr_hipzout_parms hipzout;
 
 	ehca_mrmw_map_acl(acl, &hipz_acl);
-	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
+	ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
 
 	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
 				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
@@ -1539,6 +1624,7 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
 	/* successful registration */
 	e_newmr->num_kpages = e_origmr->num_kpages;
 	e_newmr->num_hwpages = e_origmr->num_hwpages;
+	e_newmr->hwpage_size = e_origmr->hwpage_size;
 	e_newmr->start = iova_start;
 	e_newmr->size = e_origmr->size;
 	e_newmr->acl = acl;
@@ -1684,6 +1770,7 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
 	u64 pgaddr;
 	u32 i = 0;
 	u32 j = 0;
+	int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
 
 	/* loop over desired chunk entries */
 	chunk = pginfo->u.usr.next_chunk;
@@ -1695,7 +1782,7 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
 				<< PAGE_SHIFT ;
 			*kpage = phys_to_abs(pgaddr +
 					     (pginfo->next_hwpage *
-					      EHCA_PAGESIZE));
+					      pginfo->hwpage_size));
 			if ( !(*kpage) ) {
 				ehca_gen_err("pgaddr=%lx "
 					     "chunk->page_list[i]=%lx "
@@ -1708,8 +1795,7 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
 			(pginfo->hwpage_cnt)++;
 			(pginfo->next_hwpage)++;
 			kpage++;
-			if (pginfo->next_hwpage %
-			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
+			if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
 				(pginfo->kpage_cnt)++;
 				(pginfo->u.usr.next_nmap)++;
 				pginfo->next_hwpage = 0;
@@ -1738,6 +1824,143 @@ static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
 	return ret;
 }
 
+/*
+ * check given pages for contiguous layout
+ * last page addr is returned in prev_pgaddr for further check
+ */
+static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
+				     int start_idx, int end_idx,
+				     u64 *prev_pgaddr)
+{
+	int t;
+	for (t = start_idx; t <= end_idx; t++) {
+		u64 pgaddr = page_to_pfn(page_list[t].page) << PAGE_SHIFT;
+		ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+			     *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
+		if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
+			ehca_gen_err("uncontiguous page found pgaddr=%lx "
+				     "prev_pgaddr=%lx page_list_i=%x",
+				     pgaddr, *prev_pgaddr, t);
+			return -EINVAL;
+		}
+		*prev_pgaddr = pgaddr;
+	}
+	return 0;
+}
+
+/* PAGE_SIZE < pginfo->hwpage_size */
+static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
+				  u32 number,
+				  u64 *kpage)
+{
+	int ret = 0;
+	struct ib_umem_chunk *prev_chunk;
+	struct ib_umem_chunk *chunk;
+	u64 pgaddr, prev_pgaddr;
+	u32 i = 0;
+	u32 j = 0;
+	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
+	int nr_kpages = kpages_per_hwpage;
+
+	/* loop over desired chunk entries */
+	chunk = pginfo->u.usr.next_chunk;
+	prev_chunk = pginfo->u.usr.next_chunk;
+	list_for_each_entry_continue(
+		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
+		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
+			if (nr_kpages == kpages_per_hwpage) {
+				pgaddr = ( page_to_pfn(chunk->page_list[i].page)
+					   << PAGE_SHIFT );
+				*kpage = phys_to_abs(pgaddr);
+				if ( !(*kpage) ) {
+					ehca_gen_err("pgaddr=%lx i=%x",
+						     pgaddr, i);
+					ret = -EFAULT;
+					return ret;
+				}
+				/*
+				 * The first page in a hwpage must be aligned;
+				 * the first MR page is exempt from this rule.
+				 */
+				if (pgaddr & (pginfo->hwpage_size - 1)) {
+					if (pginfo->hwpage_cnt) {
+						ehca_gen_err(
+							"invalid alignment "
+							"pgaddr=%lx i=%x "
+							"mr_pgsize=%lx",
+							pgaddr, i,
+							pginfo->hwpage_size);
+						ret = -EFAULT;
+						return ret;
+					}
+					/* first MR page */
+					pginfo->kpage_cnt =
+						(pgaddr &
+						 (pginfo->hwpage_size - 1)) >>
+						PAGE_SHIFT;
+					nr_kpages -= pginfo->kpage_cnt;
+					*kpage = phys_to_abs(
+						pgaddr &
+						~(pginfo->hwpage_size - 1));
+				}
+				ehca_gen_dbg("kpage=%lx chunk_page=%lx "
+					     "value=%016lx", *kpage, pgaddr,
+					     *(u64 *)abs_to_virt(
+						     phys_to_abs(pgaddr)));
+				prev_pgaddr = pgaddr;
+				i++;
+				pginfo->kpage_cnt++;
+				pginfo->u.usr.next_nmap++;
+				nr_kpages--;
+				if (!nr_kpages)
+					goto next_kpage;
+				continue;
+			}
+			if (i + nr_kpages > chunk->nmap) {
+				ret = ehca_check_kpages_per_ate(
+					chunk->page_list, i,
+					chunk->nmap - 1, &prev_pgaddr);
+				if (ret) return ret;
+				pginfo->kpage_cnt += chunk->nmap - i;
+				pginfo->u.usr.next_nmap += chunk->nmap - i;
+				nr_kpages -= chunk->nmap - i;
+				break;
+			}
+
+			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
+							i + nr_kpages - 1,
+							&prev_pgaddr);
+			if (ret) return ret;
+			i += nr_kpages;
+			pginfo->kpage_cnt += nr_kpages;
+			pginfo->u.usr.next_nmap += nr_kpages;
+next_kpage:
+			nr_kpages = kpages_per_hwpage;
+			(pginfo->hwpage_cnt)++;
+			kpage++;
+			j++;
+			if (j >= number) break;
+		}
+		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
+		    (j >= number)) {
+			pginfo->u.usr.next_nmap = 0;
+			prev_chunk = chunk;
+			break;
+		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
+			pginfo->u.usr.next_nmap = 0;
+			prev_chunk = chunk;
+		} else if (j >= number)
+			break;
+		else
+			prev_chunk = chunk;
+	}
+	pginfo->u.usr.next_chunk =
+		list_prepare_entry(prev_chunk,
+				   (&(pginfo->u.usr.region->chunk_list)),
+				   list);
+	return ret;
+}
+
 int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
 			  u32 number,
 			  u64 *kpage)
@@ -1750,9 +1973,10 @@ int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
 	/* loop over desired phys_buf_array entries */
 	while (i < number) {
 		pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
-		num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) +
-				    pbuf->size, EHCA_PAGESIZE);
-		offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
+		num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
+				    pbuf->size, pginfo->hwpage_size);
+		offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
+			pginfo->hwpage_size;
 		while (pginfo->next_hwpage < offs_hw + num_hw) {
 			/* sanity check */
 			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
@@ -1768,21 +1992,23 @@ int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
 				return -EFAULT;
 			}
 			*kpage = phys_to_abs(
-				(pbuf->addr & EHCA_PAGEMASK)
-				+ (pginfo->next_hwpage * EHCA_PAGESIZE));
+				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
+				(pginfo->next_hwpage * pginfo->hwpage_size));
 			if ( !(*kpage) && pbuf->addr ) {
-				ehca_gen_err("pbuf->addr=%lx "
-					     "pbuf->size=%lx "
+				ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
 					     "next_hwpage=%lx", pbuf->addr,
-					     pbuf->size,
-					     pginfo->next_hwpage);
+					     pbuf->size, pginfo->next_hwpage);
 				return -EFAULT;
 			}
 			(pginfo->hwpage_cnt)++;
 			(pginfo->next_hwpage)++;
-			if (pginfo->next_hwpage %
-			    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
-				(pginfo->kpage_cnt)++;
+			if (PAGE_SIZE >= pginfo->hwpage_size) {
+				if (pginfo->next_hwpage %
+				    (PAGE_SIZE / pginfo->hwpage_size) == 0)
+					(pginfo->kpage_cnt)++;
+			} else
+				pginfo->kpage_cnt += pginfo->hwpage_size /
+					PAGE_SIZE;
 			kpage++;
 			i++;
 			if (i >= number) break;
@@ -1806,8 +2032,8 @@ int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
 	/* loop over desired page_list entries */
 	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
 	for (i = 0; i < number; i++) {
-		*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
-				     pginfo->next_hwpage * EHCA_PAGESIZE);
+		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
+				     pginfo->next_hwpage * pginfo->hwpage_size);
 		if ( !(*kpage) ) {
 			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
 				     "next_listelem=%lx next_hwpage=%lx",
@@ -1817,15 +2043,38 @@ int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
 			return -EFAULT;
 		}
 		(pginfo->hwpage_cnt)++;
-		(pginfo->next_hwpage)++;
-		kpage++;
-		if (pginfo->next_hwpage %
-		    (pginfo->u.fmr.fmr_pgsize / EHCA_PAGESIZE) == 0) {
-			(pginfo->kpage_cnt)++;
-			(pginfo->u.fmr.next_listelem)++;
-			fmrlist++;
-			pginfo->next_hwpage = 0;
+		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
+			if (pginfo->next_hwpage %
+			    (pginfo->u.fmr.fmr_pgsize /
+			     pginfo->hwpage_size) == 0) {
+				(pginfo->kpage_cnt)++;
+				(pginfo->u.fmr.next_listelem)++;
+				fmrlist++;
+				pginfo->next_hwpage = 0;
+			} else
+				(pginfo->next_hwpage)++;
+		} else {
+			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
+				pginfo->u.fmr.fmr_pgsize;
+			unsigned int j;
+			u64 prev = *kpage;
+			/* check if adrs are contiguous */
+			for (j = 1; j < cnt_per_hwpage; j++) {
+				u64 p = phys_to_abs(fmrlist[j] &
+						    ~(pginfo->hwpage_size - 1));
+				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
+					ehca_gen_err("uncontiguous fmr pages "
+						     "found prev=%lx p=%lx "
+						     "idx=%x", prev, p, i + j);
+					return -EINVAL;
+				}
+				prev = p;
+			}
+			pginfo->kpage_cnt += cnt_per_hwpage;
+			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
+			fmrlist += cnt_per_hwpage;
 		}
+		kpage++;
 	}
 	return ret;
 }
@@ -1842,7 +2091,9 @@ int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
 		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
 		break;
 	case EHCA_MR_PGI_USER:
-		ret = ehca_set_pagebuf_user1(pginfo, number, kpage);
+		ret = PAGE_SIZE >= pginfo->hwpage_size ?
+			ehca_set_pagebuf_user1(pginfo, number, kpage) :
+			ehca_set_pagebuf_user2(pginfo, number, kpage);
 		break;
 	case EHCA_MR_PGI_FMR:
 		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
@@ -1895,9 +2146,9 @@ void ehca_mrmw_map_acl(int ib_acl,
 /*----------------------------------------------------------------------*/
 
 /* sets page size in hipz access control for MR/MW. */
-void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
+void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
 {
-	return; /* HCA supports only 4k */
+	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
 } /* end ehca_mrmw_set_pgsize_hipz_acl() */
 
 /*----------------------------------------------------------------------*/