author    Hoang-Nam Nguyen <hnguyen@de.ibm.com>    2007-07-12 11:52:29 -0400
committer Roland Dreier <rolandd@cisco.com>    2007-07-17 21:37:40 -0400
commit    df17bfd4a030f7d986de14210f4b21876a7a2989 (patch)
tree      48c95b8248f77a96ac7ffa896cbcd9405130ee39 /drivers/infiniband/hw
parent    2492398e616451788bc7c7905cadb8734b082bc7 (diff)
IB/ehca: MR/MW structure refactoring
- Rename struct ehca_mr fields to clearly distinguish between kernel and HW page size.
- Sort struct ehca_mr_pginfo into a common part and a union containing specific fields for physical, user and fast MR.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
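For readers who want the net effect of the refactoring without walking the whole diff, the condensed C sketch below restates the reorganized struct ehca_mr_pginfo and the way the physical-MR path now fills it. It is pieced together from the hunks that follow (ehca_classes.h and ehca_reg_phys_mr() in ehca_mrmw.c); surrounding driver context is omitted, so it is illustrative rather than a compilable excerpt.

    /*
     * Condensed from the patch: the common page counters stay at the top
     * level, while the per-type cursors move into a union selected by
     * pginfo->type (EHCA_MR_PGI_PHYS / _USER / _FMR).
     */
    struct ehca_mr_pginfo {
            enum ehca_mr_pgi_type type;
            u64 num_kpages;         /* number of kernel (PAGE_SIZE) pages */
            u64 kpage_cnt;
            u64 num_hwpages;        /* number of hw pages */
            u64 hwpage_cnt;
            u64 next_hwpage;        /* next hw page in buffer/chunk/listelem */

            union {
                    struct {        /* type EHCA_MR_PGI_PHYS */
                            int num_phys_buf;
                            struct ib_phys_buf *phys_buf_array;
                            u64 next_buf;
                    } phy;
                    struct {        /* type EHCA_MR_PGI_USER */
                            struct ib_umem *region;
                            struct ib_umem_chunk *next_chunk;
                            u64 next_nmap;
                    } usr;
                    struct {        /* type EHCA_MR_PGI_FMR */
                            u64 fmr_pgsize;
                            u64 *page_list;
                            u64 next_listelem;
                    } fmr;
            } u;
    };

    /*
     * Typical initialization for the physical-MR case, mirroring
     * ehca_reg_phys_mr() after the patch: zero the whole structure first
     * (replacing the old hard-to-maintain brace initializer), then set the
     * common part and only the phy member of the union.
     */
    memset(&pginfo, 0, sizeof(pginfo));
    pginfo.type = EHCA_MR_PGI_PHYS;
    pginfo.num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
                                   PAGE_SIZE);
    pginfo.num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size,
                                    EHCA_PAGESIZE);
    pginfo.u.phy.num_phys_buf = num_phys_buf;
    pginfo.u.phy.phys_buf_array = phys_buf_array;
    pginfo.next_hwpage = ((u64)iova_start & ~PAGE_MASK) / EHCA_PAGESIZE;

The user and FMR registration paths follow the same pattern, touching only pginfo.u.usr.* or pginfo.u.fmr.* respectively, as the diff below shows.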
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h |  50
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c    | 511
2 files changed, 284 insertions, 277 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index daf823ea1ace..5e00202dc779 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -204,8 +204,8 @@ struct ehca_mr {
204 spinlock_t mrlock; 204 spinlock_t mrlock;
205 205
206 enum ehca_mr_flag flags; 206 enum ehca_mr_flag flags;
207 u32 num_pages; /* number of MR pages */ 207 u32 num_kpages; /* number of kernel pages */
208 u32 num_4k; /* number of 4k "page" portions to form MR */ 208 u32 num_hwpages; /* number of hw pages to form MR */
209 int acl; /* ACL (stored here for usage in reregister) */ 209 int acl; /* ACL (stored here for usage in reregister) */
210 u64 *start; /* virtual start address (stored here for */ 210 u64 *start; /* virtual start address (stored here for */
211 /* usage in reregister) */ 211 /* usage in reregister) */
@@ -217,9 +217,6 @@ struct ehca_mr {
217 /* fw specific data */ 217 /* fw specific data */
218 struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */ 218 struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
219 struct h_galpas galpas; 219 struct h_galpas galpas;
220 /* data for userspace bridge */
221 u32 nr_of_pages;
222 void *pagearray;
223}; 220};
224 221
225struct ehca_mw { 222struct ehca_mw {
@@ -241,26 +238,29 @@ enum ehca_mr_pgi_type {
241 238
242struct ehca_mr_pginfo { 239struct ehca_mr_pginfo {
243 enum ehca_mr_pgi_type type; 240 enum ehca_mr_pgi_type type;
244 u64 num_pages; 241 u64 num_kpages;
245 u64 page_cnt; 242 u64 kpage_cnt;
246 u64 num_4k; /* number of 4k "page" portions */ 243 u64 num_hwpages; /* number of hw pages */
247 u64 page_4k_cnt; /* counter for 4k "page" portions */ 244 u64 hwpage_cnt; /* counter for hw pages */
248 u64 next_4k; /* next 4k "page" portion in buffer/chunk/listelem */ 245 u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
249 246
250 /* type EHCA_MR_PGI_PHYS section */ 247 union {
251 int num_phys_buf; 248 struct { /* type EHCA_MR_PGI_PHYS section */
252 struct ib_phys_buf *phys_buf_array; 249 int num_phys_buf;
253 u64 next_buf; 250 struct ib_phys_buf *phys_buf_array;
254 251 u64 next_buf;
255 /* type EHCA_MR_PGI_USER section */ 252 } phy;
256 struct ib_umem *region; 253 struct { /* type EHCA_MR_PGI_USER section */
257 struct ib_umem_chunk *next_chunk; 254 struct ib_umem *region;
258 u64 next_nmap; 255 struct ib_umem_chunk *next_chunk;
259 256 u64 next_nmap;
260 /* type EHCA_MR_PGI_FMR section */ 257 } usr;
261 u64 *page_list; 258 struct { /* type EHCA_MR_PGI_FMR section */
262 u64 next_listelem; 259 u64 fmr_pgsize;
263 /* next_4k also used within EHCA_MR_PGI_FMR */ 260 u64 *page_list;
261 u64 next_listelem;
262 } fmr;
263 } u;
264}; 264};
265 265
266/* output parameters for MR/FMR hipz calls */ 266/* output parameters for MR/FMR hipz calls */
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 58e8b33d030b..53b334b3c325 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -150,9 +150,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
150 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); 150 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
151 151
152 u64 size; 152 u64 size;
153 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
154 u32 num_pages_mr;
155 u32 num_pages_4k; /* 4k portion "pages" */
156 153
157 if ((num_phys_buf <= 0) || !phys_buf_array) { 154 if ((num_phys_buf <= 0) || !phys_buf_array) {
158 ehca_err(pd->device, "bad input values: num_phys_buf=%x " 155 ehca_err(pd->device, "bad input values: num_phys_buf=%x "
@@ -196,12 +193,6 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
196 goto reg_phys_mr_exit0; 193 goto reg_phys_mr_exit0;
197 } 194 }
198 195
199 /* determine number of MR pages */
200 num_pages_mr = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
201 PAGE_SIZE);
202 num_pages_4k = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size,
203 EHCA_PAGESIZE);
204
205 /* register MR on HCA */ 196 /* register MR on HCA */
206 if (ehca_mr_is_maxmr(size, iova_start)) { 197 if (ehca_mr_is_maxmr(size, iova_start)) {
207 e_mr->flags |= EHCA_MR_FLAG_MAXMR; 198 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
@@ -213,13 +204,22 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
213 goto reg_phys_mr_exit1; 204 goto reg_phys_mr_exit1;
214 } 205 }
215 } else { 206 } else {
216 pginfo.type = EHCA_MR_PGI_PHYS; 207 struct ehca_mr_pginfo pginfo;
217 pginfo.num_pages = num_pages_mr; 208 u32 num_kpages;
218 pginfo.num_4k = num_pages_4k; 209 u32 num_hwpages;
219 pginfo.num_phys_buf = num_phys_buf; 210
220 pginfo.phys_buf_array = phys_buf_array; 211 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
221 pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / 212 PAGE_SIZE);
222 EHCA_PAGESIZE); 213 num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) +
214 size, EHCA_PAGESIZE);
215 memset(&pginfo, 0, sizeof(pginfo));
216 pginfo.type = EHCA_MR_PGI_PHYS;
217 pginfo.num_kpages = num_kpages;
218 pginfo.num_hwpages = num_hwpages;
219 pginfo.u.phy.num_phys_buf = num_phys_buf;
220 pginfo.u.phy.phys_buf_array = phys_buf_array;
221 pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
222 EHCA_PAGESIZE);
223 223
224 ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags, 224 ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
225 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey, 225 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
@@ -254,10 +254,10 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt
254 struct ehca_shca *shca = 254 struct ehca_shca *shca =
255 container_of(pd->device, struct ehca_shca, ib_device); 255 container_of(pd->device, struct ehca_shca, ib_device);
256 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd); 256 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
257 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 257 struct ehca_mr_pginfo pginfo;
258 int ret; 258 int ret;
259 u32 num_pages_mr; 259 u32 num_kpages;
260 u32 num_pages_4k; /* 4k portion "pages" */ 260 u32 num_hwpages;
261 261
262 if (!pd) { 262 if (!pd) {
263 ehca_gen_err("bad pd=%p", pd); 263 ehca_gen_err("bad pd=%p", pd);
@@ -307,19 +307,20 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt
307 } 307 }
308 308
309 /* determine number of MR pages */ 309 /* determine number of MR pages */
310 num_pages_mr = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE); 310 num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
311 num_pages_4k = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length, 311 num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
312 EHCA_PAGESIZE); 312 EHCA_PAGESIZE);
313 313
314 /* register MR on HCA */ 314 /* register MR on HCA */
315 pginfo.type = EHCA_MR_PGI_USER; 315 memset(&pginfo, 0, sizeof(pginfo));
316 pginfo.num_pages = num_pages_mr; 316 pginfo.type = EHCA_MR_PGI_USER;
317 pginfo.num_4k = num_pages_4k; 317 pginfo.num_kpages = num_kpages;
318 pginfo.region = e_mr->umem; 318 pginfo.num_hwpages = num_hwpages;
319 pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE; 319 pginfo.u.usr.region = e_mr->umem;
320 pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk, 320 pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE;
321 (&e_mr->umem->chunk_list), 321 pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
322 list); 322 (&e_mr->umem->chunk_list),
323 list);
323 324
324 ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd, 325 ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
325 &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey); 326 &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
@@ -365,9 +366,9 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
365 struct ehca_pd *new_pd; 366 struct ehca_pd *new_pd;
366 u32 tmp_lkey, tmp_rkey; 367 u32 tmp_lkey, tmp_rkey;
367 unsigned long sl_flags; 368 unsigned long sl_flags;
368 u32 num_pages_mr = 0; 369 u32 num_kpages = 0;
369 u32 num_pages_4k = 0; /* 4k portion "pages" */ 370 u32 num_hwpages = 0;
370 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 371 struct ehca_mr_pginfo pginfo;
371 u32 cur_pid = current->tgid; 372 u32 cur_pid = current->tgid;
372 373
373 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context && 374 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
@@ -463,17 +464,18 @@ int ehca_rereg_phys_mr(struct ib_mr *mr,
463 ret = -EINVAL; 464 ret = -EINVAL;
464 goto rereg_phys_mr_exit1; 465 goto rereg_phys_mr_exit1;
465 } 466 }
466 num_pages_mr = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) + 467 num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
467 new_size, PAGE_SIZE); 468 new_size, PAGE_SIZE);
468 num_pages_4k = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) + 469 num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
469 new_size, EHCA_PAGESIZE); 470 new_size, EHCA_PAGESIZE);
470 pginfo.type = EHCA_MR_PGI_PHYS; 471 memset(&pginfo, 0, sizeof(pginfo));
471 pginfo.num_pages = num_pages_mr; 472 pginfo.type = EHCA_MR_PGI_PHYS;
472 pginfo.num_4k = num_pages_4k; 473 pginfo.num_kpages = num_kpages;
473 pginfo.num_phys_buf = num_phys_buf; 474 pginfo.num_hwpages = num_hwpages;
474 pginfo.phys_buf_array = phys_buf_array; 475 pginfo.u.phy.num_phys_buf = num_phys_buf;
475 pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) / 476 pginfo.u.phy.phys_buf_array = phys_buf_array;
476 EHCA_PAGESIZE); 477 pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
478 EHCA_PAGESIZE);
477 } 479 }
478 if (mr_rereg_mask & IB_MR_REREG_ACCESS) 480 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
479 new_acl = mr_access_flags; 481 new_acl = mr_access_flags;
@@ -544,11 +546,11 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
544 ret = ehca2ib_return_code(h_ret); 546 ret = ehca2ib_return_code(h_ret);
545 goto query_mr_exit1; 547 goto query_mr_exit1;
546 } 548 }
547 mr_attr->pd = mr->pd; 549 mr_attr->pd = mr->pd;
548 mr_attr->device_virt_addr = hipzout.vaddr; 550 mr_attr->device_virt_addr = hipzout.vaddr;
549 mr_attr->size = hipzout.len; 551 mr_attr->size = hipzout.len;
550 mr_attr->lkey = hipzout.lkey; 552 mr_attr->lkey = hipzout.lkey;
551 mr_attr->rkey = hipzout.rkey; 553 mr_attr->rkey = hipzout.rkey;
552 ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags); 554 ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
553 555
554query_mr_exit1: 556query_mr_exit1:
@@ -704,7 +706,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
704 struct ehca_mr *e_fmr; 706 struct ehca_mr *e_fmr;
705 int ret; 707 int ret;
706 u32 tmp_lkey, tmp_rkey; 708 u32 tmp_lkey, tmp_rkey;
707 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 709 struct ehca_mr_pginfo pginfo;
708 710
709 /* check other parameters */ 711 /* check other parameters */
710 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) && 712 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
@@ -750,6 +752,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
750 e_fmr->flags |= EHCA_MR_FLAG_FMR; 752 e_fmr->flags |= EHCA_MR_FLAG_FMR;
751 753
752 /* register MR on HCA */ 754 /* register MR on HCA */
755 memset(&pginfo, 0, sizeof(pginfo));
753 ret = ehca_reg_mr(shca, e_fmr, NULL, 756 ret = ehca_reg_mr(shca, e_fmr, NULL,
754 fmr_attr->max_pages * (1 << fmr_attr->page_shift), 757 fmr_attr->max_pages * (1 << fmr_attr->page_shift),
755 mr_access_flags, e_pd, &pginfo, 758 mr_access_flags, e_pd, &pginfo,
@@ -788,7 +791,7 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
788 container_of(fmr->device, struct ehca_shca, ib_device); 791 container_of(fmr->device, struct ehca_shca, ib_device);
789 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr); 792 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
790 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd); 793 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
791 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 794 struct ehca_mr_pginfo pginfo;
792 u32 tmp_lkey, tmp_rkey; 795 u32 tmp_lkey, tmp_rkey;
793 796
794 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) { 797 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
@@ -814,12 +817,13 @@ int ehca_map_phys_fmr(struct ib_fmr *fmr,
814 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps); 817 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
815 } 818 }
816 819
817 pginfo.type = EHCA_MR_PGI_FMR; 820 memset(&pginfo, 0, sizeof(pginfo));
818 pginfo.num_pages = list_len; 821 pginfo.type = EHCA_MR_PGI_FMR;
819 pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE); 822 pginfo.num_kpages = list_len;
820 pginfo.page_list = page_list; 823 pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
821 pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) / 824 pginfo.u.fmr.page_list = page_list;
822 EHCA_PAGESIZE); 825 pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) /
826 EHCA_PAGESIZE);
823 827
824 ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova, 828 ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
825 list_len * e_fmr->fmr_page_size, 829 list_len * e_fmr->fmr_page_size,
@@ -979,11 +983,11 @@ int ehca_reg_mr(struct ehca_shca *shca,
979 goto ehca_reg_mr_exit1; 983 goto ehca_reg_mr_exit1;
980 984
981 /* successful registration */ 985 /* successful registration */
982 e_mr->num_pages = pginfo->num_pages; 986 e_mr->num_kpages = pginfo->num_kpages;
983 e_mr->num_4k = pginfo->num_4k; 987 e_mr->num_hwpages = pginfo->num_hwpages;
984 e_mr->start = iova_start; 988 e_mr->start = iova_start;
985 e_mr->size = size; 989 e_mr->size = size;
986 e_mr->acl = acl; 990 e_mr->acl = acl;
987 *lkey = hipzout.lkey; 991 *lkey = hipzout.lkey;
988 *rkey = hipzout.rkey; 992 *rkey = hipzout.rkey;
989 return 0; 993 return 0;
@@ -993,10 +997,10 @@ ehca_reg_mr_exit1:
993 if (h_ret != H_SUCCESS) { 997 if (h_ret != H_SUCCESS) {
994 ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p " 998 ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
995 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " 999 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
996 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x", 1000 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
997 h_ret, shca, e_mr, iova_start, size, acl, e_pd, 1001 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
998 hipzout.lkey, pginfo, pginfo->num_pages, 1002 hipzout.lkey, pginfo, pginfo->num_kpages,
999 pginfo->num_4k, ret); 1003 pginfo->num_hwpages, ret);
1000 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, " 1004 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
1001 "not recoverable"); 1005 "not recoverable");
1002 } 1006 }
@@ -1004,9 +1008,9 @@ ehca_reg_mr_exit0:
1004 if (ret) 1008 if (ret)
1005 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " 1009 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1006 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1010 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1007 "num_pages=%lx num_4k=%lx", 1011 "num_kpages=%lx num_hwpages=%lx",
1008 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, 1012 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
1009 pginfo->num_pages, pginfo->num_4k); 1013 pginfo->num_kpages, pginfo->num_hwpages);
1010 return ret; 1014 return ret;
1011} /* end ehca_reg_mr() */ 1015} /* end ehca_reg_mr() */
1012 1016
@@ -1031,10 +1035,10 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1031 } 1035 }
1032 1036
1033 /* max 512 pages per shot */ 1037 /* max 512 pages per shot */
1034 for (i = 0; i < NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES); i++) { 1038 for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
1035 1039
1036 if (i == NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES) - 1) { 1040 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
1037 rnum = pginfo->num_4k % MAX_RPAGES; /* last shot */ 1041 rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
1038 if (rnum == 0) 1042 if (rnum == 0)
1039 rnum = MAX_RPAGES; /* last shot is full */ 1043 rnum = MAX_RPAGES; /* last shot is full */
1040 } else 1044 } else
@@ -1070,7 +1074,7 @@ int ehca_reg_mr_rpages(struct ehca_shca *shca,
1070 0, /* pagesize 4k */ 1074 0, /* pagesize 4k */
1071 0, rpage, rnum); 1075 0, rpage, rnum);
1072 1076
1073 if (i == NUM_CHUNKS(pginfo->num_4k, MAX_RPAGES) - 1) { 1077 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
1074 /* 1078 /*
1075 * check for 'registration complete'==H_SUCCESS 1079 * check for 'registration complete'==H_SUCCESS
1076 * and for 'page registered'==H_PAGE_REGISTERED 1080 * and for 'page registered'==H_PAGE_REGISTERED
@@ -1106,8 +1110,8 @@ ehca_reg_mr_rpages_exit1:
1106ehca_reg_mr_rpages_exit0: 1110ehca_reg_mr_rpages_exit0:
1107 if (ret) 1111 if (ret)
1108 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p " 1112 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
1109 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo, 1113 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
1110 pginfo->num_pages, pginfo->num_4k); 1114 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
1111 return ret; 1115 return ret;
1112} /* end ehca_reg_mr_rpages() */ 1116} /* end ehca_reg_mr_rpages() */
1113 1117
@@ -1142,12 +1146,12 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1142 } 1146 }
1143 1147
1144 pginfo_save = *pginfo; 1148 pginfo_save = *pginfo;
1145 ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage); 1149 ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_hwpages, kpage);
1146 if (ret) { 1150 if (ret) {
1147 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " 1151 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
1148 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p", 1152 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
1149 e_mr, pginfo, pginfo->type, pginfo->num_pages, 1153 "kpage=%p", e_mr, pginfo, pginfo->type,
1150 pginfo->num_4k,kpage); 1154 pginfo->num_kpages, pginfo->num_hwpages, kpage);
1151 goto ehca_rereg_mr_rereg1_exit1; 1155 goto ehca_rereg_mr_rereg1_exit1;
1152 } 1156 }
1153 rpage = virt_to_abs(kpage); 1157 rpage = virt_to_abs(kpage);
@@ -1181,11 +1185,11 @@ inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1181 * successful reregistration 1185 * successful reregistration
1182 * note: start and start_out are identical for eServer HCAs 1186 * note: start and start_out are identical for eServer HCAs
1183 */ 1187 */
1184 e_mr->num_pages = pginfo->num_pages; 1188 e_mr->num_kpages = pginfo->num_kpages;
1185 e_mr->num_4k = pginfo->num_4k; 1189 e_mr->num_hwpages = pginfo->num_hwpages;
1186 e_mr->start = iova_start; 1190 e_mr->start = iova_start;
1187 e_mr->size = size; 1191 e_mr->size = size;
1188 e_mr->acl = acl; 1192 e_mr->acl = acl;
1189 *lkey = hipzout.lkey; 1193 *lkey = hipzout.lkey;
1190 *rkey = hipzout.rkey; 1194 *rkey = hipzout.rkey;
1191 } 1195 }
@@ -1195,9 +1199,9 @@ ehca_rereg_mr_rereg1_exit1:
1195ehca_rereg_mr_rereg1_exit0: 1199ehca_rereg_mr_rereg1_exit0:
1196 if ( ret && (ret != -EAGAIN) ) 1200 if ( ret && (ret != -EAGAIN) )
1197 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x " 1201 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
1198 "pginfo=%p num_pages=%lx num_4k=%lx", 1202 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
1199 ret, *lkey, *rkey, pginfo, pginfo->num_pages, 1203 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
1200 pginfo->num_4k); 1204 pginfo->num_hwpages);
1201 return ret; 1205 return ret;
1202} /* end ehca_rereg_mr_rereg1() */ 1206} /* end ehca_rereg_mr_rereg1() */
1203 1207
@@ -1219,10 +1223,12 @@ int ehca_rereg_mr(struct ehca_shca *shca,
1219 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */ 1223 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
1220 1224
1221 /* first determine reregistration hCall(s) */ 1225 /* first determine reregistration hCall(s) */
1222 if ((pginfo->num_4k > MAX_RPAGES) || (e_mr->num_4k > MAX_RPAGES) || 1226 if ((pginfo->num_hwpages > MAX_RPAGES) ||
1223 (pginfo->num_4k > e_mr->num_4k)) { 1227 (e_mr->num_hwpages > MAX_RPAGES) ||
1224 ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx " 1228 (pginfo->num_hwpages > e_mr->num_hwpages)) {
1225 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k); 1229 ehca_dbg(&shca->ib_device, "Rereg3 case, "
1230 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
1231 pginfo->num_hwpages, e_mr->num_hwpages);
1226 rereg_1_hcall = 0; 1232 rereg_1_hcall = 0;
1227 rereg_3_hcall = 1; 1233 rereg_3_hcall = 1;
1228 } 1234 }
@@ -1286,9 +1292,9 @@ ehca_rereg_mr_exit0:
1286 if (ret) 1292 if (ret)
1287 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p " 1293 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1288 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1294 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1289 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " 1295 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
1290 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, 1296 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1291 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey, 1297 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1292 rereg_1_hcall, rereg_3_hcall); 1298 rereg_1_hcall, rereg_3_hcall);
1293 return ret; 1299 return ret;
1294} /* end ehca_rereg_mr() */ 1300} /* end ehca_rereg_mr() */
@@ -1306,7 +1312,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
1306 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd); 1312 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1307 struct ehca_mr save_fmr; 1313 struct ehca_mr save_fmr;
1308 u32 tmp_lkey, tmp_rkey; 1314 u32 tmp_lkey, tmp_rkey;
1309 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 1315 struct ehca_mr_pginfo pginfo;
1310 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0}; 1316 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1311 1317
1312 /* first check if reregistration hCall can be used for unmap */ 1318 /* first check if reregistration hCall can be used for unmap */
@@ -1370,9 +1376,10 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
1370 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt; 1376 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1371 e_fmr->acl = save_fmr.acl; 1377 e_fmr->acl = save_fmr.acl;
1372 1378
1373 pginfo.type = EHCA_MR_PGI_FMR; 1379 memset(&pginfo, 0, sizeof(pginfo));
1374 pginfo.num_pages = 0; 1380 pginfo.type = EHCA_MR_PGI_FMR;
1375 pginfo.num_4k = 0; 1381 pginfo.num_kpages = 0;
1382 pginfo.num_hwpages = 0;
1376 ret = ehca_reg_mr(shca, e_fmr, NULL, 1383 ret = ehca_reg_mr(shca, e_fmr, NULL,
1377 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size), 1384 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1378 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, 1385 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
@@ -1428,11 +1435,11 @@ int ehca_reg_smr(struct ehca_shca *shca,
1428 goto ehca_reg_smr_exit0; 1435 goto ehca_reg_smr_exit0;
1429 } 1436 }
1430 /* successful registration */ 1437 /* successful registration */
1431 e_newmr->num_pages = e_origmr->num_pages; 1438 e_newmr->num_kpages = e_origmr->num_kpages;
1432 e_newmr->num_4k = e_origmr->num_4k; 1439 e_newmr->num_hwpages = e_origmr->num_hwpages;
1433 e_newmr->start = iova_start; 1440 e_newmr->start = iova_start;
1434 e_newmr->size = e_origmr->size; 1441 e_newmr->size = e_origmr->size;
1435 e_newmr->acl = acl; 1442 e_newmr->acl = acl;
1436 e_newmr->ipz_mr_handle = hipzout.handle; 1443 e_newmr->ipz_mr_handle = hipzout.handle;
1437 *lkey = hipzout.lkey; 1444 *lkey = hipzout.lkey;
1438 *rkey = hipzout.rkey; 1445 *rkey = hipzout.rkey;
@@ -1458,10 +1465,10 @@ int ehca_reg_internal_maxmr(
1458 struct ehca_mr *e_mr; 1465 struct ehca_mr *e_mr;
1459 u64 *iova_start; 1466 u64 *iova_start;
1460 u64 size_maxmr; 1467 u64 size_maxmr;
1461 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0}; 1468 struct ehca_mr_pginfo pginfo;
1462 struct ib_phys_buf ib_pbuf; 1469 struct ib_phys_buf ib_pbuf;
1463 u32 num_pages_mr; 1470 u32 num_kpages;
1464 u32 num_pages_4k; /* 4k portion "pages" */ 1471 u32 num_hwpages;
1465 1472
1466 e_mr = ehca_mr_new(); 1473 e_mr = ehca_mr_new();
1467 if (!e_mr) { 1474 if (!e_mr) {
@@ -1476,25 +1483,26 @@ int ehca_reg_internal_maxmr(
1476 iova_start = (u64*)KERNELBASE; 1483 iova_start = (u64*)KERNELBASE;
1477 ib_pbuf.addr = 0; 1484 ib_pbuf.addr = 0;
1478 ib_pbuf.size = size_maxmr; 1485 ib_pbuf.size = size_maxmr;
1479 num_pages_mr = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr, 1486 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
1480 PAGE_SIZE); 1487 PAGE_SIZE);
1481 num_pages_4k = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) 1488 num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr,
1482 + size_maxmr, EHCA_PAGESIZE); 1489 EHCA_PAGESIZE);
1483 1490
1484 pginfo.type = EHCA_MR_PGI_PHYS; 1491 memset(&pginfo, 0, sizeof(pginfo));
1485 pginfo.num_pages = num_pages_mr; 1492 pginfo.type = EHCA_MR_PGI_PHYS;
1486 pginfo.num_4k = num_pages_4k; 1493 pginfo.num_kpages = num_kpages;
1487 pginfo.num_phys_buf = 1; 1494 pginfo.num_hwpages = num_hwpages;
1488 pginfo.phys_buf_array = &ib_pbuf; 1495 pginfo.u.phy.num_phys_buf = 1;
1496 pginfo.u.phy.phys_buf_array = &ib_pbuf;
1489 1497
1490 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd, 1498 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1491 &pginfo, &e_mr->ib.ib_mr.lkey, 1499 &pginfo, &e_mr->ib.ib_mr.lkey,
1492 &e_mr->ib.ib_mr.rkey); 1500 &e_mr->ib.ib_mr.rkey);
1493 if (ret) { 1501 if (ret) {
1494 ehca_err(&shca->ib_device, "reg of internal max MR failed, " 1502 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1495 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x " 1503 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
1496 "num_pages_4k=%x", e_mr, iova_start, size_maxmr, 1504 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
1497 num_pages_mr, num_pages_4k); 1505 num_kpages, num_hwpages);
1498 goto ehca_reg_internal_maxmr_exit1; 1506 goto ehca_reg_internal_maxmr_exit1;
1499 } 1507 }
1500 1508
@@ -1546,11 +1554,11 @@ int ehca_reg_maxmr(struct ehca_shca *shca,
1546 return ehca2ib_return_code(h_ret); 1554 return ehca2ib_return_code(h_ret);
1547 } 1555 }
1548 /* successful registration */ 1556 /* successful registration */
1549 e_newmr->num_pages = e_origmr->num_pages; 1557 e_newmr->num_kpages = e_origmr->num_kpages;
1550 e_newmr->num_4k = e_origmr->num_4k; 1558 e_newmr->num_hwpages = e_origmr->num_hwpages;
1551 e_newmr->start = iova_start; 1559 e_newmr->start = iova_start;
1552 e_newmr->size = e_origmr->size; 1560 e_newmr->size = e_origmr->size;
1553 e_newmr->acl = acl; 1561 e_newmr->acl = acl;
1554 e_newmr->ipz_mr_handle = hipzout.handle; 1562 e_newmr->ipz_mr_handle = hipzout.handle;
1555 *lkey = hipzout.lkey; 1563 *lkey = hipzout.lkey;
1556 *rkey = hipzout.rkey; 1564 *rkey = hipzout.rkey;
@@ -1693,138 +1701,139 @@ int ehca_set_pagebuf(struct ehca_mr *e_mr,
1693 struct ib_umem_chunk *chunk; 1701 struct ib_umem_chunk *chunk;
1694 struct ib_phys_buf *pbuf; 1702 struct ib_phys_buf *pbuf;
1695 u64 *fmrlist; 1703 u64 *fmrlist;
1696 u64 num4k, pgaddr, offs4k; 1704 u64 num_hw, pgaddr, offs_hw;
1697 u32 i = 0; 1705 u32 i = 0;
1698 u32 j = 0; 1706 u32 j = 0;
1699 1707
1700 if (pginfo->type == EHCA_MR_PGI_PHYS) { 1708 if (pginfo->type == EHCA_MR_PGI_PHYS) {
1701 /* loop over desired phys_buf_array entries */ 1709 /* loop over desired phys_buf_array entries */
1702 while (i < number) { 1710 while (i < number) {
1703 pbuf = pginfo->phys_buf_array + pginfo->next_buf; 1711 pbuf = pginfo->u.phy.phys_buf_array
1704 num4k = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) 1712 + pginfo->u.phy.next_buf;
1705 + pbuf->size, EHCA_PAGESIZE); 1713 num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) +
1706 offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; 1714 pbuf->size, EHCA_PAGESIZE);
1707 while (pginfo->next_4k < offs4k + num4k) { 1715 offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1716 while (pginfo->next_hwpage < offs_hw + num_hw) {
1708 /* sanity check */ 1717 /* sanity check */
1709 if ((pginfo->page_cnt >= pginfo->num_pages) || 1718 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1710 (pginfo->page_4k_cnt >= pginfo->num_4k)) { 1719 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1711 ehca_gen_err("page_cnt >= num_pages, " 1720 ehca_gen_err("kpage_cnt >= num_kpages, "
1712 "page_cnt=%lx " 1721 "kpage_cnt=%lx "
1713 "num_pages=%lx " 1722 "num_kpages=%lx "
1714 "page_4k_cnt=%lx " 1723 "hwpage_cnt=%lx "
1715 "num_4k=%lx i=%x", 1724 "num_hwpages=%lx i=%x",
1716 pginfo->page_cnt, 1725 pginfo->kpage_cnt,
1717 pginfo->num_pages, 1726 pginfo->num_kpages,
1718 pginfo->page_4k_cnt, 1727 pginfo->hwpage_cnt,
1719 pginfo->num_4k, i); 1728 pginfo->num_hwpages, i);
1720 ret = -EFAULT; 1729 ret = -EFAULT;
1721 goto ehca_set_pagebuf_exit0; 1730 goto ehca_set_pagebuf_exit0;
1722 } 1731 }
1723 *kpage = phys_to_abs( 1732 *kpage = phys_to_abs(
1724 (pbuf->addr & EHCA_PAGEMASK) 1733 (pbuf->addr & EHCA_PAGEMASK)
1725 + (pginfo->next_4k * EHCA_PAGESIZE)); 1734 + (pginfo->next_hwpage * EHCA_PAGESIZE));
1726 if ( !(*kpage) && pbuf->addr ) { 1735 if ( !(*kpage) && pbuf->addr ) {
1727 ehca_gen_err("pbuf->addr=%lx " 1736 ehca_gen_err("pbuf->addr=%lx "
1728 "pbuf->size=%lx " 1737 "pbuf->size=%lx "
1729 "next_4k=%lx", pbuf->addr, 1738 "next_hwpage=%lx", pbuf->addr,
1730 pbuf->size, 1739 pbuf->size,
1731 pginfo->next_4k); 1740 pginfo->next_hwpage);
1732 ret = -EFAULT; 1741 ret = -EFAULT;
1733 goto ehca_set_pagebuf_exit0; 1742 goto ehca_set_pagebuf_exit0;
1734 } 1743 }
1735 (pginfo->page_4k_cnt)++; 1744 (pginfo->hwpage_cnt)++;
1736 (pginfo->next_4k)++; 1745 (pginfo->next_hwpage)++;
1737 if (pginfo->next_4k % 1746 if (pginfo->next_hwpage %
1738 (PAGE_SIZE / EHCA_PAGESIZE) == 0) 1747 (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1739 (pginfo->page_cnt)++; 1748 (pginfo->kpage_cnt)++;
1740 kpage++; 1749 kpage++;
1741 i++; 1750 i++;
1742 if (i >= number) break; 1751 if (i >= number) break;
1743 } 1752 }
1744 if (pginfo->next_4k >= offs4k + num4k) { 1753 if (pginfo->next_hwpage >= offs_hw + num_hw) {
1745 (pginfo->next_buf)++; 1754 (pginfo->u.phy.next_buf)++;
1746 pginfo->next_4k = 0; 1755 pginfo->next_hwpage = 0;
1747 } 1756 }
1748 } 1757 }
1749 } else if (pginfo->type == EHCA_MR_PGI_USER) { 1758 } else if (pginfo->type == EHCA_MR_PGI_USER) {
1750 /* loop over desired chunk entries */ 1759 /* loop over desired chunk entries */
1751 chunk = pginfo->next_chunk; 1760 chunk = pginfo->u.usr.next_chunk;
1752 prev_chunk = pginfo->next_chunk; 1761 prev_chunk = pginfo->u.usr.next_chunk;
1753 list_for_each_entry_continue(chunk, 1762 list_for_each_entry_continue(chunk,
1754 (&(pginfo->region->chunk_list)), 1763 (&(pginfo->u.usr.region->chunk_list)),
1755 list) { 1764 list) {
1756 for (i = pginfo->next_nmap; i < chunk->nmap; ) { 1765 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1757 pgaddr = ( page_to_pfn(chunk->page_list[i].page) 1766 pgaddr = ( page_to_pfn(chunk->page_list[i].page)
1758 << PAGE_SHIFT ); 1767 << PAGE_SHIFT );
1759 *kpage = phys_to_abs(pgaddr + 1768 *kpage = phys_to_abs(pgaddr +
1760 (pginfo->next_4k * 1769 (pginfo->next_hwpage *
1761 EHCA_PAGESIZE)); 1770 EHCA_PAGESIZE));
1762 if ( !(*kpage) ) { 1771 if ( !(*kpage) ) {
1763 ehca_gen_err("pgaddr=%lx " 1772 ehca_gen_err("pgaddr=%lx "
1764 "chunk->page_list[i]=%lx " 1773 "chunk->page_list[i]=%lx "
1765 "i=%x next_4k=%lx mr=%p", 1774 "i=%x next_hwpage=%lx mr=%p",
1766 pgaddr, 1775 pgaddr,
1767 (u64)sg_dma_address( 1776 (u64)sg_dma_address(
1768 &chunk-> 1777 &chunk->
1769 page_list[i]), 1778 page_list[i]),
1770 i, pginfo->next_4k, e_mr); 1779 i, pginfo->next_hwpage, e_mr);
1771 ret = -EFAULT; 1780 ret = -EFAULT;
1772 goto ehca_set_pagebuf_exit0; 1781 goto ehca_set_pagebuf_exit0;
1773 } 1782 }
1774 (pginfo->page_4k_cnt)++; 1783 (pginfo->hwpage_cnt)++;
1775 (pginfo->next_4k)++; 1784 (pginfo->next_hwpage)++;
1776 kpage++; 1785 kpage++;
1777 if (pginfo->next_4k % 1786 if (pginfo->next_hwpage %
1778 (PAGE_SIZE / EHCA_PAGESIZE) == 0) { 1787 (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1779 (pginfo->page_cnt)++; 1788 (pginfo->kpage_cnt)++;
1780 (pginfo->next_nmap)++; 1789 (pginfo->u.usr.next_nmap)++;
1781 pginfo->next_4k = 0; 1790 pginfo->next_hwpage = 0;
1782 i++; 1791 i++;
1783 } 1792 }
1784 j++; 1793 j++;
1785 if (j >= number) break; 1794 if (j >= number) break;
1786 } 1795 }
1787 if ((pginfo->next_nmap >= chunk->nmap) && 1796 if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
1788 (j >= number)) { 1797 (j >= number)) {
1789 pginfo->next_nmap = 0; 1798 pginfo->u.usr.next_nmap = 0;
1790 prev_chunk = chunk; 1799 prev_chunk = chunk;
1791 break; 1800 break;
1792 } else if (pginfo->next_nmap >= chunk->nmap) { 1801 } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
1793 pginfo->next_nmap = 0; 1802 pginfo->u.usr.next_nmap = 0;
1794 prev_chunk = chunk; 1803 prev_chunk = chunk;
1795 } else if (j >= number) 1804 } else if (j >= number)
1796 break; 1805 break;
1797 else 1806 else
1798 prev_chunk = chunk; 1807 prev_chunk = chunk;
1799 } 1808 }
1800 pginfo->next_chunk = 1809 pginfo->u.usr.next_chunk =
1801 list_prepare_entry(prev_chunk, 1810 list_prepare_entry(prev_chunk,
1802 (&(pginfo->region->chunk_list)), 1811 (&(pginfo->u.usr.region->chunk_list)),
1803 list); 1812 list);
1804 } else if (pginfo->type == EHCA_MR_PGI_FMR) { 1813 } else if (pginfo->type == EHCA_MR_PGI_FMR) {
1805 /* loop over desired page_list entries */ 1814 /* loop over desired page_list entries */
1806 fmrlist = pginfo->page_list + pginfo->next_listelem; 1815 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1807 for (i = 0; i < number; i++) { 1816 for (i = 0; i < number; i++) {
1808 *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + 1817 *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1809 pginfo->next_4k * EHCA_PAGESIZE); 1818 pginfo->next_hwpage * EHCA_PAGESIZE);
1810 if ( !(*kpage) ) { 1819 if ( !(*kpage) ) {
1811 ehca_gen_err("*fmrlist=%lx fmrlist=%p " 1820 ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1812 "next_listelem=%lx next_4k=%lx", 1821 "next_listelem=%lx next_hwpage=%lx",
1813 *fmrlist, fmrlist, 1822 *fmrlist, fmrlist,
1814 pginfo->next_listelem, 1823 pginfo->u.fmr.next_listelem,
1815 pginfo->next_4k); 1824 pginfo->next_hwpage);
1816 ret = -EFAULT; 1825 ret = -EFAULT;
1817 goto ehca_set_pagebuf_exit0; 1826 goto ehca_set_pagebuf_exit0;
1818 } 1827 }
1819 (pginfo->page_4k_cnt)++; 1828 (pginfo->hwpage_cnt)++;
1820 (pginfo->next_4k)++; 1829 (pginfo->next_hwpage)++;
1821 kpage++; 1830 kpage++;
1822 if (pginfo->next_4k % 1831 if (pginfo->next_hwpage %
1823 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { 1832 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1824 (pginfo->page_cnt)++; 1833 (pginfo->kpage_cnt)++;
1825 (pginfo->next_listelem)++; 1834 (pginfo->u.fmr.next_listelem)++;
1826 fmrlist++; 1835 fmrlist++;
1827 pginfo->next_4k = 0; 1836 pginfo->next_hwpage = 0;
1828 } 1837 }
1829 } 1838 }
1830 } else { 1839 } else {
@@ -1835,16 +1844,16 @@ int ehca_set_pagebuf(struct ehca_mr *e_mr,
1835 1844
1836ehca_set_pagebuf_exit0: 1845ehca_set_pagebuf_exit0:
1837 if (ret) 1846 if (ret)
1838 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " 1847 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_kpages=%lx "
1839 "num_4k=%lx next_buf=%lx next_4k=%lx number=%x " 1848 "num_hwpages=%lx next_buf=%lx next_hwpage=%lx number=%x "
1840 "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x " 1849 "kpage=%p kpage_cnt=%lx hwpage_cnt=%lx i=%x "
1841 "next_listelem=%lx region=%p next_chunk=%p " 1850 "next_listelem=%lx region=%p next_chunk=%p "
1842 "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type, 1851 "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
1843 pginfo->num_pages, pginfo->num_4k, 1852 pginfo->num_kpages, pginfo->num_hwpages,
1844 pginfo->next_buf, pginfo->next_4k, number, kpage, 1853 pginfo->u.phy.next_buf, pginfo->next_hwpage, number, kpage,
1845 pginfo->page_cnt, pginfo->page_4k_cnt, i, 1854 pginfo->kpage_cnt, pginfo->hwpage_cnt, i,
1846 pginfo->next_listelem, pginfo->region, 1855 pginfo->u.fmr.next_listelem, pginfo->u.usr.region,
1847 pginfo->next_chunk, pginfo->next_nmap); 1856 pginfo->u.usr.next_chunk, pginfo->u.usr.next_nmap);
1848 return ret; 1857 return ret;
1849} /* end ehca_set_pagebuf() */ 1858} /* end ehca_set_pagebuf() */
1850 1859
@@ -1860,101 +1869,101 @@ int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
1860 u64 *fmrlist; 1869 u64 *fmrlist;
1861 struct ib_umem_chunk *chunk; 1870 struct ib_umem_chunk *chunk;
1862 struct ib_umem_chunk *prev_chunk; 1871 struct ib_umem_chunk *prev_chunk;
1863 u64 pgaddr, num4k, offs4k; 1872 u64 pgaddr, num_hw, offs_hw;
1864 1873
1865 if (pginfo->type == EHCA_MR_PGI_PHYS) { 1874 if (pginfo->type == EHCA_MR_PGI_PHYS) {
1866 /* sanity check */ 1875 /* sanity check */
1867 if ((pginfo->page_cnt >= pginfo->num_pages) || 1876 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1868 (pginfo->page_4k_cnt >= pginfo->num_4k)) { 1877 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1869 ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx " 1878 ehca_gen_err("kpage_cnt >= num_hwpages, kpage_cnt=%lx "
1870 "num_pages=%lx page_4k_cnt=%lx num_4k=%lx", 1879 "num_hwpages=%lx hwpage_cnt=%lx num_hwpages=%lx",
1871 pginfo->page_cnt, pginfo->num_pages, 1880 pginfo->kpage_cnt, pginfo->num_kpages,
1872 pginfo->page_4k_cnt, pginfo->num_4k); 1881 pginfo->hwpage_cnt, pginfo->num_hwpages);
1873 ret = -EFAULT; 1882 ret = -EFAULT;
1874 goto ehca_set_pagebuf_1_exit0; 1883 goto ehca_set_pagebuf_1_exit0;
1875 } 1884 }
1876 tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf; 1885 tmp_pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
1877 num4k = NUM_CHUNKS((tmp_pbuf->addr % EHCA_PAGESIZE) + 1886 num_hw = NUM_CHUNKS((tmp_pbuf->addr % EHCA_PAGESIZE) +
1878 tmp_pbuf->size, EHCA_PAGESIZE); 1887 tmp_pbuf->size, EHCA_PAGESIZE);
1879 offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE; 1888 offs_hw = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1880 *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) + 1889 *rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
1881 (pginfo->next_4k * EHCA_PAGESIZE)); 1890 (pginfo->next_hwpage * EHCA_PAGESIZE));
1882 if ( !(*rpage) && tmp_pbuf->addr ) { 1891 if ( !(*rpage) && tmp_pbuf->addr ) {
1883 ehca_gen_err("tmp_pbuf->addr=%lx" 1892 ehca_gen_err("tmp_pbuf->addr=%lx"
1884 " tmp_pbuf->size=%lx next_4k=%lx", 1893 " tmp_pbuf->size=%lx next_hwpage=%lx",
1885 tmp_pbuf->addr, tmp_pbuf->size, 1894 tmp_pbuf->addr, tmp_pbuf->size,
1886 pginfo->next_4k); 1895 pginfo->next_hwpage);
1887 ret = -EFAULT; 1896 ret = -EFAULT;
1888 goto ehca_set_pagebuf_1_exit0; 1897 goto ehca_set_pagebuf_1_exit0;
1889 } 1898 }
1890 (pginfo->page_4k_cnt)++; 1899 (pginfo->hwpage_cnt)++;
1891 (pginfo->next_4k)++; 1900 (pginfo->next_hwpage)++;
1892 if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0) 1901 if (pginfo->next_hwpage % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1893 (pginfo->page_cnt)++; 1902 (pginfo->kpage_cnt)++;
1894 if (pginfo->next_4k >= offs4k + num4k) { 1903 if (pginfo->next_hwpage >= offs_hw + num_hw) {
1895 (pginfo->next_buf)++; 1904 (pginfo->u.phy.next_buf)++;
1896 pginfo->next_4k = 0; 1905 pginfo->next_hwpage = 0;
1897 } 1906 }
1898 } else if (pginfo->type == EHCA_MR_PGI_USER) { 1907 } else if (pginfo->type == EHCA_MR_PGI_USER) {
1899 chunk = pginfo->next_chunk; 1908 chunk = pginfo->u.usr.next_chunk;
1900 prev_chunk = pginfo->next_chunk; 1909 prev_chunk = pginfo->u.usr.next_chunk;
1901 list_for_each_entry_continue(chunk, 1910 list_for_each_entry_continue(chunk,
1902 (&(pginfo->region->chunk_list)), 1911 (&(pginfo->u.usr.region->chunk_list)),
1903 list) { 1912 list) {
1904 pgaddr = ( page_to_pfn(chunk->page_list[ 1913 pgaddr = ( page_to_pfn(chunk->page_list[
1905 pginfo->next_nmap].page) 1914 pginfo->u.usr.next_nmap].page)
1906 << PAGE_SHIFT); 1915 << PAGE_SHIFT);
1907 *rpage = phys_to_abs(pgaddr + 1916 *rpage = phys_to_abs(pgaddr +
1908 (pginfo->next_4k * EHCA_PAGESIZE)); 1917 (pginfo->next_hwpage * EHCA_PAGESIZE));
1909 if ( !(*rpage) ) { 1918 if ( !(*rpage) ) {
1910 ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx" 1919 ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
1911 " next_nmap=%lx next_4k=%lx mr=%p", 1920 " next_nmap=%lx next_hwpage=%lx mr=%p",
1912 pgaddr, (u64)sg_dma_address( 1921 pgaddr, (u64)sg_dma_address(
1913 &chunk->page_list[ 1922 &chunk->page_list[
1914 pginfo-> 1923 pginfo->u.usr.
1915 next_nmap]), 1924 next_nmap]),
1916 pginfo->next_nmap, pginfo->next_4k, 1925 pginfo->u.usr.next_nmap, pginfo->next_hwpage,
1917 e_mr); 1926 e_mr);
1918 ret = -EFAULT; 1927 ret = -EFAULT;
1919 goto ehca_set_pagebuf_1_exit0; 1928 goto ehca_set_pagebuf_1_exit0;
1920 } 1929 }
1921 (pginfo->page_4k_cnt)++; 1930 (pginfo->hwpage_cnt)++;
1922 (pginfo->next_4k)++; 1931 (pginfo->next_hwpage)++;
1923 if (pginfo->next_4k % 1932 if (pginfo->next_hwpage %
1924 (PAGE_SIZE / EHCA_PAGESIZE) == 0) { 1933 (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1925 (pginfo->page_cnt)++; 1934 (pginfo->kpage_cnt)++;
1926 (pginfo->next_nmap)++; 1935 (pginfo->u.usr.next_nmap)++;
1927 pginfo->next_4k = 0; 1936 pginfo->next_hwpage = 0;
1928 } 1937 }
1929 if (pginfo->next_nmap >= chunk->nmap) { 1938 if (pginfo->u.usr.next_nmap >= chunk->nmap) {
1930 pginfo->next_nmap = 0; 1939 pginfo->u.usr.next_nmap = 0;
1931 prev_chunk = chunk; 1940 prev_chunk = chunk;
1932 } 1941 }
1933 break; 1942 break;
1934 } 1943 }
1935 pginfo->next_chunk = 1944 pginfo->u.usr.next_chunk =
1936 list_prepare_entry(prev_chunk, 1945 list_prepare_entry(prev_chunk,
1937 (&(pginfo->region->chunk_list)), 1946 (&(pginfo->u.usr.region->chunk_list)),
1938 list); 1947 list);
1939 } else if (pginfo->type == EHCA_MR_PGI_FMR) { 1948 } else if (pginfo->type == EHCA_MR_PGI_FMR) {
1940 fmrlist = pginfo->page_list + pginfo->next_listelem; 1949 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1941 *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) + 1950 *rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1942 pginfo->next_4k * EHCA_PAGESIZE); 1951 pginfo->next_hwpage * EHCA_PAGESIZE);
1943 if ( !(*rpage) ) { 1952 if ( !(*rpage) ) {
1944 ehca_gen_err("*fmrlist=%lx fmrlist=%p " 1953 ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1945 "next_listelem=%lx next_4k=%lx", 1954 "next_listelem=%lx next_hwpage=%lx",
1946 *fmrlist, fmrlist, pginfo->next_listelem, 1955 *fmrlist, fmrlist, pginfo->u.fmr.next_listelem,
1947 pginfo->next_4k); 1956 pginfo->next_hwpage);
1948 ret = -EFAULT; 1957 ret = -EFAULT;
1949 goto ehca_set_pagebuf_1_exit0; 1958 goto ehca_set_pagebuf_1_exit0;
1950 } 1959 }
1951 (pginfo->page_4k_cnt)++; 1960 (pginfo->hwpage_cnt)++;
1952 (pginfo->next_4k)++; 1961 (pginfo->next_hwpage)++;
1953 if (pginfo->next_4k % 1962 if (pginfo->next_hwpage %
1954 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) { 1963 (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
1955 (pginfo->page_cnt)++; 1964 (pginfo->kpage_cnt)++;
1956 (pginfo->next_listelem)++; 1965 (pginfo->u.fmr.next_listelem)++;
1957 pginfo->next_4k = 0; 1966 pginfo->next_hwpage = 0;
1958 } 1967 }
1959 } else { 1968 } else {
1960 ehca_gen_err("bad pginfo->type=%x", pginfo->type); 1969 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
@@ -1964,15 +1973,15 @@ int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
1964 1973
1965ehca_set_pagebuf_1_exit0: 1974ehca_set_pagebuf_1_exit0:
1966 if (ret) 1975 if (ret)
1967 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx " 1976 ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_kpages=%lx "
1968 "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p " 1977 "num_hwpages=%lx next_buf=%lx next_hwpage=%lx rpage=%p "
1969 "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx " 1978 "kpage_cnt=%lx hwpage_cnt=%lx next_listelem=%lx "
1970 "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr, 1979 "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
1971 pginfo, pginfo->type, pginfo->num_pages, 1980 pginfo, pginfo->type, pginfo->num_kpages,
1972 pginfo->num_4k, pginfo->next_buf, pginfo->next_4k, 1981 pginfo->num_hwpages, pginfo->u.phy.next_buf, pginfo->next_hwpage,
1973 rpage, pginfo->page_cnt, pginfo->page_4k_cnt, 1982 rpage, pginfo->kpage_cnt, pginfo->hwpage_cnt,
1974 pginfo->next_listelem, pginfo->region, 1983 pginfo->u.fmr.next_listelem, pginfo->u.usr.region,
1975 pginfo->next_chunk, pginfo->next_nmap); 1984 pginfo->u.usr.next_chunk, pginfo->u.usr.next_nmap);
1976 return ret; 1985 return ret;
1977} /* end ehca_set_pagebuf_1() */ 1986} /* end ehca_set_pagebuf_1() */
1978 1987
@@ -2053,19 +2062,17 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
2053 */ 2062 */
2054void ehca_mr_deletenew(struct ehca_mr *mr) 2063void ehca_mr_deletenew(struct ehca_mr *mr)
2055{ 2064{
2056 mr->flags = 0; 2065 mr->flags = 0;
2057 mr->num_pages = 0; 2066 mr->num_kpages = 0;
2058 mr->num_4k = 0; 2067 mr->num_hwpages = 0;
2059 mr->acl = 0; 2068 mr->acl = 0;
2060 mr->start = NULL; 2069 mr->start = NULL;
2061 mr->fmr_page_size = 0; 2070 mr->fmr_page_size = 0;
2062 mr->fmr_max_pages = 0; 2071 mr->fmr_max_pages = 0;
2063 mr->fmr_max_maps = 0; 2072 mr->fmr_max_maps = 0;
2064 mr->fmr_map_cnt = 0; 2073 mr->fmr_map_cnt = 0;
2065 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle)); 2074 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
2066 memset(&mr->galpas, 0, sizeof(mr->galpas)); 2075 memset(&mr->galpas, 0, sizeof(mr->galpas));
2067 mr->nr_of_pages = 0;
2068 mr->pagearray = NULL;
2069} /* end ehca_mr_deletenew() */ 2076} /* end ehca_mr_deletenew() */
2070 2077
2071int ehca_init_mrmw_cache(void) 2078int ehca_init_mrmw_cache(void)