author		Hannes Hering <hering2@de.ibm.com>	2009-06-23 01:18:51 -0400
committer	Roland Dreier <rolandd@cisco.com>	2009-06-23 01:18:51 -0400
commit		0cf89dcdbc53f2b43e4ce7419b6ff47f4309c2eb (patch)
tree		bee90178d8da3c89884daed60686fc59a952ce79 /drivers/infiniband
parent		65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff)
IB/ehca: Tolerate dynamic memory operations before driver load
Implement toleration of dynamic memory operations and 16 GB gigantic
pages. "Toleration" here means that the driver can cope with dynamic
memory operations that happen before the driver is loaded; while the
ehca driver is loaded, dynamic memory operations are still prohibited
by returning NOTIFY_BAD from the memory notifier.

On module load the driver walks through available system memory, checks
for available memory ranges and then registers the kernel-internal
memory region accordingly. The translation of address ranges is
implemented via a 3-level busmap.

Signed-off-by: Hannes Hering <hering2@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
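[Editor's note: a minimal userspace sketch, not part of the commit, of how
the 3-level busmap lookup described above splits an address into top/dir/idx
indices. The shifts mirror the patch; SECTION_SIZE_BITS is assumed to be 24
(16 MB sections), and main() with its sample address is purely hypothetical.]

#include <stdio.h>

#define EHCA_SECTSHIFT       24	/* assumed value of SECTION_SIZE_BITS */
#define EHCA_DIR_INDEX_SHIFT 13	/* 8k entries in a 64k block */
#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
#define EHCA_INDEX_MASK      ((1 << EHCA_DIR_INDEX_SHIFT) - 1)

/* same index math as ehca_calc_index() in the patch below */
static int ehca_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHCA_INDEX_MASK;
}

int main(void)
{
	unsigned long abs_addr = 0x123456789abUL;	/* sample bus address */

	/* each level consumes 13 bits above the 24-bit section offset */
	int top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
	int dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
	int idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);

	printf("top=%d dir=%d idx=%d\n", top, dir, idx);
	return 0;
}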
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_main.c	 18
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_mrmw.c	508
-rw-r--r--	drivers/infiniband/hw/ehca/ehca_mrmw.h	 13
3 files changed, 522 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index ce4e6eff4792..14a18b7be245 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -506,6 +506,7 @@ static int ehca_init_device(struct ehca_shca *shca)
 	shca->ib_device.detach_mcast	    = ehca_detach_mcast;
 	shca->ib_device.process_mad	    = ehca_process_mad;
 	shca->ib_device.mmap		    = ehca_mmap;
+	shca->ib_device.dma_ops		    = &ehca_dma_mapping_ops;
 
 	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
 		shca->ib_device.uverbs_cmd_mask |=
@@ -1028,17 +1029,23 @@ static int __init ehca_module_init(void)
 		goto module_init1;
 	}
 
+	ret = ehca_create_busmap();
+	if (ret) {
+		ehca_gen_err("Cannot create busmap.");
+		goto module_init2;
+	}
+
 	ret = ibmebus_register_driver(&ehca_driver);
 	if (ret) {
 		ehca_gen_err("Cannot register eHCA device driver");
 		ret = -EINVAL;
-		goto module_init2;
+		goto module_init3;
 	}
 
 	ret = register_memory_notifier(&ehca_mem_nb);
 	if (ret) {
 		ehca_gen_err("Failed registering memory add/remove notifier");
-		goto module_init3;
+		goto module_init4;
 	}
 
 	if (ehca_poll_all_eqs != 1) {
@@ -1053,9 +1060,12 @@ static int __init ehca_module_init(void)
 
 	return 0;
 
-module_init3:
+module_init4:
 	ibmebus_unregister_driver(&ehca_driver);
 
+module_init3:
+	ehca_destroy_busmap();
+
 module_init2:
 	ehca_destroy_slab_caches();
 
@@ -1073,6 +1083,8 @@ static void __exit ehca_module_exit(void)
 
 	unregister_memory_notifier(&ehca_mem_nb);
 
+	ehca_destroy_busmap();
+
 	ehca_destroy_slab_caches();
 
 	ehca_destroy_comp_pool();
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 72f83f7df614..7663a2a9f130 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -53,6 +53,38 @@
 /* max number of rpages (per hcall register_rpages) */
 #define MAX_RPAGES 512
 
+/* DMEM toleration management */
+#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
+#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
+#define EHCA_HUGEPAGESHIFT     34
+#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
+#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
+#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
+#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
+#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
+#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
+#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
+#define EHCA_DIR_MAP_SIZE (0x10000)
+#define EHCA_ENT_MAP_SIZE (0x10000)
+#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
+
+static unsigned long ehca_mr_len;
+
+/*
+ * Memory map data structures
+ */
+struct ehca_dir_bmap {
+	u64 ent[EHCA_MAP_ENTRIES];
+};
+struct ehca_top_bmap {
+	struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
+};
+struct ehca_bmap {
+	struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
+};
+
+static struct ehca_bmap *ehca_bmap;
+
 static struct kmem_cache *mr_cache;
 static struct kmem_cache *mw_cache;
 
@@ -68,6 +100,8 @@ enum ehca_mr_pgsize {
 #define EHCA_MR_PGSHIFT1M   20
 #define EHCA_MR_PGSHIFT16M  24
 
+static u64 ehca_map_vaddr(void *caddr);
+
 static u32 ehca_encode_hwpage_size(u32 pgsize)
 {
 	int log = ilog2(pgsize);
@@ -135,7 +169,8 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
 		goto get_dma_mr_exit0;
 	}
 
-	ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
+	ret = ehca_reg_maxmr(shca, e_maxmr,
+			     (void *)ehca_map_vaddr((void *)KERNELBASE),
 			     mr_access_flags, e_pd,
 			     &e_maxmr->ib.ib_mr.lkey,
 			     &e_maxmr->ib.ib_mr.rkey);
@@ -251,7 +286,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
 			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-			  &e_mr->ib.ib_mr.rkey);
+			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
 	if (ret) {
 		ib_mr = ERR_PTR(ret);
 		goto reg_phys_mr_exit1;
@@ -370,7 +405,7 @@ reg_user_mr_fallback:
 
 	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
 			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-			  &e_mr->ib.ib_mr.rkey);
+			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
 	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
 		ehca_warn(pd->device, "failed to register mr "
 			  "with hwpage_size=%llx", hwpage_size);
@@ -794,7 +829,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 	ret = ehca_reg_mr(shca, e_fmr, NULL,
 			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
 			  mr_access_flags, e_pd, &pginfo,
-			  &tmp_lkey, &tmp_rkey);
+			  &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
 	if (ret) {
 		ib_fmr = ERR_PTR(ret);
 		goto alloc_fmr_exit1;
@@ -983,6 +1018,10 @@ free_fmr_exit0:
 
 /*----------------------------------------------------------------------*/
 
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+				   struct ehca_mr *e_mr,
+				   struct ehca_mr_pginfo *pginfo);
+
 int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_mr *e_mr,
 		u64 *iova_start,
@@ -991,7 +1030,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_pd *e_pd,
 		struct ehca_mr_pginfo *pginfo,
 		u32 *lkey, /*OUT*/
-		u32 *rkey) /*OUT*/
+		u32 *rkey, /*OUT*/
+		enum ehca_reg_type reg_type)
 {
 	int ret;
 	u64 h_ret;
@@ -1015,7 +1055,13 @@ int ehca_reg_mr(struct ehca_shca *shca,
 
 	e_mr->ipz_mr_handle = hipzout.handle;
 
-	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+	if (reg_type == EHCA_REG_BUSMAP_MR)
+		ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
+	else if (reg_type == EHCA_REG_MR)
+		ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+	else
+		ret = -EINVAL;
+
 	if (ret)
 		goto ehca_reg_mr_exit1;
 
@@ -1316,7 +1362,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
 	e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
-			  e_pd, pginfo, lkey, rkey);
+			  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
 	if (ret) {
 		u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
 		memcpy(&e_mr->flags, &(save_mr.flags),
@@ -1409,7 +1455,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
 	ret = ehca_reg_mr(shca, e_fmr, NULL,
 			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
 			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
-			  &tmp_rkey);
+			  &tmp_rkey, EHCA_REG_MR);
 	if (ret) {
 		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
 		memcpy(&e_fmr->flags, &(save_mr.flags),
@@ -1478,6 +1524,90 @@ ehca_reg_smr_exit0:
 } /* end ehca_reg_smr() */
 
 /*----------------------------------------------------------------------*/
+static inline void *ehca_calc_sectbase(int top, int dir, int idx)
+{
+	unsigned long ret = idx;
+	ret |= dir << EHCA_DIR_INDEX_SHIFT;
+	ret |= top << EHCA_TOP_INDEX_SHIFT;
+	return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+#define ehca_bmap_valid(entry) \
+	((u64)entry != (u64)EHCA_INVAL_ADDR)
+
+static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
+			       struct ehca_shca *shca, struct ehca_mr *mr,
+			       struct ehca_mr_pginfo *pginfo)
+{
+	u64 h_ret = 0;
+	unsigned long page = 0;
+	u64 rpage = virt_to_abs(kpage);
+	int page_count;
+
+	void *sectbase = ehca_calc_sectbase(top, dir, idx);
+	if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
+		ehca_err(&shca->ib_device, "reg_mr_section will probably fail:"
+			 "hwpage_size does not fit to "
+			 "section start address");
+	}
+	page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
+
+	while (page < page_count) {
+		u64 rnum;
+		for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
+		     rnum++) {
+			void *pg = sectbase + ((page++) * pginfo->hwpage_size);
+			kpage[rnum] = virt_to_abs(pg);
+		}
+
+		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
+			ehca_encode_hwpage_size(pginfo->hwpage_size),
+			0, rpage, rnum);
+
+		if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
+			ehca_err(&shca->ib_device, "register_rpage_mr failed");
+			return h_ret;
+		}
+	}
+	return h_ret;
+}
+
+static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
+				struct ehca_shca *shca, struct ehca_mr *mr,
+				struct ehca_mr_pginfo *pginfo)
+{
+	u64 hret = H_SUCCESS;
+	int idx;
+
+	for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
+		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
+			continue;
+
+		hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
+					   pginfo);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
+}
+
+static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
+				    struct ehca_mr *mr,
+				    struct ehca_mr_pginfo *pginfo)
+{
+	u64 hret = H_SUCCESS;
+	int dir;
+
+	for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+			continue;
+
+		hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
+}
 
 /* register internal max-MR to internal SHCA */
 int ehca_reg_internal_maxmr(
@@ -1495,6 +1625,11 @@ int ehca_reg_internal_maxmr(
 	u32 num_hwpages;
 	u64 hw_pgsize;
 
+	if (!ehca_bmap) {
+		ret = -EFAULT;
+		goto ehca_reg_internal_maxmr_exit0;
+	}
+
 	e_mr = ehca_mr_new();
 	if (!e_mr) {
 		ehca_err(&shca->ib_device, "out of memory");
@@ -1504,8 +1639,8 @@ int ehca_reg_internal_maxmr(
 	e_mr->flags |= EHCA_MR_FLAG_MAXMR;
 
 	/* register internal max-MR on HCA */
-	size_maxmr = (u64)high_memory - PAGE_OFFSET;
-	iova_start = (u64 *)KERNELBASE;
+	size_maxmr = ehca_mr_len;
+	iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
 	ib_pbuf.addr = 0;
 	ib_pbuf.size = size_maxmr;
 	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
@@ -1524,7 +1659,7 @@ int ehca_reg_internal_maxmr(
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
 			  &pginfo, &e_mr->ib.ib_mr.lkey,
-			  &e_mr->ib.ib_mr.rkey);
+			  &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
 	if (ret) {
 		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
 			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
@@ -2077,8 +2212,8 @@ int ehca_mr_is_maxmr(u64 size,
 		      u64 *iova_start)
 {
 	/* a MR is treated as max-MR only if it fits following: */
-	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
-	    (iova_start == (void *)KERNELBASE)) {
+	if ((size == ehca_mr_len) &&
+	    (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
 		ehca_gen_dbg("this is a max-MR");
 		return 1;
 	} else
@@ -2184,3 +2319,350 @@ void ehca_cleanup_mrmw_cache(void)
 	if (mw_cache)
 		kmem_cache_destroy(mw_cache);
 }
+
+static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
+				     int dir)
+{
+	if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
+		ehca_top_bmap->dir[dir] =
+			kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
+		if (!ehca_top_bmap->dir[dir])
+			return -ENOMEM;
+		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+		memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
+	}
+	return 0;
+}
+
+static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
+{
+	if (!ehca_bmap_valid(ehca_bmap->top[top])) {
+		ehca_bmap->top[top] =
+			kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
+		if (!ehca_bmap->top[top])
+			return -ENOMEM;
+		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+		memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
+	}
+	return ehca_init_top_bmap(ehca_bmap->top[top], dir);
+}
+
+static inline int ehca_calc_index(unsigned long i, unsigned long s)
+{
+	return (i >> s) & EHCA_INDEX_MASK;
+}
+
+void ehca_destroy_busmap(void)
+{
+	int top, dir;
+
+	if (!ehca_bmap)
+		return;
+
+	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+		if (!ehca_bmap_valid(ehca_bmap->top[top]))
+			continue;
+		for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+			if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+				continue;
+
+			kfree(ehca_bmap->top[top]->dir[dir]);
+		}
+
+		kfree(ehca_bmap->top[top]);
+	}
+
+	kfree(ehca_bmap);
+	ehca_bmap = NULL;
+}
+
+static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
+{
+	unsigned long i, start_section, end_section;
+	int top, dir, idx;
+
+	if (!nr_pages)
+		return 0;
+
+	if (!ehca_bmap) {
+		ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
+		if (!ehca_bmap)
+			return -ENOMEM;
+		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+		memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
+	}
+
+	start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
+	end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
+	for (i = start_section; i < end_section; i++) {
+		int ret;
+		top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
+		dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
+		idx = i & EHCA_INDEX_MASK;
+
+		ret = ehca_init_bmap(ehca_bmap, top, dir);
+		if (ret) {
+			ehca_destroy_busmap();
+			return ret;
+		}
+		ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
+		ehca_mr_len += EHCA_SECTSIZE;
+	}
+	return 0;
+}
+
+static int ehca_is_hugepage(unsigned long pfn)
+{
+	int page_order;
+
+	if (pfn & EHCA_HUGEPAGE_PFN_MASK)
+		return 0;
+
+	page_order = compound_order(pfn_to_page(pfn));
+	if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
+		return 0;
+
+	return 1;
+}
+
+static int ehca_create_busmap_callback(unsigned long initial_pfn,
+				       unsigned long total_nr_pages, void *arg)
+{
+	int ret;
+	unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+	if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
+		return ehca_update_busmap(initial_pfn, total_nr_pages);
+
+	/* Given chunk is >= 16GB -> check for hugepages */
+	start_pfn = initial_pfn;
+	end_pfn = initial_pfn + total_nr_pages;
+	pfn = start_pfn;
+
+	while (pfn < end_pfn) {
+		if (ehca_is_hugepage(pfn)) {
+			/* Add mem found in front of the hugepage */
+			nr_pages = pfn - start_pfn;
+			ret = ehca_update_busmap(start_pfn, nr_pages);
+			if (ret)
+				return ret;
+			/* Skip the hugepage */
+			pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
+			start_pfn = pfn;
+		} else
+			pfn += (EHCA_SECTSIZE / PAGE_SIZE);
+	}
+
+	/* Add mem found behind the hugepage(s) */
+	nr_pages = pfn - start_pfn;
+	return ehca_update_busmap(start_pfn, nr_pages);
+}
+
+int ehca_create_busmap(void)
+{
+	int ret;
+
+	ehca_mr_len = 0;
+	ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+				   ehca_create_busmap_callback);
+	return ret;
+}
+
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+				   struct ehca_mr *e_mr,
+				   struct ehca_mr_pginfo *pginfo)
+{
+	int top;
+	u64 hret, *kpage;
+
+	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+	if (!kpage) {
+		ehca_err(&shca->ib_device, "kpage alloc failed");
+		return -ENOMEM;
+	}
+	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+		if (!ehca_bmap_valid(ehca_bmap->top[top]))
+			continue;
+		hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
+		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+			break;
+	}
+
+	ehca_free_fw_ctrlblock(kpage);
+
+	if (hret == H_SUCCESS)
+		return 0; /* Everything is fine */
+	else {
+		ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
+			 "h_ret=%lli e_mr=%p top=%x lkey=%x "
+			 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
+			 e_mr->ib.ib_mr.lkey,
+			 shca->ipz_hca_handle.handle,
+			 e_mr->ipz_mr_handle.handle);
+		return ehca2ib_return_code(hret);
+	}
+}
+
+static u64 ehca_map_vaddr(void *caddr)
+{
+	int top, dir, idx;
+	unsigned long abs_addr, offset;
+	u64 entry;
+
+	if (!ehca_bmap)
+		return EHCA_INVAL_ADDR;
+
+	abs_addr = virt_to_abs(caddr);
+	top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
+	if (!ehca_bmap_valid(ehca_bmap->top[top]))
+		return EHCA_INVAL_ADDR;
+
+	dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
+	if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+		return EHCA_INVAL_ADDR;
+
+	idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
+
+	entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
+	if (ehca_bmap_valid(entry)) {
+		offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
+		return entry | offset;
+	} else
+		return EHCA_INVAL_ADDR;
+}
+
+static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+	return dma_addr == EHCA_INVAL_ADDR;
+}
+
+static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
+			       size_t size, enum dma_data_direction direction)
+{
+	if (cpu_addr)
+		return ehca_map_vaddr(cpu_addr);
+	else
+		return EHCA_INVAL_ADDR;
+}
+
+static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+				  enum dma_data_direction direction)
+{
+	/* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
+			     unsigned long offset, size_t size,
+			     enum dma_data_direction direction)
+{
+	u64 addr;
+
+	if (offset + size > PAGE_SIZE)
+		return EHCA_INVAL_ADDR;
+
+	addr = ehca_map_vaddr(page_address(page));
+	if (!ehca_dma_mapping_error(dev, addr))
+		addr += offset;
+
+	return addr;
+}
+
+static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+				enum dma_data_direction direction)
+{
+	/* This is only a stub; nothing to be done here */
+}
+
+static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i) {
+		u64 addr;
+		addr = ehca_map_vaddr(sg_virt(sg));
+		if (ehca_dma_mapping_error(dev, addr))
+			return 0;
+
+		sg->dma_address = addr;
+		sg->dma_length = sg->length;
+	}
+	return nents;
+}
+
+static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+			      int nents, enum dma_data_direction direction)
+{
+	/* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+	return sg->dma_address;
+}
+
+static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
+{
+	return sg->length;
+}
+
+static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+					 size_t size,
+					 enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
+					    size_t size,
+					    enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
+				     u64 *dma_handle, gfp_t flag)
+{
+	struct page *p;
+	void *addr = NULL;
+	u64 dma_addr;
+
+	p = alloc_pages(flag, get_order(size));
+	if (p) {
+		addr = page_address(p);
+		dma_addr = ehca_map_vaddr(addr);
+		if (ehca_dma_mapping_error(dev, dma_addr)) {
+			free_pages((unsigned long)addr, get_order(size));
+			return NULL;
+		}
+		if (dma_handle)
+			*dma_handle = dma_addr;
+		return addr;
+	}
+	return NULL;
+}
+
+static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
+				   void *cpu_addr, u64 dma_handle)
+{
+	if (cpu_addr && size)
+		free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+
+struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
+	.mapping_error          = ehca_dma_mapping_error,
+	.map_single             = ehca_dma_map_single,
+	.unmap_single           = ehca_dma_unmap_single,
+	.map_page               = ehca_dma_map_page,
+	.unmap_page             = ehca_dma_unmap_page,
+	.map_sg                 = ehca_dma_map_sg,
+	.unmap_sg               = ehca_dma_unmap_sg,
+	.dma_address            = ehca_dma_address,
+	.dma_len                = ehca_dma_len,
+	.sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
+	.sync_single_for_device = ehca_dma_sync_single_for_device,
+	.alloc_coherent         = ehca_dma_alloc_coherent,
+	.free_coherent          = ehca_dma_free_coherent,
+};
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
index bc8f4e31c123..50d8b51306dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.h
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h
@@ -42,6 +42,11 @@
 #ifndef _EHCA_MRMW_H_
 #define _EHCA_MRMW_H_
 
+enum ehca_reg_type {
+	EHCA_REG_MR,
+	EHCA_REG_BUSMAP_MR
+};
+
 int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_mr *e_mr,
 		u64 *iova_start,
@@ -50,7 +55,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_pd *e_pd,
 		struct ehca_mr_pginfo *pginfo,
 		u32 *lkey,
-		u32 *rkey);
+		u32 *rkey,
+		enum ehca_reg_type reg_type);
 
 int ehca_reg_mr_rpages(struct ehca_shca *shca,
 		       struct ehca_mr *e_mr,
@@ -118,4 +124,9 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
 
 void ehca_mr_deletenew(struct ehca_mr *mr);
 
+int ehca_create_busmap(void);
+
+void ehca_destroy_busmap(void);
+
+extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
 #endif /*_EHCA_MRMW_H_*/