author    Linus Torvalds <torvalds@linux-foundation.org>    2009-06-23 14:36:01 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-06-23 14:36:01 -0400
commit    32708e8e95265505d9627b3bc9ed16566e0dca1c (patch)
tree      c5161d2a3da5d75123c05eda5713469d02f743b1 /drivers
parent    f5bcf5f44796bf30a058a01c10a61b19784f0540 (diff)
parent    4a7eca824cec51168dcd5e0c9bf9edbc809fb975 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA: Add __init/__exit macros to addr.c and cma.c
  IB/ehca: Bump version number
  mlx4_core: Fix dma_sync_single_for_cpu() with matching for_device() calls
  IB/mthca: Replace dma_sync_single() use with proper functions
  RDMA/nes: Fix FIN state handling under error conditions
  RDMA/nes: Fix max_qp_init_rd_atom returned from query device
  IB/ehca: Ensure that guid_entry index is not negative
  IB/ehca: Tolerate dynamic memory operations before driver load
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/core/addr.c            4
-rw-r--r--  drivers/infiniband/core/cma.c             4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_hca.c     2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c   20
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.c  508
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_mrmw.h   13
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c   13
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c        8
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c     2
-rw-r--r--  drivers/net/mlx4/mr.c                    14
10 files changed, 554 insertions(+), 34 deletions(-)
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index ce511d8748ce..5be1bd4fc7ed 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -514,7 +514,7 @@ static struct notifier_block nb = {
 	.notifier_call = netevent_callback
 };
 
-static int addr_init(void)
+static int __init addr_init(void)
 {
 	addr_wq = create_singlethread_workqueue("ib_addr");
 	if (!addr_wq)
@@ -524,7 +524,7 @@ static int addr_init(void)
 	return 0;
 }
 
-static void addr_cleanup(void)
+static void __exit addr_cleanup(void)
 {
 	unregister_netevent_notifier(&nb);
 	destroy_workqueue(addr_wq);
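
The __init/__exit annotations added above (and in cma.c below) let the kernel free module setup code after load and discard the teardown code entirely in built-in configurations. A minimal sketch of the idiom, using a hypothetical module rather than anything from this patch:

	#include <linux/init.h>
	#include <linux/module.h>

	/* __init code is discarded once the module has loaded;
	 * __exit code is dropped for built-in (non-modular) builds. */
	static int __init example_init(void)
	{
		pr_info("example: loaded\n");
		return 0;
	}

	static void __exit example_exit(void)
	{
		pr_info("example: unloaded\n");
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");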
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 851de83ff455..075317884b53 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2960,7 +2960,7 @@ static void cma_remove_one(struct ib_device *device)
 	kfree(cma_dev);
 }
 
-static int cma_init(void)
+static int __init cma_init(void)
 {
 	int ret, low, high, remaining;
 
@@ -2990,7 +2990,7 @@ err:
 	return ret;
 }
 
-static void cma_cleanup(void)
+static void __exit cma_cleanup(void)
 {
 	ib_unregister_client(&cma_client);
 	unregister_netdevice_notifier(&cma_nb);
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 9209c5332dfe..8b92f85d4dd0 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -319,7 +319,7 @@ int ehca_query_gid(struct ib_device *ibdev, u8 port,
 			      ib_device);
 	struct hipz_query_port *rblock;
 
-	if (index > 255) {
+	if (index < 0 || index > 255) {
 		ehca_err(&shca->ib_device, "Invalid index: %x.", index);
 		return -EINVAL;
 	}
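
The ehca_query_gid() fix matters because the index parameter is a signed int later used as an array subscript; checking only the upper bound lets a negative value read before the GID table. A hedged sketch of the two-sided bounds check, with a hypothetical table lookup:

	/* Hypothetical helper: a signed index must be range-checked on
	 * both ends before it is used as an array subscript. */
	static int lookup_entry(const u64 *table, int nelems, int index)
	{
		if (index < 0 || index >= nelems)
			return -EINVAL;	/* reject before dereferencing */
		return table[index] ? 0 : -ENOENT;
	}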
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index ce4e6eff4792..fab18a2c74a8 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0027"
+#define HCAD_VERSION "0028"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -506,6 +506,7 @@ static int ehca_init_device(struct ehca_shca *shca)
 	shca->ib_device.detach_mcast = ehca_detach_mcast;
 	shca->ib_device.process_mad = ehca_process_mad;
 	shca->ib_device.mmap = ehca_mmap;
+	shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
 
 	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
 		shca->ib_device.uverbs_cmd_mask |=
@@ -1028,17 +1029,23 @@ static int __init ehca_module_init(void)
 		goto module_init1;
 	}
 
+	ret = ehca_create_busmap();
+	if (ret) {
+		ehca_gen_err("Cannot create busmap.");
+		goto module_init2;
+	}
+
 	ret = ibmebus_register_driver(&ehca_driver);
 	if (ret) {
 		ehca_gen_err("Cannot register eHCA device driver");
 		ret = -EINVAL;
-		goto module_init2;
+		goto module_init3;
 	}
 
 	ret = register_memory_notifier(&ehca_mem_nb);
 	if (ret) {
 		ehca_gen_err("Failed registering memory add/remove notifier");
-		goto module_init3;
+		goto module_init4;
 	}
 
 	if (ehca_poll_all_eqs != 1) {
@@ -1053,9 +1060,12 @@ static int __init ehca_module_init(void)
 
 	return 0;
 
-module_init3:
+module_init4:
 	ibmebus_unregister_driver(&ehca_driver);
 
+module_init3:
+	ehca_destroy_busmap();
+
 module_init2:
 	ehca_destroy_slab_caches();
 
@@ -1073,6 +1083,8 @@ static void __exit ehca_module_exit(void)
 
 	unregister_memory_notifier(&ehca_mem_nb);
 
+	ehca_destroy_busmap();
+
 	ehca_destroy_slab_caches();
 
 	ehca_destroy_comp_pool();
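
The relabeled goto ladder above follows the usual kernel unwind idiom: each setup step gets a cleanup label, a failure in step N jumps to the label that undoes step N-1, so inserting ehca_create_busmap() shifts every later error target down by one. A minimal sketch, with hypothetical steps standing in for slab-cache creation, busmap creation, and driver registration:

	#include <linux/init.h>

	static int step_a(void) { return 0; }	/* e.g. create slab caches */
	static int step_b(void) { return 0; }	/* e.g. create busmap */
	static int step_c(void) { return 0; }	/* e.g. register driver */
	static void undo_a(void) { }
	static void undo_b(void) { }

	static int __init demo_init(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			return ret;

		ret = step_b();
		if (ret)
			goto cleanup_a;

		ret = step_c();
		if (ret)
			goto cleanup_b;	/* later failures must also undo step_b */

		return 0;

	cleanup_b:
		undo_b();
	cleanup_a:
		undo_a();
		return ret;
	}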
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 72f83f7df614..7663a2a9f130 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -53,6 +53,38 @@
 /* max number of rpages (per hcall register_rpages) */
 #define MAX_RPAGES 512
 
+/* DMEM toleration management */
+#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
+#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
+#define EHCA_HUGEPAGESHIFT     34
+#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
+#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
+#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
+#define EHCA_DIR_INDEX_SHIFT 13            /* 8k Entries in 64k block */
+#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
+#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
+#define EHCA_TOP_MAP_SIZE (0x10000)        /* currently fixed map size */
+#define EHCA_DIR_MAP_SIZE (0x10000)
+#define EHCA_ENT_MAP_SIZE (0x10000)
+#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
+
+static unsigned long ehca_mr_len;
+
+/*
+ * Memory map data structures
+ */
+struct ehca_dir_bmap {
+	u64 ent[EHCA_MAP_ENTRIES];
+};
+struct ehca_top_bmap {
+	struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
+};
+struct ehca_bmap {
+	struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
+};
+
+static struct ehca_bmap *ehca_bmap;
+
 static struct kmem_cache *mr_cache;
 static struct kmem_cache *mw_cache;
 
@@ -68,6 +100,8 @@ enum ehca_mr_pgsize {
 #define EHCA_MR_PGSHIFT1M  20
 #define EHCA_MR_PGSHIFT16M 24
 
+static u64 ehca_map_vaddr(void *caddr);
+
 static u32 ehca_encode_hwpage_size(u32 pgsize)
 {
 	int log = ilog2(pgsize);
@@ -135,7 +169,8 @@ struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
 		goto get_dma_mr_exit0;
 	}
 
-	ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
+	ret = ehca_reg_maxmr(shca, e_maxmr,
+			     (void *)ehca_map_vaddr((void *)KERNELBASE),
 			     mr_access_flags, e_pd,
 			     &e_maxmr->ib.ib_mr.lkey,
 			     &e_maxmr->ib.ib_mr.rkey);
@@ -251,7 +286,7 @@ struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
 			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-			  &e_mr->ib.ib_mr.rkey);
+			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
 	if (ret) {
 		ib_mr = ERR_PTR(ret);
 		goto reg_phys_mr_exit1;
@@ -370,7 +405,7 @@ reg_user_mr_fallback:
 
 	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
 			  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-			  &e_mr->ib.ib_mr.rkey);
+			  &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
 	if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
 		ehca_warn(pd->device, "failed to register mr "
 			  "with hwpage_size=%llx", hwpage_size);
@@ -794,7 +829,7 @@ struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
 	ret = ehca_reg_mr(shca, e_fmr, NULL,
 			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
 			  mr_access_flags, e_pd, &pginfo,
-			  &tmp_lkey, &tmp_rkey);
+			  &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
 	if (ret) {
 		ib_fmr = ERR_PTR(ret);
 		goto alloc_fmr_exit1;
@@ -983,6 +1018,10 @@ free_fmr_exit0:
 
 /*----------------------------------------------------------------------*/
 
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+				   struct ehca_mr *e_mr,
+				   struct ehca_mr_pginfo *pginfo);
+
 int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_mr *e_mr,
 		u64 *iova_start,
@@ -991,7 +1030,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_pd *e_pd,
 		struct ehca_mr_pginfo *pginfo,
 		u32 *lkey, /*OUT*/
-		u32 *rkey) /*OUT*/
+		u32 *rkey, /*OUT*/
+		enum ehca_reg_type reg_type)
 {
 	int ret;
 	u64 h_ret;
@@ -1015,7 +1055,13 @@ int ehca_reg_mr(struct ehca_shca *shca,
 
 	e_mr->ipz_mr_handle = hipzout.handle;
 
-	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+	if (reg_type == EHCA_REG_BUSMAP_MR)
+		ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
+	else if (reg_type == EHCA_REG_MR)
+		ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
+	else
+		ret = -EINVAL;
+
 	if (ret)
 		goto ehca_reg_mr_exit1;
 
@@ -1316,7 +1362,7 @@ int ehca_rereg_mr(struct ehca_shca *shca,
 	e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
-			  e_pd, pginfo, lkey, rkey);
+			  e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
 	if (ret) {
 		u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
 		memcpy(&e_mr->flags, &(save_mr.flags),
@@ -1409,7 +1455,7 @@ int ehca_unmap_one_fmr(struct ehca_shca *shca,
 	ret = ehca_reg_mr(shca, e_fmr, NULL,
 			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
 			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
-			  &tmp_rkey);
+			  &tmp_rkey, EHCA_REG_MR);
 	if (ret) {
 		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
 		memcpy(&e_fmr->flags, &(save_mr.flags),
@@ -1478,6 +1524,90 @@ ehca_reg_smr_exit0:
 } /* end ehca_reg_smr() */
 
 /*----------------------------------------------------------------------*/
+static inline void *ehca_calc_sectbase(int top, int dir, int idx)
+{
+	unsigned long ret = idx;
+	ret |= dir << EHCA_DIR_INDEX_SHIFT;
+	ret |= top << EHCA_TOP_INDEX_SHIFT;
+	return abs_to_virt(ret << SECTION_SIZE_BITS);
+}
+
+#define ehca_bmap_valid(entry) \
+	((u64)entry != (u64)EHCA_INVAL_ADDR)
+
+static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
+			       struct ehca_shca *shca, struct ehca_mr *mr,
+			       struct ehca_mr_pginfo *pginfo)
+{
+	u64 h_ret = 0;
+	unsigned long page = 0;
+	u64 rpage = virt_to_abs(kpage);
+	int page_count;
+
+	void *sectbase = ehca_calc_sectbase(top, dir, idx);
+	if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
+		ehca_err(&shca->ib_device, "reg_mr_section will probably fail:"
+			 "hwpage_size does not fit to "
+			 "section start address");
+	}
+	page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
+
+	while (page < page_count) {
+		u64 rnum;
+		for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
+		     rnum++) {
+			void *pg = sectbase + ((page++) * pginfo->hwpage_size);
+			kpage[rnum] = virt_to_abs(pg);
+		}
+
+		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
+			ehca_encode_hwpage_size(pginfo->hwpage_size),
+			0, rpage, rnum);
+
+		if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
+			ehca_err(&shca->ib_device, "register_rpage_mr failed");
+			return h_ret;
+		}
+	}
+	return h_ret;
+}
+
+static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
+				struct ehca_shca *shca, struct ehca_mr *mr,
+				struct ehca_mr_pginfo *pginfo)
+{
+	u64 hret = H_SUCCESS;
+	int idx;
+
+	for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
+		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
+			continue;
+
+		hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
+					   pginfo);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
+}
+
+static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
+				    struct ehca_mr *mr,
+				    struct ehca_mr_pginfo *pginfo)
+{
+	u64 hret = H_SUCCESS;
+	int dir;
+
+	for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+		if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+			continue;
+
+		hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
+		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
+			return hret;
+	}
+	return hret;
+}
 
 /* register internal max-MR to internal SHCA */
 int ehca_reg_internal_maxmr(
@@ -1495,6 +1625,11 @@ int ehca_reg_internal_maxmr(
 	u32 num_hwpages;
 	u64 hw_pgsize;
 
+	if (!ehca_bmap) {
+		ret = -EFAULT;
+		goto ehca_reg_internal_maxmr_exit0;
+	}
+
 	e_mr = ehca_mr_new();
 	if (!e_mr) {
 		ehca_err(&shca->ib_device, "out of memory");
@@ -1504,8 +1639,8 @@ int ehca_reg_internal_maxmr(
 	e_mr->flags |= EHCA_MR_FLAG_MAXMR;
 
 	/* register internal max-MR on HCA */
-	size_maxmr = (u64)high_memory - PAGE_OFFSET;
-	iova_start = (u64 *)KERNELBASE;
+	size_maxmr = ehca_mr_len;
+	iova_start = (u64 *)ehca_map_vaddr((void *)KERNELBASE);
 	ib_pbuf.addr = 0;
 	ib_pbuf.size = size_maxmr;
 	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
@@ -1524,7 +1659,7 @@ int ehca_reg_internal_maxmr(
 
 	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
 			  &pginfo, &e_mr->ib.ib_mr.lkey,
-			  &e_mr->ib.ib_mr.rkey);
+			  &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
 	if (ret) {
 		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
 			 "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
@@ -2077,8 +2212,8 @@ int ehca_mr_is_maxmr(u64 size,
 		      u64 *iova_start)
 {
 	/* a MR is treated as max-MR only if it fits following: */
-	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
-	    (iova_start == (void *)KERNELBASE)) {
+	if ((size == ehca_mr_len) &&
+	    (iova_start == (void *)ehca_map_vaddr((void *)KERNELBASE))) {
 		ehca_gen_dbg("this is a max-MR");
 		return 1;
 	} else
@@ -2184,3 +2319,350 @@ void ehca_cleanup_mrmw_cache(void)
 	if (mw_cache)
 		kmem_cache_destroy(mw_cache);
 }
+
+static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
+				     int dir)
+{
+	if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
+		ehca_top_bmap->dir[dir] =
+			kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
+		if (!ehca_top_bmap->dir[dir])
+			return -ENOMEM;
+		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+		memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
+	}
+	return 0;
+}
+
+static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
+{
+	if (!ehca_bmap_valid(ehca_bmap->top[top])) {
+		ehca_bmap->top[top] =
+			kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
+		if (!ehca_bmap->top[top])
+			return -ENOMEM;
+		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+		memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
+	}
+	return ehca_init_top_bmap(ehca_bmap->top[top], dir);
+}
+
+static inline int ehca_calc_index(unsigned long i, unsigned long s)
+{
+	return (i >> s) & EHCA_INDEX_MASK;
+}
+
+void ehca_destroy_busmap(void)
+{
+	int top, dir;
+
+	if (!ehca_bmap)
+		return;
+
+	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+		if (!ehca_bmap_valid(ehca_bmap->top[top]))
+			continue;
+		for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
+			if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+				continue;
+
+			kfree(ehca_bmap->top[top]->dir[dir]);
+		}
+
+		kfree(ehca_bmap->top[top]);
+	}
+
+	kfree(ehca_bmap);
+	ehca_bmap = NULL;
+}
+
+static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
+{
+	unsigned long i, start_section, end_section;
+	int top, dir, idx;
+
+	if (!nr_pages)
+		return 0;
+
+	if (!ehca_bmap) {
+		ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
+		if (!ehca_bmap)
+			return -ENOMEM;
+		/* Set map block to 0xFF according to EHCA_INVAL_ADDR */
+		memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
+	}
+
+	start_section = phys_to_abs(pfn * PAGE_SIZE) / EHCA_SECTSIZE;
+	end_section = phys_to_abs((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
+	for (i = start_section; i < end_section; i++) {
+		int ret;
+		top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
+		dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
+		idx = i & EHCA_INDEX_MASK;
+
+		ret = ehca_init_bmap(ehca_bmap, top, dir);
+		if (ret) {
+			ehca_destroy_busmap();
+			return ret;
+		}
+		ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
+		ehca_mr_len += EHCA_SECTSIZE;
+	}
+	return 0;
+}
+
+static int ehca_is_hugepage(unsigned long pfn)
+{
+	int page_order;
+
+	if (pfn & EHCA_HUGEPAGE_PFN_MASK)
+		return 0;
+
+	page_order = compound_order(pfn_to_page(pfn));
+	if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
+		return 0;
+
+	return 1;
+}
+
+static int ehca_create_busmap_callback(unsigned long initial_pfn,
+				       unsigned long total_nr_pages, void *arg)
+{
+	int ret;
+	unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+	if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
+		return ehca_update_busmap(initial_pfn, total_nr_pages);
+
+	/* Given chunk is >= 16GB -> check for hugepages */
+	start_pfn = initial_pfn;
+	end_pfn = initial_pfn + total_nr_pages;
+	pfn = start_pfn;
+
+	while (pfn < end_pfn) {
+		if (ehca_is_hugepage(pfn)) {
+			/* Add mem found in front of the hugepage */
+			nr_pages = pfn - start_pfn;
+			ret = ehca_update_busmap(start_pfn, nr_pages);
+			if (ret)
+				return ret;
+			/* Skip the hugepage */
+			pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
+			start_pfn = pfn;
+		} else
+			pfn += (EHCA_SECTSIZE / PAGE_SIZE);
+	}
+
+	/* Add mem found behind the hugepage(s) */
+	nr_pages = pfn - start_pfn;
+	return ehca_update_busmap(start_pfn, nr_pages);
+}
+
+int ehca_create_busmap(void)
+{
+	int ret;
+
+	ehca_mr_len = 0;
+	ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
+				   ehca_create_busmap_callback);
+	return ret;
+}
+
+static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
+				   struct ehca_mr *e_mr,
+				   struct ehca_mr_pginfo *pginfo)
+{
+	int top;
+	u64 hret, *kpage;
+
+	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+	if (!kpage) {
+		ehca_err(&shca->ib_device, "kpage alloc failed");
+		return -ENOMEM;
+	}
+	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
+		if (!ehca_bmap_valid(ehca_bmap->top[top]))
+			continue;
+		hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
+		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
+			break;
+	}
+
+	ehca_free_fw_ctrlblock(kpage);
+
+	if (hret == H_SUCCESS)
+		return 0; /* Everything is fine */
+	else {
+		ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
+			 "h_ret=%lli e_mr=%p top=%x lkey=%x "
+			 "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
+			 e_mr->ib.ib_mr.lkey,
+			 shca->ipz_hca_handle.handle,
+			 e_mr->ipz_mr_handle.handle);
+		return ehca2ib_return_code(hret);
+	}
+}
+
+static u64 ehca_map_vaddr(void *caddr)
+{
+	int top, dir, idx;
+	unsigned long abs_addr, offset;
+	u64 entry;
+
+	if (!ehca_bmap)
+		return EHCA_INVAL_ADDR;
+
+	abs_addr = virt_to_abs(caddr);
+	top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
+	if (!ehca_bmap_valid(ehca_bmap->top[top]))
+		return EHCA_INVAL_ADDR;
+
+	dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
+	if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
+		return EHCA_INVAL_ADDR;
+
+	idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
+
+	entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
+	if (ehca_bmap_valid(entry)) {
+		offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
+		return entry | offset;
+	} else
+		return EHCA_INVAL_ADDR;
+}
+
+static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+	return dma_addr == EHCA_INVAL_ADDR;
+}
+
+static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
+			       size_t size, enum dma_data_direction direction)
+{
+	if (cpu_addr)
+		return ehca_map_vaddr(cpu_addr);
+	else
+		return EHCA_INVAL_ADDR;
+}
+
+static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
+				  enum dma_data_direction direction)
+{
+	/* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
+			     unsigned long offset, size_t size,
+			     enum dma_data_direction direction)
+{
+	u64 addr;
+
+	if (offset + size > PAGE_SIZE)
+		return EHCA_INVAL_ADDR;
+
+	addr = ehca_map_vaddr(page_address(page));
+	if (!ehca_dma_mapping_error(dev, addr))
+		addr += offset;
+
+	return addr;
+}
+
+static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
+				enum dma_data_direction direction)
+{
+	/* This is only a stub; nothing to be done here */
+}
+
+static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
+			   int nents, enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	for_each_sg(sgl, sg, nents, i) {
+		u64 addr;
+		addr = ehca_map_vaddr(sg_virt(sg));
+		if (ehca_dma_mapping_error(dev, addr))
+			return 0;
+
+		sg->dma_address = addr;
+		sg->dma_length = sg->length;
+	}
+	return nents;
+}
+
+static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+			      int nents, enum dma_data_direction direction)
+{
+	/* This is only a stub; nothing to be done here */
+}
+
+static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
+{
+	return sg->dma_address;
+}
+
+static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
+{
+	return sg->length;
+}
+
+static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
+					 size_t size,
+					 enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
+					    size_t size,
+					    enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
+				     u64 *dma_handle, gfp_t flag)
+{
+	struct page *p;
+	void *addr = NULL;
+	u64 dma_addr;
+
+	p = alloc_pages(flag, get_order(size));
+	if (p) {
+		addr = page_address(p);
+		dma_addr = ehca_map_vaddr(addr);
+		if (ehca_dma_mapping_error(dev, dma_addr)) {
+			free_pages((unsigned long)addr, get_order(size));
+			return NULL;
+		}
+		if (dma_handle)
+			*dma_handle = dma_addr;
+		return addr;
+	}
+	return NULL;
+}
+
+static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
+				   void *cpu_addr, u64 dma_handle)
+{
+	if (cpu_addr && size)
+		free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+
+struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
+	.mapping_error          = ehca_dma_mapping_error,
+	.map_single             = ehca_dma_map_single,
+	.unmap_single           = ehca_dma_unmap_single,
+	.map_page               = ehca_dma_map_page,
+	.unmap_page             = ehca_dma_unmap_page,
+	.map_sg                 = ehca_dma_map_sg,
+	.unmap_sg               = ehca_dma_unmap_sg,
+	.dma_address            = ehca_dma_address,
+	.dma_len                = ehca_dma_len,
+	.sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
+	.sync_single_for_device = ehca_dma_sync_single_for_device,
+	.alloc_coherent         = ehca_dma_alloc_coherent,
+	.free_coherent          = ehca_dma_free_coherent,
+};
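
ehca_map_vaddr() above resolves a kernel virtual address through a three-level radix map: the address's memory-section number is split into 13-bit top/dir/idx fields (EHCA_DIR_INDEX_SHIFT = 13), and each populated leaf entry holds the bus offset assigned by ehca_update_busmap(). A standalone sketch of just the index arithmetic, assuming SECTION_SIZE_BITS is 24 as on the pSeries systems this driver targets:

	#include <stdint.h>
	#include <stdio.h>

	#define DIR_INDEX_SHIFT 13                   /* 8k entries per level */
	#define TOP_INDEX_SHIFT (DIR_INDEX_SHIFT * 2)
	#define INDEX_MASK      ((1UL << DIR_INDEX_SHIFT) - 1)
	#define SECTSHIFT       24                   /* assumed SECTION_SIZE_BITS */

	int main(void)
	{
		uint64_t abs_addr = 0x123456789aULL;  /* example absolute address */
		uint64_t sect = abs_addr >> SECTSHIFT; /* 16MB section number */

		/* Same decomposition ehca_calc_index() performs per level. */
		int top = (int)((sect >> TOP_INDEX_SHIFT) & INDEX_MASK);
		int dir = (int)((sect >> DIR_INDEX_SHIFT) & INDEX_MASK);
		int idx = (int)(sect & INDEX_MASK);

		printf("top=%d dir=%d idx=%d\n", top, dir, idx);
		return 0;
	}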
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/infiniband/hw/ehca/ehca_mrmw.h
index bc8f4e31c123..50d8b51306dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.h
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.h
@@ -42,6 +42,11 @@
 #ifndef _EHCA_MRMW_H_
 #define _EHCA_MRMW_H_
 
+enum ehca_reg_type {
+	EHCA_REG_MR,
+	EHCA_REG_BUSMAP_MR
+};
+
 int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_mr *e_mr,
 		u64 *iova_start,
@@ -50,7 +55,8 @@ int ehca_reg_mr(struct ehca_shca *shca,
 		struct ehca_pd *e_pd,
 		struct ehca_mr_pginfo *pginfo,
 		u32 *lkey,
-		u32 *rkey);
+		u32 *rkey,
+		enum ehca_reg_type reg_type);
 
 int ehca_reg_mr_rpages(struct ehca_shca *shca,
 		       struct ehca_mr *e_mr,
@@ -118,4 +124,9 @@ void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
 
 void ehca_mr_deletenew(struct ehca_mr *mr);
 
+int ehca_create_busmap(void);
+
+void ehca_destroy_busmap(void);
+
+extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
 #endif /*_EHCA_MRMW_H_*/
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index d606edf10858..065b20899876 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -352,10 +352,14 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
 
 	BUG_ON(!mtts);
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+				list_len * sizeof (u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < list_len; ++i)
 		mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);
 
-	dma_sync_single(&dev->pdev->dev, dma_handle, list_len * sizeof (u64), DMA_TO_DEVICE);
+	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+				   list_len * sizeof (u64), DMA_TO_DEVICE);
 }
 
 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
@@ -803,12 +807,15 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
 
 	wmb();
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+				list_len * sizeof(u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < list_len; ++i)
 		fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
 						     MTHCA_MTT_FLAG_PRESENT);
 
-	dma_sync_single(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
-			list_len * sizeof(u64), DMA_TO_DEVICE);
+	dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
+				   list_len * sizeof(u64), DMA_TO_DEVICE);
 
 	fmr->mem.arbel.mpt->key = cpu_to_be32(key);
 	fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
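
The deprecated dma_sync_single() performed only the for_cpu half of the streaming-DMA handshake, so the old code never explicitly flushed the CPU's MTT writes toward the device; the fix brackets the writes between a for_cpu and a for_device call. A sketch of the bracket pattern, which the mlx4 hunks below apply identically (device and buffer names here are hypothetical):

	#include <linux/dma-mapping.h>

	static void write_table(struct device *dev, __be64 *table,
				dma_addr_t dma_handle, const u64 *src, int n)
	{
		int i;

		/* Hand the buffer back to the CPU before modifying it... */
		dma_sync_single_for_cpu(dev, dma_handle, n * sizeof(u64),
					DMA_TO_DEVICE);

		for (i = 0; i < n; ++i)
			table[i] = cpu_to_be64(src[i]);

		/* ...then flush the updates so the device sees them. */
		dma_sync_single_for_device(dev, dma_handle, n * sizeof(u64),
					   DMA_TO_DEVICE);
	}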
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 11c7d6642014..114b802771ad 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -472,6 +472,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
 
 static void nes_retrans_expired(struct nes_cm_node *cm_node)
 {
+	struct iw_cm_id *cm_id = cm_node->cm_id;
 	switch (cm_node->state) {
 	case NES_CM_STATE_SYN_RCVD:
 	case NES_CM_STATE_CLOSING:
@@ -479,7 +480,9 @@ static void nes_retrans_expired(struct nes_cm_node *cm_node)
 		break;
 	case NES_CM_STATE_LAST_ACK:
 	case NES_CM_STATE_FIN_WAIT1:
-	case NES_CM_STATE_MPAREJ_RCVD:
+		if (cm_node->cm_id)
+			cm_id->rem_ref(cm_id);
+		cm_node->state = NES_CM_STATE_CLOSED;
 		send_reset(cm_node, NULL);
 		break;
 	default:
@@ -1406,6 +1409,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 	case NES_CM_STATE_CLOSED:
 		drop_packet(skb);
 		break;
+	case NES_CM_STATE_FIN_WAIT1:
 	case NES_CM_STATE_LAST_ACK:
 		cm_node->cm_id->rem_ref(cm_node->cm_id);
 	case NES_CM_STATE_TIME_WAIT:
@@ -1413,8 +1417,6 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
 		rem_ref_cm_node(cm_node->cm_core, cm_node);
 		drop_packet(skb);
 		break;
-	case NES_CM_STATE_FIN_WAIT1:
-		nes_debug(NES_DBG_CM, "Bad state %s[%u]\n", __func__, __LINE__);
 	default:
 		drop_packet(skb);
 		break;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 64d5cfd8f380..21e0fd336cf7 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -654,7 +654,7 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
 	default:
 		props->max_qp_rd_atom = 0;
 	}
-	props->max_qp_init_rd_atom = props->max_qp_wr;
+	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
 	props->atomic_cap = IB_ATOMIC_NONE;
 	props->max_map_per_fmr = 1;
 
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 5887e4764d22..f96948be0a44 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -399,11 +399,14 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	if (!mtts)
 		return -ENOMEM;
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+				npages * sizeof (u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < npages; ++i)
 		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
-			    npages * sizeof (u64), DMA_TO_DEVICE);
+	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+				   npages * sizeof (u64), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -547,11 +550,14 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
 	/* Make sure MPT status is visible before writing MTT entries */
 	wmb();
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+				npages * sizeof(u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < npages; ++i)
 		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
-			    npages * sizeof(u64), DMA_TO_DEVICE);
+	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+				   npages * sizeof(u64), DMA_TO_DEVICE);
 
 	fmr->mpt->key = cpu_to_be32(key);
 	fmr->mpt->lkey = cpu_to_be32(key);