about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorArthur Kepner <akepner@sgi.com>2008-04-29 04:00:34 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-04-29 11:06:12 -0400
commitcb9fbc5c37b69ac584e61d449cfd590f5ae1f90d (patch)
tree3079752ba33535a21db08bed1390aca9136fadfe
parent309df0c503c35fbb5a09537fcbb1f4967b9ca489 (diff)
IB: expand ib_umem_get() prototype
Add a new parameter, dmasync, to the ib_umem_get() prototype. Use dmasync = 1 when mapping user-allocated CQs with ib_umem_get().

Signed-off-by: Arthur Kepner <akepner@sgi.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Jes Sorensen <jes@sgi.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Roland Dreier <rdreier@cisco.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Grundler <grundler@parisc-linux.org>
Cc: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--drivers/infiniband/core/umem.c17
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_mrmw.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mr.c3
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c2
-rw-r--r--drivers/infiniband/hw/mlx4/doorbell.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_user.h10
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c2
-rw-r--r--include/rdma/ib_umem.h4
-rw-r--r--include/rdma/ib_verbs.h33
15 files changed, 75 insertions, 19 deletions
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 4e3128ff73c1..fe78f7d25099 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -38,6 +38,7 @@
38#include <linux/dma-mapping.h> 38#include <linux/dma-mapping.h>
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/hugetlb.h> 40#include <linux/hugetlb.h>
41#include <linux/dma-attrs.h>
41 42
42#include "uverbs.h" 43#include "uverbs.h"
43 44
@@ -72,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
72 * @addr: userspace virtual address to start at 73 * @addr: userspace virtual address to start at
73 * @size: length of region to pin 74 * @size: length of region to pin
74 * @access: IB_ACCESS_xxx flags for memory being pinned 75 * @access: IB_ACCESS_xxx flags for memory being pinned
76 * @dmasync: flush in-flight DMA when the memory region is written
75 */ 77 */
76struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, 78struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
77 size_t size, int access) 79 size_t size, int access, int dmasync)
78{ 80{
79 struct ib_umem *umem; 81 struct ib_umem *umem;
80 struct page **page_list; 82 struct page **page_list;
@@ -87,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
87 int ret; 89 int ret;
88 int off; 90 int off;
89 int i; 91 int i;
92 DEFINE_DMA_ATTRS(attrs);
93
94 if (dmasync)
95 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
90 96
91 if (!can_do_mlock()) 97 if (!can_do_mlock())
92 return ERR_PTR(-EPERM); 98 return ERR_PTR(-EPERM);
@@ -174,10 +180,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
174 sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); 180 sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
175 } 181 }
176 182
177 chunk->nmap = ib_dma_map_sg(context->device, 183 chunk->nmap = ib_dma_map_sg_attrs(context->device,
178 &chunk->page_list[0], 184 &chunk->page_list[0],
179 chunk->nents, 185 chunk->nents,
180 DMA_BIDIRECTIONAL); 186 DMA_BIDIRECTIONAL,
187 &attrs);
181 if (chunk->nmap <= 0) { 188 if (chunk->nmap <= 0) {
182 for (i = 0; i < chunk->nents; ++i) 189 for (i = 0; i < chunk->nents; ++i)
183 put_page(sg_page(&chunk->page_list[i])); 190 put_page(sg_page(&chunk->page_list[i]));
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 6af2c0f79a67..2acf9b62cf99 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -452,7 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
452 return ERR_PTR(-ENOMEM); 452 return ERR_PTR(-ENOMEM);
453 c2mr->pd = c2pd; 453 c2mr->pd = c2pd;
454 454
455 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); 455 c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
456 if (IS_ERR(c2mr->umem)) { 456 if (IS_ERR(c2mr->umem)) {
457 err = PTR_ERR(c2mr->umem); 457 err = PTR_ERR(c2mr->umem);
458 kfree(c2mr); 458 kfree(c2mr);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ab4695c1dd56..e343e9e64844 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -602,7 +602,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
602 if (!mhp) 602 if (!mhp)
603 return ERR_PTR(-ENOMEM); 603 return ERR_PTR(-ENOMEM);
604 604
605 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc); 605 mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
606 if (IS_ERR(mhp->umem)) { 606 if (IS_ERR(mhp->umem)) {
607 err = PTR_ERR(mhp->umem); 607 err = PTR_ERR(mhp->umem);
608 kfree(mhp); 608 kfree(mhp);
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 46ae4eb2c4e1..f974367cad40 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -323,7 +323,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
323 } 323 }
324 324
325 e_mr->umem = ib_umem_get(pd->uobject->context, start, length, 325 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
326 mr_access_flags); 326 mr_access_flags, 0);
327 if (IS_ERR(e_mr->umem)) { 327 if (IS_ERR(e_mr->umem)) {
328 ib_mr = (void *)e_mr->umem; 328 ib_mr = (void *)e_mr->umem;
329 goto reg_user_mr_exit1; 329 goto reg_user_mr_exit1;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index db4ba92f79fc..9d343b7c2f3b 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -195,7 +195,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
195 goto bail; 195 goto bail;
196 } 196 }
197 197
198 umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags); 198 umem = ib_umem_get(pd->uobject->context, start, length,
199 mr_access_flags, 0);
199 if (IS_ERR(umem)) 200 if (IS_ERR(umem))
200 return (void *) umem; 201 return (void *) umem;
201 202
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5e570bb0bb6f..e3dddfc687f9 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -137,7 +137,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
137 int err; 137 int err;
138 138
139 *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe), 139 *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
140 IB_ACCESS_LOCAL_WRITE); 140 IB_ACCESS_LOCAL_WRITE, 1);
141 if (IS_ERR(*umem)) 141 if (IS_ERR(*umem))
142 return PTR_ERR(*umem); 142 return PTR_ERR(*umem);
143 143
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c
index 8e342cc9baec..8aee4233b388 100644
--- a/drivers/infiniband/hw/mlx4/doorbell.c
+++ b/drivers/infiniband/hw/mlx4/doorbell.c
@@ -63,7 +63,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
63 page->user_virt = (virt & PAGE_MASK); 63 page->user_virt = (virt & PAGE_MASK);
64 page->refcnt = 0; 64 page->refcnt = 0;
65 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, 65 page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
66 PAGE_SIZE, 0); 66 PAGE_SIZE, 0, 0);
67 if (IS_ERR(page->umem)) { 67 if (IS_ERR(page->umem)) {
68 err = PTR_ERR(page->umem); 68 err = PTR_ERR(page->umem);
69 kfree(page); 69 kfree(page);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index fe2c2e94a5f8..68e92485fc76 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -132,7 +132,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
132 if (!mr) 132 if (!mr)
133 return ERR_PTR(-ENOMEM); 133 return ERR_PTR(-ENOMEM);
134 134
135 mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags); 135 mr->umem = ib_umem_get(pd->uobject->context, start, length,
136 access_flags, 0);
136 if (IS_ERR(mr->umem)) { 137 if (IS_ERR(mr->umem)) {
137 err = PTR_ERR(mr->umem); 138 err = PTR_ERR(mr->umem);
138 goto err_free; 139 goto err_free;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 80ea8b9e7761..8e02ecfec188 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -482,7 +482,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
482 goto err; 482 goto err;
483 483
484 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, 484 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
485 qp->buf_size, 0); 485 qp->buf_size, 0, 0);
486 if (IS_ERR(qp->umem)) { 486 if (IS_ERR(qp->umem)) {
487 err = PTR_ERR(qp->umem); 487 err = PTR_ERR(qp->umem);
488 goto err; 488 goto err;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 204619702f9d..12d6bc6f8007 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -109,7 +109,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
109 } 109 }
110 110
111 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, 111 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
112 buf_size, 0); 112 buf_size, 0, 0);
113 if (IS_ERR(srq->umem)) { 113 if (IS_ERR(srq->umem)) {
114 err = PTR_ERR(srq->umem); 114 err = PTR_ERR(srq->umem);
115 goto err_srq; 115 goto err_srq;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 696e1f302332..2a9f460cf061 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1006,17 +1006,23 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1006 struct mthca_dev *dev = to_mdev(pd->device); 1006 struct mthca_dev *dev = to_mdev(pd->device);
1007 struct ib_umem_chunk *chunk; 1007 struct ib_umem_chunk *chunk;
1008 struct mthca_mr *mr; 1008 struct mthca_mr *mr;
1009 struct mthca_reg_mr ucmd;
1009 u64 *pages; 1010 u64 *pages;
1010 int shift, n, len; 1011 int shift, n, len;
1011 int i, j, k; 1012 int i, j, k;
1012 int err = 0; 1013 int err = 0;
1013 int write_mtt_size; 1014 int write_mtt_size;
1014 1015
1016 if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
1017 return ERR_PTR(-EFAULT);
1018
1015 mr = kmalloc(sizeof *mr, GFP_KERNEL); 1019 mr = kmalloc(sizeof *mr, GFP_KERNEL);
1016 if (!mr) 1020 if (!mr)
1017 return ERR_PTR(-ENOMEM); 1021 return ERR_PTR(-ENOMEM);
1018 1022
1019 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); 1023 mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
1024 ucmd.mr_attrs & MTHCA_MR_DMASYNC);
1025
1020 if (IS_ERR(mr->umem)) { 1026 if (IS_ERR(mr->umem)) {
1021 err = PTR_ERR(mr->umem); 1027 err = PTR_ERR(mr->umem);
1022 goto err; 1028 goto err;
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
index 02cc0a766f3a..f8cb3b664d37 100644
--- a/drivers/infiniband/hw/mthca/mthca_user.h
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -41,7 +41,7 @@
41 * Increment this value if any changes that break userspace ABI 41 * Increment this value if any changes that break userspace ABI
42 * compatibility are made. 42 * compatibility are made.
43 */ 43 */
44#define MTHCA_UVERBS_ABI_VERSION 1 44#define MTHCA_UVERBS_ABI_VERSION 2
45 45
46/* 46/*
47 * Make sure that all structs defined in this file remain laid out so 47 * Make sure that all structs defined in this file remain laid out so
@@ -61,6 +61,14 @@ struct mthca_alloc_pd_resp {
61 __u32 reserved; 61 __u32 reserved;
62}; 62};
63 63
64struct mthca_reg_mr {
65 __u32 mr_attrs;
66#define MTHCA_MR_DMASYNC 0x1
67/* mark the memory region with a DMA attribute that causes
68 * in-flight DMA to be flushed when the region is written to */
69 __u32 reserved;
70};
71
64struct mthca_create_cq { 72struct mthca_create_cq {
65 __u32 lkey; 73 __u32 lkey;
66 __u32 pdn; 74 __u32 pdn;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index ee74f7c7a6da..9ae397a0ff7e 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -2377,7 +2377,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2377 u8 single_page = 1; 2377 u8 single_page = 1;
2378 u8 stag_key; 2378 u8 stag_key;
2379 2379
2380 region = ib_umem_get(pd->uobject->context, start, length, acc); 2380 region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
2381 if (IS_ERR(region)) { 2381 if (IS_ERR(region)) {
2382 return (struct ib_mr *)region; 2382 return (struct ib_mr *)region;
2383 } 2383 }
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 22298423cf0b..9ee0d2e51b16 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -62,7 +62,7 @@ struct ib_umem_chunk {
62#ifdef CONFIG_INFINIBAND_USER_MEM 62#ifdef CONFIG_INFINIBAND_USER_MEM
63 63
64struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, 64struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
65 size_t size, int access); 65 size_t size, int access, int dmasync);
66void ib_umem_release(struct ib_umem *umem); 66void ib_umem_release(struct ib_umem *umem);
67int ib_umem_page_count(struct ib_umem *umem); 67int ib_umem_page_count(struct ib_umem *umem);
68 68
@@ -72,7 +72,7 @@ int ib_umem_page_count(struct ib_umem *umem);
72 72
73static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context, 73static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
74 unsigned long addr, size_t size, 74 unsigned long addr, size_t size,
75 int access) { 75 int access, int dmasync) {
76 return ERR_PTR(-EINVAL); 76 return ERR_PTR(-EINVAL);
77} 77}
78static inline void ib_umem_release(struct ib_umem *umem) { } 78static inline void ib_umem_release(struct ib_umem *umem) { }
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 2dcbecce3f61..911a661b7278 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1542,6 +1542,24 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
1542 dma_unmap_single(dev->dma_device, addr, size, direction); 1542 dma_unmap_single(dev->dma_device, addr, size, direction);
1543} 1543}
1544 1544
1545static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
1546 void *cpu_addr, size_t size,
1547 enum dma_data_direction direction,
1548 struct dma_attrs *attrs)
1549{
1550 return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
1551 direction, attrs);
1552}
1553
1554static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
1555 u64 addr, size_t size,
1556 enum dma_data_direction direction,
1557 struct dma_attrs *attrs)
1558{
1559 return dma_unmap_single_attrs(dev->dma_device, addr, size,
1560 direction, attrs);
1561}
1562
1545/** 1563/**
1546 * ib_dma_map_page - Map a physical page to DMA address 1564 * ib_dma_map_page - Map a physical page to DMA address
1547 * @dev: The device for which the dma_addr is to be created 1565 * @dev: The device for which the dma_addr is to be created
@@ -1611,6 +1629,21 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
1611 dma_unmap_sg(dev->dma_device, sg, nents, direction); 1629 dma_unmap_sg(dev->dma_device, sg, nents, direction);
1612} 1630}
1613 1631
1632static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
1633 struct scatterlist *sg, int nents,
1634 enum dma_data_direction direction,
1635 struct dma_attrs *attrs)
1636{
1637 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1638}
1639
1640static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
1641 struct scatterlist *sg, int nents,
1642 enum dma_data_direction direction,
1643 struct dma_attrs *attrs)
1644{
1645 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
1646}
1614/** 1647/**
1615 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 1648 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
1616 * @dev: The device for which the DMA addresses were created 1649 * @dev: The device for which the DMA addresses were created