diff options
Diffstat (limited to 'drivers/infiniband/core/umem.c')
-rw-r--r-- | drivers/infiniband/core/umem.c | 17 |
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 4e3128ff73c1..fe78f7d25099 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/dma-mapping.h> | 38 | #include <linux/dma-mapping.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/hugetlb.h> | 40 | #include <linux/hugetlb.h> |
41 | #include <linux/dma-attrs.h> | ||
41 | 42 | ||
42 | #include "uverbs.h" | 43 | #include "uverbs.h" |
43 | 44 | ||
@@ -72,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d | |||
72 | * @addr: userspace virtual address to start at | 73 | * @addr: userspace virtual address to start at |
73 | * @size: length of region to pin | 74 | * @size: length of region to pin |
74 | * @access: IB_ACCESS_xxx flags for memory being pinned | 75 | * @access: IB_ACCESS_xxx flags for memory being pinned |
76 | * @dmasync: flush in-flight DMA when the memory region is written | ||
75 | */ | 77 | */ |
76 | struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | 78 | struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, |
77 | size_t size, int access) | 79 | size_t size, int access, int dmasync) |
78 | { | 80 | { |
79 | struct ib_umem *umem; | 81 | struct ib_umem *umem; |
80 | struct page **page_list; | 82 | struct page **page_list; |
@@ -87,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
87 | int ret; | 89 | int ret; |
88 | int off; | 90 | int off; |
89 | int i; | 91 | int i; |
92 | DEFINE_DMA_ATTRS(attrs); | ||
93 | |||
94 | if (dmasync) | ||
95 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | ||
90 | 96 | ||
91 | if (!can_do_mlock()) | 97 | if (!can_do_mlock()) |
92 | return ERR_PTR(-EPERM); | 98 | return ERR_PTR(-EPERM); |
@@ -174,10 +180,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
174 | sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); | 180 | sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); |
175 | } | 181 | } |
176 | 182 | ||
177 | chunk->nmap = ib_dma_map_sg(context->device, | 183 | chunk->nmap = ib_dma_map_sg_attrs(context->device, |
178 | &chunk->page_list[0], | 184 | &chunk->page_list[0], |
179 | chunk->nents, | 185 | chunk->nents, |
180 | DMA_BIDIRECTIONAL); | 186 | DMA_BIDIRECTIONAL, |
187 | &attrs); | ||
181 | if (chunk->nmap <= 0) { | 188 | if (chunk->nmap <= 0) { |
182 | for (i = 0; i < chunk->nents; ++i) | 189 | for (i = 0; i < chunk->nents; ++i) |
183 | put_page(sg_page(&chunk->page_list[i])); | 190 | put_page(sg_page(&chunk->page_list[i])); |