diff options
author | David Gilhooley <dgilhooley@nvidia.com> | 2017-11-13 00:38:58 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-11-14 14:15:58 -0500 |
commit | b22c5911dd2d6f1c4bc218f020228ec23e7e0802 (patch) | |
tree | df6dca2f0eb4bb1745c44cc9358383e023f3a5a3 | |
parent | 90aeab9dee07a63e4bac6d92646dfd80e65d2edd (diff) |
gpu: nvgpu: Pass DMA allocation flags correctly
There are DMA attribute flags that need to be passed consistently to both
the dma_alloc and sg_alloc calls. Update nvgpu_dma_alloc_flags_sys to always
pass the flags to both.
Bug 1930032
Change-Id: I10c4c07d7b518d9ab6c48dd7a0758c68750d02a6
Signed-off-by: David Gilhooley <dgilhooley@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1596848
Reviewed-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/dma.c | 54 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/linux/dma.h | 8 |
2 files changed, 31 insertions, 31 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c index 5bac42e3..22f2cefb 100644 --- a/drivers/gpu/nvgpu/common/linux/dma.c +++ b/drivers/gpu/nvgpu/common/linux/dma.c | |||
@@ -211,6 +211,8 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags, | |||
211 | struct device *d = dev_from_gk20a(g); | 211 | struct device *d = dev_from_gk20a(g); |
212 | int err; | 212 | int err; |
213 | dma_addr_t iova; | 213 | dma_addr_t iova; |
214 | DEFINE_DMA_ATTRS(dma_attrs); | ||
215 | void *alloc_ret; | ||
214 | 216 | ||
215 | /* | 217 | /* |
216 | * Before the debug print so we see this in the total. But during | 218 | * Before the debug print so we see this in the total. But during |
@@ -227,37 +229,22 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags, | |||
227 | mem->size = size; | 229 | mem->size = size; |
228 | size = PAGE_ALIGN(size); | 230 | size = PAGE_ALIGN(size); |
229 | 231 | ||
230 | if (flags) { | 232 | nvgpu_dma_flags_to_attrs(&dma_attrs, flags); |
231 | DEFINE_DMA_ATTRS(dma_attrs); | ||
232 | 233 | ||
233 | nvgpu_dma_flags_to_attrs(&dma_attrs, flags); | 234 | alloc_ret = dma_alloc_attrs(d, size, &iova, GFP_KERNEL, |
234 | |||
235 | if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) { | ||
236 | mem->priv.pages = dma_alloc_attrs(d, | ||
237 | size, &iova, GFP_KERNEL, | ||
238 | __DMA_ATTR(dma_attrs)); | ||
239 | if (!mem->priv.pages) | ||
240 | return -ENOMEM; | ||
241 | } else { | ||
242 | mem->cpu_va = dma_alloc_attrs(d, | ||
243 | size, &iova, GFP_KERNEL, | ||
244 | __DMA_ATTR(dma_attrs)); | 235 | __DMA_ATTR(dma_attrs)); |
245 | if (!mem->cpu_va) | 236 | if (!alloc_ret) |
246 | return -ENOMEM; | 237 | return -ENOMEM; |
247 | } | ||
248 | } else { | ||
249 | mem->cpu_va = dma_alloc_coherent(d, size, &iova, GFP_KERNEL); | ||
250 | if (!mem->cpu_va) | ||
251 | return -ENOMEM; | ||
252 | } | ||
253 | 238 | ||
254 | if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) | 239 | if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) { |
240 | mem->priv.pages = alloc_ret; | ||
255 | err = nvgpu_get_sgtable_from_pages(g, &mem->priv.sgt, | 241 | err = nvgpu_get_sgtable_from_pages(g, &mem->priv.sgt, |
256 | mem->priv.pages, | 242 | mem->priv.pages, |
257 | iova, size); | 243 | iova, size); |
258 | else { | 244 | } else { |
259 | err = nvgpu_get_sgtable(g, &mem->priv.sgt, mem->cpu_va, | 245 | mem->cpu_va = alloc_ret; |
260 | iova, size); | 246 | err = nvgpu_get_sgtable_attrs(g, &mem->priv.sgt, mem->cpu_va, |
247 | iova, size, flags); | ||
261 | memset(mem->cpu_va, 0, size); | 248 | memset(mem->cpu_va, 0, size); |
262 | } | 249 | } |
263 | if (err) | 250 | if (err) |
@@ -273,7 +260,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags, | |||
273 | 260 | ||
274 | fail_free: | 261 | fail_free: |
275 | g->dma_memory_used -= mem->aligned_size; | 262 | g->dma_memory_used -= mem->aligned_size; |
276 | dma_free_coherent(d, size, mem->cpu_va, iova); | 263 | dma_free_attrs(d, size, alloc_ret, iova, __DMA_ATTR(dma_attrs)); |
277 | mem->cpu_va = NULL; | 264 | mem->cpu_va = NULL; |
278 | mem->priv.sgt = NULL; | 265 | mem->priv.sgt = NULL; |
279 | mem->size = 0; | 266 | mem->size = 0; |
@@ -571,11 +558,12 @@ void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem) | |||
571 | nvgpu_dma_free(vm->mm->g, mem); | 558 | nvgpu_dma_free(vm->mm->g, mem); |
572 | } | 559 | } |
573 | 560 | ||
574 | int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt, | 561 | int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt, |
575 | void *cpuva, u64 iova, size_t size) | 562 | void *cpuva, u64 iova, size_t size, unsigned long flags) |
576 | { | 563 | { |
577 | int err = 0; | 564 | int err = 0; |
578 | struct sg_table *tbl; | 565 | struct sg_table *tbl; |
566 | DEFINE_DMA_ATTRS(dma_attrs); | ||
579 | 567 | ||
580 | tbl = nvgpu_kzalloc(g, sizeof(struct sg_table)); | 568 | tbl = nvgpu_kzalloc(g, sizeof(struct sg_table)); |
581 | if (!tbl) { | 569 | if (!tbl) { |
@@ -583,7 +571,9 @@ int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt, | |||
583 | goto fail; | 571 | goto fail; |
584 | } | 572 | } |
585 | 573 | ||
586 | err = dma_get_sgtable(dev_from_gk20a(g), tbl, cpuva, iova, size); | 574 | nvgpu_dma_flags_to_attrs(&dma_attrs, flags); |
575 | err = dma_get_sgtable_attrs(dev_from_gk20a(g), tbl, cpuva, iova, | ||
576 | size, __DMA_ATTR(dma_attrs)); | ||
587 | if (err) | 577 | if (err) |
588 | goto fail; | 578 | goto fail; |
589 | 579 | ||
@@ -599,6 +589,12 @@ fail: | |||
599 | return err; | 589 | return err; |
600 | } | 590 | } |
601 | 591 | ||
592 | int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt, | ||
593 | void *cpuva, u64 iova, size_t size) | ||
594 | { | ||
595 | return nvgpu_get_sgtable_attrs(g, sgt, cpuva, iova, size, 0); | ||
596 | } | ||
597 | |||
602 | int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt, | 598 | int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt, |
603 | struct page **pages, u64 iova, size_t size) | 599 | struct page **pages, u64 iova, size_t size) |
604 | { | 600 | { |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/dma.h b/drivers/gpu/nvgpu/include/nvgpu/linux/dma.h index 3960e654..342b278e 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/linux/dma.h +++ b/drivers/gpu/nvgpu/include/nvgpu/linux/dma.h | |||
@@ -21,9 +21,13 @@ | |||
21 | * Functions used internally for building the backing SGTs for nvgpu_mems. | 21 | * Functions used internally for building the backing SGTs for nvgpu_mems. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt, | 24 | |
25 | int nvgpu_get_sgtable_attrs(struct gk20a *g, struct sg_table **sgt, | ||
25 | void *cpuva, u64 iova, | 26 | void *cpuva, u64 iova, |
26 | size_t size); | 27 | size_t size, unsigned long flags); |
28 | |||
29 | int nvgpu_get_sgtable(struct gk20a *g, struct sg_table **sgt, | ||
30 | void *cpuva, u64 iova, size_t size); | ||
27 | 31 | ||
28 | int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt, | 32 | int nvgpu_get_sgtable_from_pages(struct gk20a *g, struct sg_table **sgt, |
29 | struct page **pages, u64 iova, | 33 | struct page **pages, u64 iova, |