summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/nvgpu_mem.c')
-rw-r--r-- drivers/gpu/nvgpu/common/linux/nvgpu_mem.c | 51
1 file changed, 27 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 69897694..206b83e1 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -34,25 +34,40 @@
34#include "gk20a/gk20a.h" 34#include "gk20a/gk20a.h"
35#include "gk20a/mm_gk20a.h" 35#include "gk20a/mm_gk20a.h"
36 36
37u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
38 u32 sysmem_mask, u32 vidmem_mask)
39{
40 switch (aperture) {
41 case APERTURE_SYSMEM:
42 /* some igpus consider system memory vidmem */
43 return nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE)
44 ? sysmem_mask : vidmem_mask;
45 case APERTURE_VIDMEM:
46 /* for dgpus only */
47 return vidmem_mask;
48 case APERTURE_INVALID:
49 WARN_ON("Bad aperture");
50 }
51 return 0;
52}
53
54u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
55 u32 sysmem_mask, u32 vidmem_mask)
56{
57 return __nvgpu_aperture_mask(g, mem->aperture,
58 sysmem_mask, vidmem_mask);
59}
60
37int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem) 61int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
38{ 62{
39 void *cpu_va; 63 void *cpu_va;
40 pgprot_t prot = nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ? 64 pgprot_t prot = nvgpu_is_enabled(g, NVGPU_DMA_COHERENT) ? PAGE_KERNEL :
41 PAGE_KERNEL :
42 pgprot_writecombine(PAGE_KERNEL); 65 pgprot_writecombine(PAGE_KERNEL);
43 66
44 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin) 67 if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
45 return 0; 68 return 0;
46 69
47 /* 70 /*
48 * WAR for bug 2040115: we already will always have a coherent vmap()
49 * for all sysmem buffers. The prot settings are left alone since
50 * eventually this should be deleted.
51 */
52 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
53 return 0;
54
55 /*
56 * A CPU mapping is implicitly made for all SYSMEM DMA allocations that 71 * A CPU mapping is implicitly made for all SYSMEM DMA allocations that
57 * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make 72 * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make
58 * another CPU mapping. 73 * another CPU mapping.
@@ -82,13 +97,6 @@ void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
82 return; 97 return;
83 98
84 /* 99 /*
85 * WAR for bug 2040115: skip this since the map will be taken care of
86 * during the free in the DMA API.
87 */
88 if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
89 return;
90
91 /*
92 * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping 100 * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping
93 * already made by the DMA API. 101 * already made by the DMA API.
94 */ 102 */
@@ -307,8 +315,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
307 */ 315 */
308u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl) 316u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
309{ 317{
310 if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) || 318 if (!nvgpu_iommuable(g))
311 !nvgpu_iommuable(g))
312 return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl)); 319 return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl));
313 320
314 if (sg_dma_address(sgl) == 0) 321 if (sg_dma_address(sgl) == 0)
@@ -408,12 +415,8 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,
408 415
409 /* 416 /*
410 * Re-use the CPU mapping only if the mapping was made by the DMA API. 417 * Re-use the CPU mapping only if the mapping was made by the DMA API.
411 *
412 * Bug 2040115: the DMA API wrapper makes the mapping that we should
413 * re-use.
414 */ 418 */
415 if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) || 419 if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
416 nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
417 dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page); 420 dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page);
418 421
419 dest->priv.pages = src->priv.pages + start_page; 422 dest->priv.pages = src->priv.pages + start_page;