author     Peter Daifuku <pdaifuku@nvidia.com>                    2017-10-27 18:46:53 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2017-11-03 02:27:12 -0400
commit     de399ccb0019513a5f9e8f2bcadb02486f99bc80 (patch)
tree       f3bc9e054f501fd4c9cf8c20b614ae160c8a6dd1 /drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
parent     566223689538531783a86091f052f70a6ebdef29 (diff)
gpu: nvgpu: fix patch buf count update for vidmem
gr_gk20a_ctx_patch_write_begin() updates the patch buffer data_count
when the associated graphics context memory buffer has been CPU-mapped;
it was doing so by looking for a non-null cpu_va. However, if the
graphics context has been allocated from vidmem, cpu_va is always 0, so
we can't tell if nvgpu_mem_begin() was called for the context buffer or
not.

Instead:
- add a cpu_accessible flag to the nvgpu_mem struct and set it in
  nvgpu_mem_begin()
- return the value of that flag in nvgpu_mem_cpu_accessible()
- gr_gk20a_ctx_patch_write_begin() now calls this new function instead
  of checking cpu_va.

Bug 2012077
JIRA ESRM-74

Change-Id: I8401699f30b4ae7154111721c25c7ec3ff95d329
Signed-off-by: Peter Daifuku <pdaifuku@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1587293
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
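For reference, the accessor named above reduces to a read of the new flag,
and the gr_gk20a call site swaps its cpu_va test for that accessor. Both
live outside this file's diff, so the following is a minimal sketch: the
signature is assumed from the commit text, and the surrounding
context-struct names (gr_ctx, patch_ctx) are illustrative only.

/* Sketch, not the in-tree definition: reports whether nvgpu_mem_begin()
 * has flagged this buffer for CPU access. */
bool nvgpu_mem_cpu_accessible(struct nvgpu_mem *mem)
{
        return mem->cpu_accessible;
}

/* Hypothetical call-site shape in gr_gk20a_ctx_patch_write_begin():
 * bump data_count only when CPU access was actually set up. A
 * vidmem-backed context keeps cpu_va == NULL, so the old cpu_va
 * check never fired there. */
static void example_patch_write_begin(struct gr_ctx_desc *gr_ctx,
                                      struct patch_desc *patch_ctx)
{
        if (nvgpu_mem_cpu_accessible(&gr_ctx->mem))
                patch_ctx->data_count++;
}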
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/nvgpu_mem.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/nvgpu_mem.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 1dbbd1a0..2bf26602 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -60,6 +60,14 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	void *cpu_va;
 
+	if (WARN_ON(mem->cpu_accessible)) {
+		nvgpu_warn(g, "nested");
+		return -EBUSY;
+	}
+
+	/* flag that the intent is to allow CPU access to the memory. */
+	mem->cpu_accessible = true;
+
 	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
 		return 0;
 
@@ -71,17 +79,14 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 	if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
 		return 0;
 
-	if (WARN_ON(mem->cpu_va)) {
-		nvgpu_warn(g, "nested");
-		return -EBUSY;
-	}
-
 	cpu_va = vmap(mem->priv.pages,
 			PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
 			0, pgprot_writecombine(PAGE_KERNEL));
 
-	if (WARN_ON(!cpu_va))
+	if (WARN_ON(!cpu_va)) {
+		mem->cpu_accessible = false;
 		return -ENOMEM;
+	}
 
 	mem->cpu_va = cpu_va;
 	return 0;
@@ -89,6 +94,8 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 
 void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
 {
+	mem->cpu_accessible = false;
+
 	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
 		return;
 
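For context, callers bracket CPU accesses with this begin/end pair; with
the new flag, that protocol is what makes the patch-buffer bookkeeping
work for vidmem-backed buffers too. A hedged usage sketch follows: the
helper name is hypothetical, while nvgpu_mem_wr32() is nvgpu's existing
word-write accessor.

/* Hypothetical example of the begin/end discipline around a CPU write. */
static int example_cpu_write(struct gk20a *g, struct nvgpu_mem *mem)
{
        int err = nvgpu_mem_begin(g, mem);      /* sets cpu_accessible */

        if (err)
                return err;             /* -EBUSY on a nested begin */

        nvgpu_mem_wr32(g, mem, 0, 0x1); /* write word 0 of the buffer */

        nvgpu_mem_end(g, mem);          /* clears cpu_accessible */
        return 0;
}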