summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/common/mm/vidmem.c
diff options
context:
space:
mode:
author    Alex Waterman <alexw@nvidia.com>                              2017-08-17 17:33:46 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>           2017-10-13 18:19:23 -0400
commit  88d5f6b4154d6803ecf3b0dee7208f9f1f10a793 (patch)
tree    207e9ad3de18918aa2cdfbab4d80139a0a30d565 /drivers/gpu/nvgpu/common/mm/vidmem.c
parent  a9ce91f910ca730a3abadb9e7491e3504af30d86 (diff)
gpu: nvgpu: Rename vidmem APIs
Rename the VIDMEM APIs to be prefixed by nvgpu_ to ensure consistency and that all the non-static vidmem functions are properly namespaced.

JIRA NVGPU-30
JIRA NVGPU-138

Change-Id: I9986ee8f2c8f95a4b7c5e2b9607bc1e77933ccfc
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1540707
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vidmem.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vidmem.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index 1ba07ca6..c95cedec 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -28,13 +28,13 @@
28#include "gk20a/gk20a.h" 28#include "gk20a/gk20a.h"
29#include "gk20a/mm_gk20a.h" 29#include "gk20a/mm_gk20a.h"
30 30
31void gk20a_vidmem_destroy(struct gk20a *g) 31void nvgpu_vidmem_destroy(struct gk20a *g)
32{ 32{
33 if (nvgpu_alloc_initialized(&g->mm.vidmem.allocator)) 33 if (nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
34 nvgpu_alloc_destroy(&g->mm.vidmem.allocator); 34 nvgpu_alloc_destroy(&g->mm.vidmem.allocator);
35} 35}
36 36
37int gk20a_vidmem_clear_all(struct gk20a *g) 37int nvgpu_vidmem_clear_all(struct gk20a *g)
38{ 38{
39 struct mm_gk20a *mm = &g->mm; 39 struct mm_gk20a *mm = &g->mm;
40 struct gk20a_fence *gk20a_fence_out = NULL; 40 struct gk20a_fence *gk20a_fence_out = NULL;
@@ -106,7 +106,7 @@ int gk20a_vidmem_clear_all(struct gk20a *g)
106 return 0; 106 return 0;
107} 107}
108 108
109int gk20a_init_vidmem(struct mm_gk20a *mm) 109int nvgpu_vidmem_init(struct mm_gk20a *mm)
110{ 110{
111 struct gk20a *g = mm->g; 111 struct gk20a *g = mm->g;
112 size_t size = g->ops.mm.get_vidmem_size ? 112 size_t size = g->ops.mm.get_vidmem_size ?
@@ -157,7 +157,7 @@ int gk20a_init_vidmem(struct mm_gk20a *mm)
157 157
158 nvgpu_mutex_init(&mm->vidmem.first_clear_mutex); 158 nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
159 159
160 INIT_WORK(&mm->vidmem.clear_mem_worker, gk20a_vidmem_clear_mem_worker); 160 INIT_WORK(&mm->vidmem.clear_mem_worker, nvgpu_vidmem_clear_mem_worker);
161 nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0); 161 nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0);
162 nvgpu_init_list_node(&mm->vidmem.clear_list_head); 162 nvgpu_init_list_node(&mm->vidmem.clear_list_head);
163 nvgpu_mutex_init(&mm->vidmem.clear_list_mutex); 163 nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
@@ -167,7 +167,7 @@ int gk20a_init_vidmem(struct mm_gk20a *mm)
167 return 0; 167 return 0;
168} 168}
169 169
170int gk20a_vidmem_get_space(struct gk20a *g, u64 *space) 170int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space)
171{ 171{
172 struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator; 172 struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator;
173 173
@@ -183,7 +183,7 @@ int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
183 return 0; 183 return 0;
184} 184}
185 185
186int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem) 186int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
187{ 187{
188 struct gk20a_fence *gk20a_fence_out = NULL; 188 struct gk20a_fence *gk20a_fence_out = NULL;
189 struct gk20a_fence *gk20a_last_fence = NULL; 189 struct gk20a_fence *gk20a_last_fence = NULL;
@@ -194,7 +194,7 @@ int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
194 if (g->mm.vidmem.ce_ctx_id == (u32)~0) 194 if (g->mm.vidmem.ce_ctx_id == (u32)~0)
195 return -EINVAL; 195 return -EINVAL;
196 196
197 alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl); 197 alloc = nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl);
198 198
199 nvgpu_sgt_for_each_sgl(sgl, &alloc->sgt) { 199 nvgpu_sgt_for_each_sgl(sgl, &alloc->sgt) {
200 if (gk20a_last_fence) 200 if (gk20a_last_fence)
@@ -243,7 +243,7 @@ int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
243 return err; 243 return err;
244} 244}
245 245
246struct nvgpu_mem *get_pending_mem_desc(struct mm_gk20a *mm) 246struct nvgpu_mem *nvgpu_vidmem_get_pending_alloc(struct mm_gk20a *mm)
247{ 247{
248 struct nvgpu_mem *mem = NULL; 248 struct nvgpu_mem *mem = NULL;
249 249