author     Alex Waterman <alexw@nvidia.com>                      2017-08-17 17:33:46 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2017-10-13 18:19:23 -0400
commit     88d5f6b4154d6803ecf3b0dee7208f9f1f10a793 (patch)
tree       207e9ad3de18918aa2cdfbab4d80139a0a30d565
parent     a9ce91f910ca730a3abadb9e7491e3504af30d86 (diff)
gpu: nvgpu: Rename vidmem APIs
Rename the VIDMEM APIs to be prefixed by nvgpu_ to ensure consistency
and that all the non-static vidmem functions are properly namespaced.

JIRA NVGPU-30
JIRA NVGPU-138

Change-Id: I9986ee8f2c8f95a4b7c5e2b9607bc1e77933ccfc
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1540707
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
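For quick reference, the complete rename map applied by this patch
(compiled directly from the hunks below):

    set_vidmem_page_alloc()          -> nvgpu_vidmem_set_page_alloc()
    is_vidmem_page_alloc()           -> nvgpu_addr_is_vidmem_page_alloc()
    get_vidmem_page_alloc()          -> nvgpu_vidmem_get_page_alloc()
    gk20a_vidmem_buf_alloc()         -> nvgpu_vidmem_buf_alloc()
    gk20a_vidmem_get_space()         -> nvgpu_vidmem_get_space()
    get_pending_mem_desc()           -> nvgpu_vidmem_get_pending_alloc()
    gk20a_vidmem_destroy()           -> nvgpu_vidmem_destroy()
    gk20a_init_vidmem()              -> nvgpu_vidmem_init()
    gk20a_vidmem_clear_all()         -> nvgpu_vidmem_clear_all()
    gk20a_vidmem_clear_mem_worker()  -> nvgpu_vidmem_clear_mem_worker()
    gk20a_gmmu_clear_vidmem_mem()    -> nvgpu_vidmem_clear()
    gk20a_vidmem_buf_owner()         -> nvgpu_vidmem_buf_owner()
    gk20a_vidbuf_access_memory()     -> nvgpu_vidmem_buf_access_memory()
    struct gk20a_vidmem_buf          -> struct nvgpu_vidmem_buf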
-rw-r--r--  drivers/gpu/nvgpu/common/linux/dma.c          4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c   4
-rw-r--r--  drivers/gpu/nvgpu/common/linux/nvgpu_mem.c    6
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vidmem.c      46
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c           5
-rw-r--r--  drivers/gpu/nvgpu/common/mm/nvgpu_mem.c       2
-rw-r--r--  drivers/gpu/nvgpu/common/mm/vidmem.c         16
-rw-r--r--  drivers/gpu/nvgpu/common/pramin.c             2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c       2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c            4
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vidmem.h     52
11 files changed, 72 insertions(+), 71 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 2ed1cc5a..ca657da2 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -343,7 +343,7 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	if (err)
 		goto fail_kfree;
 
-	set_vidmem_page_alloc(mem->priv.sgt->sgl, addr);
+	nvgpu_vidmem_set_page_alloc(mem->priv.sgt->sgl, addr);
 	sg_set_page(mem->priv.sgt->sgl, NULL, size, 0);
 
 	mem->aligned_size = size;
@@ -535,7 +535,7 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	} else {
 		nvgpu_memset(g, mem, 0, 0, mem->aligned_size);
 		nvgpu_free(mem->allocator,
-			(u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+			(u64)nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl));
 		nvgpu_free_sgtable(g, &mem->priv.sgt);
 
 		mem->size = 0;
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
index c8460bd9..7180256a 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl.c
@@ -693,7 +693,7 @@ static int nvgpu_gpu_alloc_vidmem(struct gk20a *g,
 		return -EINVAL;
 	}
 
-	fd = gk20a_vidmem_buf_alloc(g, args->in.size);
+	fd = nvgpu_vidmem_buf_alloc(g, args->in.size);
 	if (fd < 0)
 		return fd;
 
@@ -715,7 +715,7 @@ static int nvgpu_gpu_get_memory_state(struct gk20a *g,
 	    args->reserved[2] || args->reserved[3])
 		return -EINVAL;
 
-	err = gk20a_vidmem_get_space(g, &args->total_free_bytes);
+	err = nvgpu_vidmem_get_space(g, &args->total_free_bytes);
 
 	gk20a_dbg_fn("done, err=%d, bytes=%lld", err, args->total_free_bytes);
 
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index eb51676c..e1f8a598 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -355,7 +355,7 @@ u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem)
 	/*
 	 * Otherwise get the vidmem address.
 	 */
-	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
+	alloc = nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl);
 
 	/* This API should not be used with > 1 chunks */
 	WARN_ON(alloc->nr_chunks != 1);
@@ -549,7 +549,7 @@ static struct nvgpu_sgt *__nvgpu_mem_get_sgl_from_vidmem(
 {
 	struct nvgpu_page_alloc *vidmem_alloc;
 
-	vidmem_alloc = get_vidmem_page_alloc(linux_sgl);
+	vidmem_alloc = nvgpu_vidmem_get_page_alloc(linux_sgl);
 	if (!vidmem_alloc)
 		return NULL;
 
@@ -561,7 +561,7 @@ struct nvgpu_sgt *nvgpu_linux_sgt_create(struct gk20a *g, struct sg_table *sgt)
 	struct nvgpu_sgt *nvgpu_sgt;
 	struct scatterlist *linux_sgl = sgt->sgl;
 
-	if (is_vidmem_page_alloc(sg_dma_address(linux_sgl)))
+	if (nvgpu_addr_is_vidmem_page_alloc(sg_dma_address(linux_sgl)))
 		return __nvgpu_mem_get_sgl_from_vidmem(g, linux_sgl);
 
 	nvgpu_sgt = nvgpu_kzalloc(g, sizeof(*nvgpu_sgt));
diff --git a/drivers/gpu/nvgpu/common/linux/vidmem.c b/drivers/gpu/nvgpu/common/linux/vidmem.c
index e89dd07a..5d47c858 100644
--- a/drivers/gpu/nvgpu/common/linux/vidmem.c
+++ b/drivers/gpu/nvgpu/common/linux/vidmem.c
@@ -31,24 +31,24 @@
 
 #include "vm_priv.h"
 
-void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
+bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
 {
-	/* set bit 0 to indicate vidmem allocation */
-	sg_dma_address(sgl) = (addr | 1ULL);
+	return !!(addr & 1ULL);
 }
 
-bool is_vidmem_page_alloc(u64 addr)
+void nvgpu_vidmem_set_page_alloc(struct scatterlist *sgl, u64 addr)
 {
-	return !!(addr & 1ULL);
+	/* set bit 0 to indicate vidmem allocation */
+	sg_dma_address(sgl) = (addr | 1ULL);
 }
 
-struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl)
+struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(struct scatterlist *sgl)
 {
 	u64 addr;
 
 	addr = sg_dma_address(sgl);
 
-	if (is_vidmem_page_alloc(addr))
+	if (nvgpu_addr_is_vidmem_page_alloc(addr))
 		addr = addr & ~1ULL;
 	else
 		WARN_ON(1);
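The hunk above is also where vidmem's address-tagging trick lives:
nvgpu_page_alloc handles stored in sg_dma_address() are at least
2-byte aligned, so bit 0 is free to mark an address as a vidmem
allocation. A minimal standalone sketch of the same round trip
(plain u64 instead of a scatterlist; illustrative only, not the
driver code itself):

/*
 * Bit-0 tagging sketch. Assumes real handles never have bit 0 set
 * (i.e. they are at least 2-byte aligned), so the tag is unambiguous.
 */
typedef unsigned long long u64;

static u64 tag_vidmem(u64 handle)  { return handle | 1ULL; }       /* mark as vidmem */
static int is_vidmem(u64 addr)     { return (int)(addr & 1ULL); }  /* test the mark  */
static u64 untag_vidmem(u64 addr)  { return addr & ~1ULL; }        /* recover handle */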
@@ -59,7 +59,7 @@ struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl)
 static struct sg_table *gk20a_vidbuf_map_dma_buf(
 	struct dma_buf_attachment *attach, enum dma_data_direction dir)
 {
-	struct gk20a_vidmem_buf *buf = attach->dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = attach->dmabuf->priv;
 
 	return buf->mem->priv.sgt;
 }
@@ -72,7 +72,7 @@ static void gk20a_vidbuf_unmap_dma_buf(struct dma_buf_attachment *attach,
 
 static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
 {
-	struct gk20a_vidmem_buf *buf = dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
 
 	gk20a_dbg_fn("");
 
@@ -104,7 +104,7 @@ static int gk20a_vidbuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 static int gk20a_vidbuf_set_private(struct dma_buf *dmabuf,
 	struct device *dev, void *priv, void (*delete)(void *priv))
 {
-	struct gk20a_vidmem_buf *buf = dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
 
 	buf->dmabuf_priv = priv;
 	buf->dmabuf_priv_delete = delete;
@@ -115,7 +115,7 @@ static int gk20a_vidbuf_set_private(struct dma_buf *dmabuf,
 static void *gk20a_vidbuf_get_private(struct dma_buf *dmabuf,
 	struct device *dev)
 {
-	struct gk20a_vidmem_buf *buf = dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
 
 	return buf->dmabuf_priv;
 }
@@ -131,7 +131,7 @@ static const struct dma_buf_ops gk20a_vidbuf_ops = {
 	.get_drvdata = gk20a_vidbuf_get_private,
 };
 
-static struct dma_buf *gk20a_vidbuf_export(struct gk20a_vidmem_buf *buf)
+static struct dma_buf *gk20a_vidbuf_export(struct nvgpu_vidmem_buf *buf)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
@@ -143,9 +143,9 @@ static struct dma_buf *gk20a_vidbuf_export(struct gk20a_vidmem_buf *buf)
 	return dma_buf_export(&exp_info);
 }
 
-struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf)
+struct gk20a *nvgpu_vidmem_buf_owner(struct dma_buf *dmabuf)
 {
-	struct gk20a_vidmem_buf *buf = dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
 
 	if (dmabuf->ops != &gk20a_vidbuf_ops)
 		return NULL;
@@ -153,9 +153,9 @@ struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf)
 	return buf->g;
 }
 
-int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
+int nvgpu_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 {
-	struct gk20a_vidmem_buf *buf;
+	struct nvgpu_vidmem_buf *buf;
 	int err = 0, fd;
 
 	gk20a_dbg_fn("");
@@ -169,7 +169,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 	if (!g->mm.vidmem.cleared) {
 		nvgpu_mutex_acquire(&g->mm.vidmem.first_clear_mutex);
 		if (!g->mm.vidmem.cleared) {
-			err = gk20a_vidmem_clear_all(g);
+			err = nvgpu_vidmem_clear_all(g);
 			if (err) {
 				nvgpu_err(g,
 					"failed to clear whole vidmem");
@@ -216,10 +216,10 @@ err_kfree:
 	return err;
 }
 
-int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
+int nvgpu_vidmem_buf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 		void *buffer, u64 offset, u64 size, u32 cmd)
 {
-	struct gk20a_vidmem_buf *vidmem_buf;
+	struct nvgpu_vidmem_buf *vidmem_buf;
 	struct nvgpu_mem *mem;
 	int err = 0;
 
@@ -245,17 +245,17 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 	return err;
 }
 
-void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
+void nvgpu_vidmem_clear_mem_worker(struct work_struct *work)
 {
 	struct mm_gk20a *mm = container_of(work, struct mm_gk20a,
 					vidmem.clear_mem_worker);
 	struct gk20a *g = mm->g;
 	struct nvgpu_mem *mem;
 
-	while ((mem = get_pending_mem_desc(mm)) != NULL) {
-		gk20a_gmmu_clear_vidmem_mem(g, mem);
+	while ((mem = nvgpu_vidmem_get_pending_alloc(mm)) != NULL) {
+		nvgpu_vidmem_clear(g, mem);
 		nvgpu_free(mem->allocator,
-			(u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+			(u64)nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl));
 		nvgpu_free_sgtable(g, &mem->priv.sgt);
 
 		WARN_ON(nvgpu_atomic64_sub_return(mem->aligned_size,
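nvgpu_vidmem_clear_mem_worker() is a classic drain-style deferred
worker: frees of not-yet-cleared vidmem are queued, and the workqueue
pops each pending allocation, clears it, and only then returns it to
the allocator. A generic sketch of the same pattern using the stock
Linux workqueue/list APIs (struct clear_ctx, pop_pending(), and the
item handling are hypothetical stand-ins, not nvgpu code):

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical context playing the role of struct mm_gk20a's vidmem state. */
struct clear_ctx {
	struct work_struct worker;
	struct list_head pending;	/* buffers awaiting a clear */
	spinlock_t lock;
};

/* Pop one pending entry, or NULL when drained; the analogue of
 * nvgpu_vidmem_get_pending_alloc(). */
static struct list_head *pop_pending(struct clear_ctx *ctx)
{
	struct list_head *entry = NULL;

	spin_lock(&ctx->lock);
	if (!list_empty(&ctx->pending)) {
		entry = ctx->pending.next;
		list_del_init(entry);
	}
	spin_unlock(&ctx->lock);

	return entry;
}

static void clear_worker(struct work_struct *work)
{
	struct clear_ctx *ctx = container_of(work, struct clear_ctx, worker);
	struct list_head *entry;

	/* Keep draining until the queue is empty, as the vidmem worker does. */
	while ((entry = pop_pending(ctx)) != NULL)
		; /* clear the buffer, then hand it back to the allocator */
}

The producer side needs only INIT_WORK(&ctx->worker, clear_worker)
once at init and schedule_work(&ctx->worker) after queuing each buffer.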
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index f4ac3d41..d0f87148 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -42,7 +42,7 @@
 enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 			struct dma_buf *dmabuf)
 {
-	struct gk20a *buf_owner = gk20a_vidmem_buf_owner(dmabuf);
+	struct gk20a *buf_owner = nvgpu_vidmem_buf_owner(dmabuf);
 	bool unified_memory = nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY);
 
 	if (buf_owner == NULL) {
@@ -97,7 +97,8 @@ static u64 nvgpu_get_buffer_alignment(struct gk20a *g, struct scatterlist *sgl,
 	u64 buf_addr;
 
 	if (aperture == APERTURE_VIDMEM) {
-		struct nvgpu_page_alloc *alloc = get_vidmem_page_alloc(sgl);
+		struct nvgpu_page_alloc *alloc =
+			nvgpu_vidmem_get_page_alloc(sgl);
 		struct nvgpu_sgt *sgt = &alloc->sgt;
 		void *sgl_vid = sgt->sgl;
 
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 2b6e6e6a..9f677058 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -62,7 +62,7 @@ void nvgpu_sgt_free(struct nvgpu_sgt *sgt, struct gk20a *g)
 u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
 {
 	/* ensure it is not vidmem allocation */
-	WARN_ON(is_vidmem_page_alloc(phys));
+	WARN_ON(nvgpu_addr_is_vidmem_page_alloc(phys));
 
 	if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit)
 		return phys | 1ULL << g->ops.mm.get_iommu_bit(g);
diff --git a/drivers/gpu/nvgpu/common/mm/vidmem.c b/drivers/gpu/nvgpu/common/mm/vidmem.c
index 1ba07ca6..c95cedec 100644
--- a/drivers/gpu/nvgpu/common/mm/vidmem.c
+++ b/drivers/gpu/nvgpu/common/mm/vidmem.c
@@ -28,13 +28,13 @@
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 
-void gk20a_vidmem_destroy(struct gk20a *g)
+void nvgpu_vidmem_destroy(struct gk20a *g)
 {
 	if (nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
 		nvgpu_alloc_destroy(&g->mm.vidmem.allocator);
 }
 
-int gk20a_vidmem_clear_all(struct gk20a *g)
+int nvgpu_vidmem_clear_all(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 	struct gk20a_fence *gk20a_fence_out = NULL;
@@ -106,7 +106,7 @@ int gk20a_vidmem_clear_all(struct gk20a *g)
 	return 0;
 }
 
-int gk20a_init_vidmem(struct mm_gk20a *mm)
+int nvgpu_vidmem_init(struct mm_gk20a *mm)
 {
 	struct gk20a *g = mm->g;
 	size_t size = g->ops.mm.get_vidmem_size ?
@@ -157,7 +157,7 @@ int gk20a_init_vidmem(struct mm_gk20a *mm)
 
 	nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
 
-	INIT_WORK(&mm->vidmem.clear_mem_worker, gk20a_vidmem_clear_mem_worker);
+	INIT_WORK(&mm->vidmem.clear_mem_worker, nvgpu_vidmem_clear_mem_worker);
 	nvgpu_atomic64_set(&mm->vidmem.bytes_pending, 0);
 	nvgpu_init_list_node(&mm->vidmem.clear_list_head);
 	nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
@@ -167,7 +167,7 @@ int gk20a_init_vidmem(struct mm_gk20a *mm)
 	return 0;
 }
 
-int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
+int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space)
 {
 	struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator;
 
@@ -183,7 +183,7 @@ int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
 	return 0;
 }
 
-int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
+int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
@@ -194,7 +194,7 @@ int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 	if (g->mm.vidmem.ce_ctx_id == (u32)~0)
 		return -EINVAL;
 
-	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
+	alloc = nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl);
 
 	nvgpu_sgt_for_each_sgl(sgl, &alloc->sgt) {
 		if (gk20a_last_fence)
@@ -243,7 +243,7 @@ int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 	return err;
 }
 
-struct nvgpu_mem *get_pending_mem_desc(struct mm_gk20a *mm)
+struct nvgpu_mem *nvgpu_vidmem_get_pending_alloc(struct mm_gk20a *mm)
 {
 	struct nvgpu_mem *mem = NULL;
 
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index abe5b561..b7bc7439 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -55,7 +55,7 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 	if (!g->regs && nvgpu_is_enabled(g, NVGPU_DRIVER_IS_DYING))
 		return;
 
-	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
+	alloc = nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl);
 	sgt = &alloc->sgt;
 
 	nvgpu_sgt_for_each_sgl(sgl, sgt) {
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index bf28ed1e..83bd0156 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -973,7 +973,7 @@ static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
 		goto fail_idle;
 	}
 
-	err = gk20a_vidbuf_access_memory(g, dmabuf, buffer,
+	err = nvgpu_vidmem_buf_access_memory(g, dmabuf, buffer,
 			args->offset + offset, access_size,
 			args->cmd);
 	if (err)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 670e16d0..8936cd03 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -350,7 +350,7 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm)
 		nvgpu_vm_put(mm->cde.vm);
 
 	nvgpu_semaphore_sea_destroy(g);
-	gk20a_vidmem_destroy(g);
+	nvgpu_vidmem_destroy(g);
 	nvgpu_pd_cache_fini(g);
 }
 
@@ -387,7 +387,7 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
 
 	mm->vidmem.ce_ctx_id = (u32)~0;
 
-	err = gk20a_init_vidmem(mm);
+	err = nvgpu_vidmem_init(mm);
 	if (err)
 		return err;
 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vidmem.h b/drivers/gpu/nvgpu/include/nvgpu/vidmem.h
index a5d0ae11..1b250f90 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vidmem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vidmem.h
@@ -34,7 +34,7 @@ struct gk20a;
 struct mm_gk20a;
 struct nvgpu_mem;
 
-struct gk20a_vidmem_buf {
+struct nvgpu_vidmem_buf {
 	struct gk20a *g;
 	struct nvgpu_mem *mem;
 	struct dma_buf *dmabuf;
@@ -44,26 +44,26 @@ struct gk20a_vidmem_buf {
 
 #if defined(CONFIG_GK20A_VIDMEM)
 
-struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl);
-void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr);
-bool is_vidmem_page_alloc(u64 addr);
-int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes);
-int gk20a_vidmem_get_space(struct gk20a *g, u64 *space);
+struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(struct scatterlist *sgl);
+void nvgpu_vidmem_set_page_alloc(struct scatterlist *sgl, u64 addr);
+bool nvgpu_addr_is_vidmem_page_alloc(u64 addr);
+int nvgpu_vidmem_buf_alloc(struct gk20a *g, size_t bytes);
+int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space);
 
-struct nvgpu_mem *get_pending_mem_desc(struct mm_gk20a *mm);
+struct nvgpu_mem *nvgpu_vidmem_get_pending_alloc(struct mm_gk20a *mm);
 
-void gk20a_vidmem_destroy(struct gk20a *g);
-int gk20a_init_vidmem(struct mm_gk20a *mm);
-int gk20a_vidmem_clear_all(struct gk20a *g);
+void nvgpu_vidmem_destroy(struct gk20a *g);
+int nvgpu_vidmem_init(struct mm_gk20a *mm);
+int nvgpu_vidmem_clear_all(struct gk20a *g);
 
-void gk20a_vidmem_clear_mem_worker(struct work_struct *work);
-int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem);
+void nvgpu_vidmem_clear_mem_worker(struct work_struct *work);
+int nvgpu_vidmem_clear(struct gk20a *g, struct nvgpu_mem *mem);
 
 /*
  * Will need to be moved later on once we have the Linux vidmem.h file.
  */
-struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf);
-int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
-	void *buffer, u64 offset, u64 size, u32 cmd);
+struct gk20a *nvgpu_vidmem_buf_owner(struct dma_buf *dmabuf);
+int nvgpu_vidmem_buf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
+	void *buffer, u64 offset, u64 size, u32 cmd);
 
 #else /* !defined(CONFIG_GK20A_VIDMEM) */
@@ -73,60 +73,60 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
  */
 
 static inline struct nvgpu_page_alloc *
-get_vidmem_page_alloc(struct scatterlist *sgl)
+nvgpu_vidmem_get_page_alloc(struct scatterlist *sgl)
 {
 	return NULL;
 }
 
-static inline void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
+static inline void nvgpu_vidmem_set_page_alloc(struct scatterlist *sgl, u64 addr)
 {
 }
 
-static inline bool is_vidmem_page_alloc(u64 addr)
+static inline bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
 {
 	return false;
 }
 
-static inline int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
+static inline int nvgpu_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 {
 	return -ENOSYS;
 }
-static inline int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
+static inline int nvgpu_vidmem_get_space(struct gk20a *g, u64 *space)
 {
 	return -ENOSYS;
 }
 
-static inline struct nvgpu_mem *get_pending_mem_desc(struct mm_gk20a *mm)
+static inline struct nvgpu_mem *nvgpu_vidmem_get_pending_alloc(struct mm_gk20a *mm)
 {
 	return NULL;
 }
 
-static inline void gk20a_vidmem_destroy(struct gk20a *g)
+static inline void nvgpu_vidmem_destroy(struct gk20a *g)
 {
 }
 
-static inline int gk20a_init_vidmem(struct mm_gk20a *mm)
+static inline int nvgpu_vidmem_init(struct mm_gk20a *mm)
 {
 	return 0;
 }
 
-static inline int gk20a_vidmem_clear_all(struct gk20a *g)
+static inline int nvgpu_vidmem_clear_all(struct gk20a *g)
 {
 	return -ENOSYS;
 }
 
-static inline int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g,
+static inline int nvgpu_vidmem_clear(struct gk20a *g,
 				struct nvgpu_mem *mem)
 {
 	return -ENOSYS;
 }
 
-static inline struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf)
+static inline struct gk20a *nvgpu_vidmem_buf_owner(struct dma_buf *dmabuf)
 {
 	return NULL;
 }
 
-static inline int gk20a_vidbuf_access_memory(struct gk20a *g,
+static inline int nvgpu_vidmem_buf_access_memory(struct gk20a *g,
 					struct dma_buf *dmabuf,
 					void *buffer, u64 offset,
 					u64 size, u32 cmd)
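The #else half of this header is the standard config-stub idiom: with
CONFIG_GK20A_VIDMEM disabled, every entry point collapses to a static
inline that fails fast (-ENOSYS), returns a neutral value (NULL, false,
or 0), or does nothing, so call sites compile unchanged with no #ifdefs
of their own. A minimal sketch of the idiom (the feature and type names
are hypothetical):

/* Hypothetical config-gated API following the same pattern. */
#if defined(CONFIG_MY_FEATURE)

int my_feature_init(struct my_dev *dev);	/* real version lives in a .c file */

#else /* !defined(CONFIG_MY_FEATURE) */

static inline int my_feature_init(struct my_dev *dev)
{
	return -ENOSYS;	/* feature compiled out; caller sees a clean error */
}

#endif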