path: root/drivers/gpu/nvgpu/common/linux/vidmem.c
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vidmem.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vidmem.c | 46 +++++++++++++++++++++++-----------------------
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vidmem.c b/drivers/gpu/nvgpu/common/linux/vidmem.c
index e89dd07a..5d47c858 100644
--- a/drivers/gpu/nvgpu/common/linux/vidmem.c
+++ b/drivers/gpu/nvgpu/common/linux/vidmem.c
@@ -31,24 +31,24 @@
 
 #include "vm_priv.h"
 
-void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
+bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
 {
-	/* set bit 0 to indicate vidmem allocation */
-	sg_dma_address(sgl) = (addr | 1ULL);
+	return !!(addr & 1ULL);
 }
 
-bool is_vidmem_page_alloc(u64 addr)
+void nvgpu_vidmem_set_page_alloc(struct scatterlist *sgl, u64 addr)
 {
-	return !!(addr & 1ULL);
+	/* set bit 0 to indicate vidmem allocation */
+	sg_dma_address(sgl) = (addr | 1ULL);
 }
 
-struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl)
+struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(struct scatterlist *sgl)
 {
 	u64 addr;
 
 	addr = sg_dma_address(sgl);
 
-	if (is_vidmem_page_alloc(addr))
+	if (nvgpu_addr_is_vidmem_page_alloc(addr))
 		addr = addr & ~1ULL;
 	else
 		WARN_ON(1);
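
These three helpers implement low-bit pointer tagging: the value stored via sg_dma_address() is really a pointer to a struct nvgpu_page_alloc, whose alignment guarantees bit 0 is zero, so the driver borrows that bit as a "this is vidmem" flag. A minimal stand-alone sketch of the same round trip (helper names here are illustrative, not part of the driver):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Borrow bit 0 of an aligned address as a "vidmem" flag, as the
 * helpers above do with sg_dma_address(). */
static inline uint64_t tag_vidmem(uint64_t addr)   { return addr | 1ULL; }
static inline bool is_vidmem(uint64_t addr)        { return !!(addr & 1ULL); }
static inline uint64_t untag_vidmem(uint64_t addr) { return addr & ~1ULL; }

int main(void)
{
	uint64_t addr = 0x100000;             /* aligned: bit 0 is free */
	uint64_t tagged = tag_vidmem(addr);

	assert(is_vidmem(tagged));            /* flag survives the store */
	assert(untag_vidmem(tagged) == addr); /* original address recovered */
	assert(!is_vidmem(addr));             /* untagged sysmem address */
	return 0;
}
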
@@ -59,7 +59,7 @@ struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl)
 static struct sg_table *gk20a_vidbuf_map_dma_buf(
 		struct dma_buf_attachment *attach, enum dma_data_direction dir)
 {
-	struct gk20a_vidmem_buf *buf = attach->dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = attach->dmabuf->priv;
 
 	return buf->mem->priv.sgt;
 }
@@ -72,7 +72,7 @@ static void gk20a_vidbuf_unmap_dma_buf(struct dma_buf_attachment *attach,
 
 static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
 {
-	struct gk20a_vidmem_buf *buf = dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
 
 	gk20a_dbg_fn("");
 
@@ -104,7 +104,7 @@ static int gk20a_vidbuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 static int gk20a_vidbuf_set_private(struct dma_buf *dmabuf,
 		struct device *dev, void *priv, void (*delete)(void *priv))
 {
-	struct gk20a_vidmem_buf *buf = dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
 
 	buf->dmabuf_priv = priv;
 	buf->dmabuf_priv_delete = delete;
@@ -115,7 +115,7 @@ static int gk20a_vidbuf_set_private(struct dma_buf *dmabuf,
 static void *gk20a_vidbuf_get_private(struct dma_buf *dmabuf,
 		struct device *dev)
 {
-	struct gk20a_vidmem_buf *buf = dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
 
 	return buf->dmabuf_priv;
 }
@@ -131,7 +131,7 @@ static const struct dma_buf_ops gk20a_vidbuf_ops = {
 	.get_drvdata = gk20a_vidbuf_get_private,
 };
 
-static struct dma_buf *gk20a_vidbuf_export(struct gk20a_vidmem_buf *buf)
+static struct dma_buf *gk20a_vidbuf_export(struct nvgpu_vidmem_buf *buf)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
@@ -143,9 +143,9 @@ static struct dma_buf *gk20a_vidbuf_export(struct gk20a_vidmem_buf *buf)
 	return dma_buf_export(&exp_info);
 }
 
-struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf)
+struct gk20a *nvgpu_vidmem_buf_owner(struct dma_buf *dmabuf)
 {
-	struct gk20a_vidmem_buf *buf = dmabuf->priv;
+	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
 
 	if (dmabuf->ops != &gk20a_vidbuf_ops)
 		return NULL;
@@ -153,9 +153,9 @@ struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf)
 	return buf->g;
 }
 
-int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
+int nvgpu_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 {
-	struct gk20a_vidmem_buf *buf;
+	struct nvgpu_vidmem_buf *buf;
 	int err = 0, fd;
 
 	gk20a_dbg_fn("");
@@ -169,7 +169,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 	if (!g->mm.vidmem.cleared) {
 		nvgpu_mutex_acquire(&g->mm.vidmem.first_clear_mutex);
 		if (!g->mm.vidmem.cleared) {
-			err = gk20a_vidmem_clear_all(g);
+			err = nvgpu_vidmem_clear_all(g);
 			if (err) {
 				nvgpu_err(g,
 					"failed to clear whole vidmem");
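
This hunk preserves a double-checked locking pattern around the one-time scrub of all vidmem: the unlocked check skips the mutex entirely once the clear has happened, while the re-check under first_clear_mutex ensures nvgpu_vidmem_clear_all() runs at most once even when several allocations race. A generic sketch of the same shape (names hypothetical, using the stock kernel mutex API rather than the nvgpu wrappers):

#include <linux/mutex.h>

static bool initialized;
static DEFINE_MUTEX(init_mutex);

/* Stands in for the expensive one-shot step, e.g. clearing vidmem. */
static int do_expensive_init(void)
{
	return 0;
}

static int ensure_initialized(void)
{
	int err = 0;

	if (!initialized) {                 /* fast path: no lock taken */
		mutex_lock(&init_mutex);
		if (!initialized) {         /* re-check: we may have lost the race */
			err = do_expensive_init();
			if (!err)
				initialized = true;
		}
		mutex_unlock(&init_mutex);
	}
	return err;
}
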
@@ -216,10 +216,10 @@ err_kfree:
 	return err;
 }
 
-int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
+int nvgpu_vidmem_buf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 		void *buffer, u64 offset, u64 size, u32 cmd)
 {
-	struct gk20a_vidmem_buf *vidmem_buf;
+	struct nvgpu_vidmem_buf *vidmem_buf;
 	struct nvgpu_mem *mem;
 	int err = 0;
 
@@ -245,17 +245,17 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 	return err;
 }
 
-void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
+void nvgpu_vidmem_clear_mem_worker(struct work_struct *work)
 {
 	struct mm_gk20a *mm = container_of(work, struct mm_gk20a,
 					vidmem.clear_mem_worker);
 	struct gk20a *g = mm->g;
 	struct nvgpu_mem *mem;
 
-	while ((mem = get_pending_mem_desc(mm)) != NULL) {
-		gk20a_gmmu_clear_vidmem_mem(g, mem);
+	while ((mem = nvgpu_vidmem_get_pending_alloc(mm)) != NULL) {
+		nvgpu_vidmem_clear(g, mem);
 		nvgpu_free(mem->allocator,
-			(u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+			(u64)nvgpu_vidmem_get_page_alloc(mem->priv.sgt->sgl));
 		nvgpu_free_sgtable(g, &mem->priv.sgt);
 
 		WARN_ON(nvgpu_atomic64_sub_return(mem->aligned_size,
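
The renamed worker is the consumer half of a deferred-free scheme: freed vidmem buffers are queued, and this work item drains the queue, scrubbing each buffer before handing its pages back to the allocator and decrementing the pending-byte count. A kernel-style sketch of the same drain loop, assuming a hypothetical list of pending buffers under a spinlock (none of these names are the driver's):

#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct pending_buf {
	struct list_head entry;
	/* allocation handle, size, ... */
};

static LIST_HEAD(pending_list);
static DEFINE_SPINLOCK(pending_lock);

/* Pop one queued buffer, or NULL once the queue is drained;
 * plays the role of nvgpu_vidmem_get_pending_alloc() above. */
static struct pending_buf *get_pending(void)
{
	struct pending_buf *buf = NULL;

	spin_lock(&pending_lock);
	if (!list_empty(&pending_list)) {
		buf = list_first_entry(&pending_list, struct pending_buf, entry);
		list_del(&buf->entry);
	}
	spin_unlock(&pending_lock);
	return buf;
}

static void clear_worker(struct work_struct *work)
{
	struct pending_buf *buf;

	while ((buf = get_pending()) != NULL) {
		/* scrub buffer contents here (the nvgpu_vidmem_clear() step),
		 * then return it to the allocator */
		kfree(buf);
	}
}

static DECLARE_WORK(clear_work, clear_worker);
/* Producers add to pending_list under pending_lock, then call
 * schedule_work(&clear_work) to kick the drain. */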