Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vidmem.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vidmem.c  268
1 file changed, 268 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/vidmem.c b/drivers/gpu/nvgpu/common/linux/vidmem.c
new file mode 100644
index 00000000..e89dd07a
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/vidmem.c
@@ -0,0 +1,268 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>

#include <linux/platform/tegra/tegra_fd.h>

#include <nvgpu/dma.h>
#include <nvgpu/enabled.h>
#include <nvgpu/vidmem.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/page_allocator.h>

#include <nvgpu/linux/dma.h>

#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"

#include "vm_priv.h"

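/*
 * Vidmem buffers are tracked by a struct nvgpu_page_alloc rather than by a
 * CPU-visible DMA address. The helpers below stash that pointer in the
 * scatterlist's dma_address field with bit 0 set as a tag, so later code can
 * tell a vidmem scatterlist apart from a regular sysmem one and recover the
 * nvgpu_page_alloc pointer by clearing the tag bit again.
 */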
void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
{
        /* set bit 0 to indicate vidmem allocation */
        sg_dma_address(sgl) = (addr | 1ULL);
}

bool is_vidmem_page_alloc(u64 addr)
{
        return !!(addr & 1ULL);
}

struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl)
{
        u64 addr;

        addr = sg_dma_address(sgl);

        if (is_vidmem_page_alloc(addr))
                addr = addr & ~1ULL;
        else
                WARN_ON(1);

        return (struct nvgpu_page_alloc *)(uintptr_t)addr;
}

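/*
 * dma-buf exporter callbacks for vidmem buffers. map_dma_buf simply hands
 * back the sg_table that was built when the vidmem was allocated; nothing is
 * mapped per attachment, so unmap_dma_buf has nothing to undo. Releasing the
 * dma-buf tears down the attached private data and frees the backing vidmem.
 */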
static struct sg_table *gk20a_vidbuf_map_dma_buf(
        struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
        struct gk20a_vidmem_buf *buf = attach->dmabuf->priv;

        return buf->mem->priv.sgt;
}

static void gk20a_vidbuf_unmap_dma_buf(struct dma_buf_attachment *attach,
                                       struct sg_table *sgt,
                                       enum dma_data_direction dir)
{
}

static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
{
        struct gk20a_vidmem_buf *buf = dmabuf->priv;

        gk20a_dbg_fn("");

        if (buf->dmabuf_priv)
                buf->dmabuf_priv_delete(buf->dmabuf_priv);

        nvgpu_dma_free(buf->g, buf->mem);
        nvgpu_kfree(buf->g, buf);
}

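/*
 * CPU access to vidmem through the dma-buf interface is not supported:
 * kmap/kmap_atomic warn and return NULL, and mmap fails with -EINVAL.
 */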
static void *gk20a_vidbuf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON("Not supported");
        return NULL;
}

static void *gk20a_vidbuf_kmap_atomic(struct dma_buf *dmabuf,
                                      unsigned long page_num)
{
        WARN_ON("Not supported");
        return NULL;
}

static int gk20a_vidbuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

static int gk20a_vidbuf_set_private(struct dma_buf *dmabuf,
                struct device *dev, void *priv, void (*delete)(void *priv))
{
        struct gk20a_vidmem_buf *buf = dmabuf->priv;

        buf->dmabuf_priv = priv;
        buf->dmabuf_priv_delete = delete;

        return 0;
}

static void *gk20a_vidbuf_get_private(struct dma_buf *dmabuf,
                struct device *dev)
{
        struct gk20a_vidmem_buf *buf = dmabuf->priv;

        return buf->dmabuf_priv;
}

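/*
 * Note that .set_drvdata/.get_drvdata are not part of the upstream
 * struct dma_buf_ops; they are presumably NVIDIA/Tegra downstream extensions
 * that let other drivers attach per-device private data (with a destructor)
 * to an exported vidmem buffer, backed by the two helpers above.
 */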
static const struct dma_buf_ops gk20a_vidbuf_ops = {
        .map_dma_buf = gk20a_vidbuf_map_dma_buf,
        .unmap_dma_buf = gk20a_vidbuf_unmap_dma_buf,
        .release = gk20a_vidbuf_release,
        .kmap_atomic = gk20a_vidbuf_kmap_atomic,
        .kmap = gk20a_vidbuf_kmap,
        .mmap = gk20a_vidbuf_mmap,
        .set_drvdata = gk20a_vidbuf_set_private,
        .get_drvdata = gk20a_vidbuf_get_private,
};

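/*
 * Wrap a vidmem buffer in a dma-buf. The exported buffer points back at the
 * gk20a_vidmem_buf through its priv field, which is how the callbacks above
 * and gk20a_vidmem_buf_owner() below find their state again.
 */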
static struct dma_buf *gk20a_vidbuf_export(struct gk20a_vidmem_buf *buf)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.priv = buf;
        exp_info.ops = &gk20a_vidbuf_ops;
        exp_info.size = buf->mem->size;
        exp_info.flags = O_RDWR;

        return dma_buf_export(&exp_info);
}

struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf)
{
        struct gk20a_vidmem_buf *buf = dmabuf->priv;

        if (dmabuf->ops != &gk20a_vidbuf_ops)
                return NULL;

        return buf->g;
}

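/*
 * Allocate a user-visible vidmem buffer and return it as a dma-buf fd. The
 * first allocation scrubs all of vidmem (under first_clear_mutex) before any
 * memory is handed out; after that the flow is: allocate the backing vidmem,
 * export it as a dma-buf, and install an fd for it. tegra_alloc_fd() is used
 * rather than a plain get_unused_fd_flags(), presumably so the descriptor is
 * allocated at or above 1024 and stays clear of low fd numbers userspace may
 * assume it owns.
 *
 * A caller -- for example an ioctl handler (hypothetical sketch, not code
 * from this file) -- would hand the return value back to userspace directly:
 *
 *	int fd = gk20a_vidmem_buf_alloc(g, args->size);
 *	if (fd < 0)
 *		return fd;
 *	args->dmabuf_fd = fd;
 */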
int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
{
        struct gk20a_vidmem_buf *buf;
        int err = 0, fd;

        gk20a_dbg_fn("");

        buf = nvgpu_kzalloc(g, sizeof(*buf));
        if (!buf)
                return -ENOMEM;

        buf->g = g;

        if (!g->mm.vidmem.cleared) {
                nvgpu_mutex_acquire(&g->mm.vidmem.first_clear_mutex);
                if (!g->mm.vidmem.cleared) {
                        err = gk20a_vidmem_clear_all(g);
                        if (err) {
                                nvgpu_err(g,
                                        "failed to clear whole vidmem");
                                goto err_kfree;
                        }
                }
                nvgpu_mutex_release(&g->mm.vidmem.first_clear_mutex);
        }

        buf->mem = nvgpu_kzalloc(g, sizeof(struct nvgpu_mem));
        if (!buf->mem)
                goto err_kfree;

        buf->mem->mem_flags |= NVGPU_MEM_FLAG_USER_MEM;

        err = nvgpu_dma_alloc_vid(g, bytes, buf->mem);
        if (err)
                goto err_memfree;

        buf->dmabuf = gk20a_vidbuf_export(buf);
        if (IS_ERR(buf->dmabuf)) {
                err = PTR_ERR(buf->dmabuf);
                goto err_bfree;
        }

        fd = tegra_alloc_fd(current->files, 1024, O_RDWR);
        if (fd < 0) {
                /* ->release frees what we have done */
                dma_buf_put(buf->dmabuf);
                return fd;
        }

        /* fclose() on this drops one ref, freeing the dma buf */
        fd_install(fd, buf->dmabuf->file);

        return fd;

err_bfree:
        nvgpu_dma_free(g, buf->mem);
err_memfree:
        nvgpu_kfree(g, buf->mem);
err_kfree:
        nvgpu_kfree(g, buf);
        return err;
}

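/*
 * Read or write a range of an exported vidmem buffer on behalf of the
 * debugger's ACCESS_FB_MEMORY ioctl. Buffers that do not live in vidmem are
 * rejected; otherwise the request is serviced with nvgpu_mem_rd_n()/
 * nvgpu_mem_wr_n() against the buffer's nvgpu_mem.
 */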
int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
                void *buffer, u64 offset, u64 size, u32 cmd)
{
        struct gk20a_vidmem_buf *vidmem_buf;
        struct nvgpu_mem *mem;
        int err = 0;

        if (gk20a_dmabuf_aperture(g, dmabuf) != APERTURE_VIDMEM)
                return -EINVAL;

        vidmem_buf = dmabuf->priv;
        mem = vidmem_buf->mem;

        switch (cmd) {
        case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ:
                nvgpu_mem_rd_n(g, mem, offset, buffer, size);
                break;

        case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE:
                nvgpu_mem_wr_n(g, mem, offset, buffer, size);
                break;

        default:
                err = -EINVAL;
        }

        return err;
}

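/*
 * Deferred-free worker for vidmem. Freed vidmem is queued (see
 * get_pending_mem_desc()) rather than released immediately; this worker
 * drains the pending list, scrubs each buffer, returns its pages to the
 * vidmem allocator, frees the sg_table, and subtracts the buffer's size from
 * the bytes_pending counter, which should never go negative (hence the
 * WARN_ON).
 */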
void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
{
        struct mm_gk20a *mm = container_of(work, struct mm_gk20a,
                                        vidmem.clear_mem_worker);
        struct gk20a *g = mm->g;
        struct nvgpu_mem *mem;

        while ((mem = get_pending_mem_desc(mm)) != NULL) {
                gk20a_gmmu_clear_vidmem_mem(g, mem);
                nvgpu_free(mem->allocator,
                           (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
                nvgpu_free_sgtable(g, &mem->priv.sgt);

                WARN_ON(nvgpu_atomic64_sub_return(mem->aligned_size,
                                        &g->mm.vidmem.bytes_pending) < 0);
                mem->size = 0;
                mem->aperture = APERTURE_INVALID;

                nvgpu_kfree(g, mem);
        }
}