Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vidmem.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vidmem.c | 254
1 file changed, 254 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vidmem.c b/drivers/gpu/nvgpu/common/linux/vidmem.c
new file mode 100644
index 00000000..1d9fea71
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/vidmem.c
@@ -0,0 +1,254 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>
#include <uapi/linux/nvgpu.h>

#include <linux/platform/tegra/tegra_fd.h>

#include <nvgpu/dma.h>
#include <nvgpu/enabled.h>
#include <nvgpu/vidmem.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/page_allocator.h>

#include <nvgpu/linux/vm.h>
#include <nvgpu/linux/dma.h>
#include <nvgpu/linux/vidmem.h>

#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"
bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
{
	return !!(addr & 1ULL);
}

void nvgpu_vidmem_set_page_alloc(struct scatterlist *sgl, u64 addr)
{
	/* set bit 0 to indicate vidmem allocation */
	sg_dma_address(sgl) = (addr | 1ULL);
}

struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(struct scatterlist *sgl)
{
	u64 addr;

	addr = sg_dma_address(sgl);

	if (nvgpu_addr_is_vidmem_page_alloc(addr))
		addr = addr & ~1ULL;
	else
		WARN_ON(1);

	return (struct nvgpu_page_alloc *)(uintptr_t)addr;
}
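
These three helpers steal bit 0 of the scatterlist's DMA address to mark vidmem allocations: a struct nvgpu_page_alloc pointer is always at least word aligned, so the low bit is free to use as a tag. A minimal round-trip sketch (illustrative only, not part of this change; vidmem_tag_example is a hypothetical name):

static void vidmem_tag_example(struct scatterlist *sgl,
			       struct nvgpu_page_alloc *alloc)
{
	/* Store the pointer with bit 0 set as the vidmem tag. */
	nvgpu_vidmem_set_page_alloc(sgl, (u64)(uintptr_t)alloc);

	/* The tag survives the store... */
	WARN_ON(!nvgpu_addr_is_vidmem_page_alloc(sg_dma_address(sgl)));

	/* ...and stripping it recovers the original pointer. */
	WARN_ON(nvgpu_vidmem_get_page_alloc(sgl) != alloc);
}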

static struct sg_table *gk20a_vidbuf_map_dma_buf(
	struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
	struct nvgpu_vidmem_buf *buf = attach->dmabuf->priv;

	return buf->mem->priv.sgt;
}

static void gk20a_vidbuf_unmap_dma_buf(struct dma_buf_attachment *attach,
				       struct sg_table *sgt,
				       enum dma_data_direction dir)
{
}

static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
{
	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
	struct nvgpu_vidmem_linux *linux_buf = buf->priv;
	struct gk20a *g = buf->g;

	vidmem_dbg(g, "Releasing Linux VIDMEM buf: dmabuf=0x%p size=%zuKB",
		   dmabuf, buf->mem->size >> 10);

	if (linux_buf && linux_buf->dmabuf_priv_delete)
		linux_buf->dmabuf_priv_delete(linux_buf->dmabuf_priv);

	nvgpu_kfree(g, linux_buf);
	nvgpu_vidmem_buf_free(g, buf);

	gk20a_put(g);
}

/*
 * Vidmem lives in the GPU's framebuffer, so there is no kernel page to
 * hand back here: CPU mappings of these buffers are not supported.
 */
static void *gk20a_vidbuf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN(1, "Not supported");
	return NULL;
}

static void *gk20a_vidbuf_kmap_atomic(struct dma_buf *dmabuf,
				      unsigned long page_num)
{
	WARN(1, "Not supported");
	return NULL;
}

static int gk20a_vidbuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static int gk20a_vidbuf_set_private(struct dma_buf *dmabuf,
		struct device *dev, void *priv, void (*delete)(void *priv))
{
	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
	struct nvgpu_vidmem_linux *linux_buf = buf->priv;

	linux_buf->dmabuf_priv = priv;
	linux_buf->dmabuf_priv_delete = delete;

	return 0;
}

static void *gk20a_vidbuf_get_private(struct dma_buf *dmabuf,
		struct device *dev)
{
	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
	struct nvgpu_vidmem_linux *linux_buf = buf->priv;

	return linux_buf->dmabuf_priv;
}
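
The set_private/get_private pair lets a client attach opaque per-buffer state; the delete callback registered here is what gk20a_vidbuf_release() later invokes through linux_buf->dmabuf_priv_delete. A sketch of one possible client, going through the ops table directly for illustration (my_state, my_state_delete and attach_state_example are hypothetical names, and <linux/slab.h> is assumed for kzalloc/kfree):

struct my_state {
	u32 comptags;
};

static void my_state_delete(void *priv)
{
	kfree(priv);
}

static int attach_state_example(struct dma_buf *dmabuf, struct device *dev)
{
	struct my_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;

	/* Freed via my_state_delete() when the dma_buf is released. */
	return dmabuf->ops->set_drvdata(dmabuf, dev, s, my_state_delete);
}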

static const struct dma_buf_ops gk20a_vidbuf_ops = {
	.map_dma_buf = gk20a_vidbuf_map_dma_buf,
	.unmap_dma_buf = gk20a_vidbuf_unmap_dma_buf,
	.release = gk20a_vidbuf_release,
	.kmap_atomic = gk20a_vidbuf_kmap_atomic,
	.kmap = gk20a_vidbuf_kmap,
	.mmap = gk20a_vidbuf_mmap,
	/* set_drvdata/get_drvdata are Tegra downstream extensions to
	 * struct dma_buf_ops; they do not exist upstream. */
	.set_drvdata = gk20a_vidbuf_set_private,
	.get_drvdata = gk20a_vidbuf_get_private,
};
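
Note that gk20a_vidbuf_map_dma_buf() hands back the buffer's own sg_table, whose sg_dma_address() is the tagged page-alloc pointer rather than a real bus address, so only nvgpu itself can usefully import these buffers. An importer-side sketch (illustrative only; vidmem_import_example is a hypothetical name):

static struct nvgpu_page_alloc *vidmem_import_example(struct device *dev,
						      struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct nvgpu_page_alloc *alloc;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return ERR_CAST(sgt);
	}

	/* Recover the vidmem allocation behind the scatterlist. */
	alloc = nvgpu_vidmem_get_page_alloc(sgt->sgl);

	/* The alloc's lifetime is tied to the dma_buf, not the mapping. */
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);

	return alloc;
}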

static struct dma_buf *gk20a_vidbuf_export(struct nvgpu_vidmem_buf *buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.priv = buf;
	exp_info.ops = &gk20a_vidbuf_ops;
	exp_info.size = buf->mem->size;
	exp_info.flags = O_RDWR;

	return dma_buf_export(&exp_info);
}

struct gk20a *nvgpu_vidmem_buf_owner(struct dma_buf *dmabuf)
{
	struct nvgpu_vidmem_buf *buf = dmabuf->priv;

	if (dmabuf->ops != &gk20a_vidbuf_ops)
		return NULL;

	return buf->g;
}
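
The ops-pointer comparison doubles as a type check: a NULL return flags a dma_buf that was not exported by this driver, so buf->priv is only trusted afterwards. A trivial illustrative use (vidmem_buf_is_local_example is a hypothetical name):

static bool vidmem_buf_is_local_example(struct gk20a *g,
					struct dma_buf *dmabuf)
{
	/* NULL owner means a foreign (non-vidmem) dma_buf. */
	return nvgpu_vidmem_buf_owner(dmabuf) == g;
}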

int nvgpu_vidmem_export_linux(struct gk20a *g, size_t bytes)
{
	struct nvgpu_vidmem_buf *buf = NULL;
	struct nvgpu_vidmem_linux *priv;
	int err, fd;

	/*
	 * This ref is released when the dma_buf is closed.
	 */
	if (!gk20a_get(g))
		return -ENODEV;

	priv = nvgpu_kzalloc(g, sizeof(*priv));
	if (!priv) {
		err = -ENOMEM;
		goto fail;
	}

	buf = nvgpu_vidmem_user_alloc(g, bytes);
	if (!buf) {
		err = -ENOMEM;
		goto fail;
	}

	priv->dmabuf = gk20a_vidbuf_export(buf);
	if (IS_ERR(priv->dmabuf)) {
		err = PTR_ERR(priv->dmabuf);
		goto fail;
	}

	buf->priv = priv;

	/*
	 * Allocate the fd at or above 1024, keeping the low fd range free
	 * for the application's own files.
	 */
	fd = tegra_alloc_fd(current->files, 1024, O_RDWR);
	if (fd < 0) {
		/* ->release frees what we have done */
		dma_buf_put(priv->dmabuf);
		return fd;
	}

	/* close() on this fd drops one ref, freeing the dma_buf */
	fd_install(fd, priv->dmabuf->file);

	vidmem_dbg(g, "Allocated Linux VIDMEM buf: dmabuf=0x%p size=%zuKB",
		   priv->dmabuf, buf->mem->size >> 10);

	return fd;

fail:
	nvgpu_vidmem_buf_free(g, buf);
	nvgpu_kfree(g, priv);
	gk20a_put(g);

	vidmem_dbg(g, "Failed to alloc Linux VIDMEM buf: %d", err);
	return err;
}
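
On success the caller gets back a file descriptor that already owns the dma_buf reference. A sketch of how an allocation ioctl handler might consume this, assuming the nvgpu_gpu_alloc_vidmem_args layout from uapi/linux/nvgpu.h (vidmem_alloc_ioctl_example is a hypothetical name):

static int vidmem_alloc_ioctl_example(struct gk20a *g,
				      struct nvgpu_gpu_alloc_vidmem_args *args)
{
	int fd = nvgpu_vidmem_export_linux(g, args->in.size);

	if (fd < 0)
		return fd;

	/* Userspace now owns the fd, and with it one dma_buf ref. */
	args->out.dmabuf_fd = fd;
	return 0;
}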

int nvgpu_vidmem_buf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
				   void *buffer, u64 offset, u64 size, u32 cmd)
{
	struct nvgpu_vidmem_buf *vidmem_buf;
	struct nvgpu_mem *mem;
	int err = 0;

	if (gk20a_dmabuf_aperture(g, dmabuf) != APERTURE_VIDMEM)
		return -EINVAL;

	vidmem_buf = dmabuf->priv;
	mem = vidmem_buf->mem;

	switch (cmd) {
	case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ:
		nvgpu_mem_rd_n(g, mem, offset, buffer, size);
		break;

	case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE:
		nvgpu_mem_wr_n(g, mem, offset, buffer, size);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
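
This function is the kernel end of the debugger's FB-memory access path. A userspace-side sketch of driving it through the dbg ioctl (illustrative only; the args layout is my reading of uapi/linux/nvgpu.h, and read_vidmem_example is a hypothetical name):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int read_vidmem_example(int dbg_fd, int dmabuf_fd, void *dst,
			       uint64_t offset, uint64_t size)
{
	struct nvgpu_dbg_gpu_access_fb_memory_args args = {
		.cmd = NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ,
		.dmabuf_fd = dmabuf_fd,
		.buffer = (uintptr_t)dst,	/* staging buffer in userspace */
		.offset = offset,		/* offset into the vidmem buffer */
		.size = size,
	};

	return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY, &args);
}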

void __nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem)
{
	nvgpu_free(vidmem->allocator,
		   (u64)(uintptr_t)nvgpu_vidmem_get_page_alloc(
			vidmem->priv.sgt->sgl));
	nvgpu_free_sgtable(g, &vidmem->priv.sgt);
}