Diffstat (limited to 'include/os/linux/dmabuf_vidmem.c')
-rw-r--r--	include/os/linux/dmabuf_vidmem.c	269
1 file changed, 269 insertions(+), 0 deletions(-)
diff --git a/include/os/linux/dmabuf_vidmem.c b/include/os/linux/dmabuf_vidmem.c
new file mode 100644
index 0000000..bada5dc
--- /dev/null
+++ b/include/os/linux/dmabuf_vidmem.c
@@ -0,0 +1,269 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>
#include <linux/version.h>
#include <uapi/linux/nvgpu.h>

#ifdef CONFIG_NVGPU_USE_TEGRA_ALLOC_FD
#include <linux/platform/tegra/tegra_fd.h>
#endif

#include <nvgpu/dma.h>
#include <nvgpu/enabled.h>
#include <nvgpu/vidmem.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/page_allocator.h>
#include <nvgpu/gk20a.h>

#include <nvgpu/linux/vm.h>
#include <nvgpu/linux/dma.h>

#include "gk20a/mm_gk20a.h"
#include "dmabuf_vidmem.h"

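/*
 * Vidmem allocations carry no struct page backing, so the scatterlist
 * DMA address field is overloaded to store a pointer to the
 * nvgpu_page_alloc describing the allocation. Bit 0 acts as a tag: the
 * alloc pointer is assumed to be at least 2-byte aligned, so a set low
 * bit can only mean "this is a tagged vidmem address". The setter ORs
 * the tag in; the getter checks and strips it again, i.e. a round trip:
 *
 *	nvgpu_vidmem_set_page_alloc(sgl, (u64)(uintptr_t)alloc);
 *	alloc == nvgpu_vidmem_get_page_alloc(sgl);
 */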
bool nvgpu_addr_is_vidmem_page_alloc(u64 addr)
{
	return !!(addr & 1ULL);
}

void nvgpu_vidmem_set_page_alloc(struct scatterlist *sgl, u64 addr)
{
	/* set bit 0 to indicate vidmem allocation */
	sg_dma_address(sgl) = (addr | 1ULL);
}

struct nvgpu_page_alloc *nvgpu_vidmem_get_page_alloc(struct scatterlist *sgl)
{
	u64 addr;

	addr = sg_dma_address(sgl);

	if (nvgpu_addr_is_vidmem_page_alloc(addr))
		addr = addr & ~1ULL;
	else
		WARN_ON(1);

	return (struct nvgpu_page_alloc *)(uintptr_t)addr;
}

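/*
 * dma-buf exporter callbacks. Mapping just hands back the sg_table that
 * was built when the vidmem was allocated; nothing is set up per
 * attachment, which is why the unmap callback is empty. Release undoes
 * everything nvgpu_vidmem_export_linux() set up, including the gk20a
 * ref taken there. CPU access (kmap/mmap) is deliberately unsupported
 * for these buffers.
 */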
static struct sg_table *gk20a_vidbuf_map_dma_buf(
	struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
	struct nvgpu_vidmem_buf *buf = attach->dmabuf->priv;

	return buf->mem->priv.sgt;
}

static void gk20a_vidbuf_unmap_dma_buf(struct dma_buf_attachment *attach,
				       struct sg_table *sgt,
				       enum dma_data_direction dir)
{
}

static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
{
	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
	struct nvgpu_vidmem_linux *linux_buf = buf->priv;
	struct gk20a *g = buf->g;

	vidmem_dbg(g, "Releasing Linux VIDMEM buf: dmabuf=0x%p size=%zuKB",
		   dmabuf, buf->mem->size >> 10);

	if (linux_buf && linux_buf->dmabuf_priv_delete)
		linux_buf->dmabuf_priv_delete(linux_buf->dmabuf_priv);

	nvgpu_kfree(g, linux_buf);
	nvgpu_vidmem_buf_free(g, buf);

	gk20a_put(g);
}

static void *gk20a_vidbuf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON("Not supported");
	return NULL;
}

static void *gk20a_vidbuf_kmap_atomic(struct dma_buf *dmabuf,
				      unsigned long page_num)
{
	WARN_ON("Not supported");
	return NULL;
}

static int gk20a_vidbuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static int gk20a_vidbuf_set_private(struct dma_buf *dmabuf,
		struct device *dev, void *priv, void (*delete)(void *priv))
{
	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
	struct nvgpu_vidmem_linux *linux_buf = buf->priv;

	linux_buf->dmabuf_priv = priv;
	linux_buf->dmabuf_priv_delete = delete;

	return 0;
}

static void *gk20a_vidbuf_get_private(struct dma_buf *dmabuf,
		struct device *dev)
{
	struct nvgpu_vidmem_buf *buf = dmabuf->priv;
	struct nvgpu_vidmem_linux *linux_buf = buf->priv;

	return linux_buf->dmabuf_priv;
}

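/*
 * The kmap callbacks were renamed upstream, so the ops table below
 * picks the field names based on kernel version. Note that set_drvdata
 * and get_drvdata are not fields of the upstream struct dma_buf_ops;
 * they appear to be an extension of the downstream NVIDIA/Tegra kernels
 * this driver builds against.
 */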
static const struct dma_buf_ops gk20a_vidbuf_ops = {
	.map_dma_buf = gk20a_vidbuf_map_dma_buf,
	.unmap_dma_buf = gk20a_vidbuf_unmap_dma_buf,
	.release = gk20a_vidbuf_release,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)
	.map_atomic = gk20a_vidbuf_kmap_atomic,
	.map = gk20a_vidbuf_kmap,
#else
	.kmap_atomic = gk20a_vidbuf_kmap_atomic,
	.kmap = gk20a_vidbuf_kmap,
#endif
	.mmap = gk20a_vidbuf_mmap,
	.set_drvdata = gk20a_vidbuf_set_private,
	.get_drvdata = gk20a_vidbuf_get_private,
};

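/*
 * Wrap an allocated vidmem buffer in a new dma-buf. The buffer is
 * stashed in the dma-buf's priv field, which is what the ops above and
 * nvgpu_vidmem_buf_owner() rely on to get back to it.
 */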
static struct dma_buf *gk20a_vidbuf_export(struct nvgpu_vidmem_buf *buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.priv = buf;
	exp_info.ops = &gk20a_vidbuf_ops;
	exp_info.size = buf->mem->size;
	exp_info.flags = O_RDWR;

	return dma_buf_export(&exp_info);
}

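/*
 * Return the gk20a that exported this dma-buf, or NULL if the buffer
 * did not come from this exporter. Comparing the ops pointer is the
 * usual way for an exporter to recognize its own dma-bufs; only then is
 * it safe to interpret dmabuf->priv as a nvgpu_vidmem_buf.
 */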
struct gk20a *nvgpu_vidmem_buf_owner(struct dma_buf *dmabuf)
{
	struct nvgpu_vidmem_buf *buf = dmabuf->priv;

	if (dmabuf->ops != &gk20a_vidbuf_ops)
		return NULL;

	return buf->g;
}

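/*
 * Allocate @bytes of vidmem, wrap it in a dma-buf, and return an fd for
 * it (or a negative error code). Two references matter here: the
 * gk20a_get() ref taken below is dropped by gk20a_vidbuf_release() when
 * the dma-buf dies, and the fd owns the single dma-buf ref, so closing
 * the fd tears the whole thing down. With
 * CONFIG_NVGPU_USE_TEGRA_ALLOC_FD the fd comes from the Tegra fd
 * allocator instead of get_unused_fd_flags(); the 1024 passed there is
 * presumably the lowest fd number to allocate from.
 */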
int nvgpu_vidmem_export_linux(struct gk20a *g, size_t bytes)
{
	struct nvgpu_vidmem_buf *buf = NULL;
	struct nvgpu_vidmem_linux *priv;
	int err, fd;

	/*
	 * This ref is released when the dma_buf is closed.
	 */
	if (!gk20a_get(g))
		return -ENODEV;

	vidmem_dbg(g, "Allocating vidmem buf: %zu bytes", bytes);

	priv = nvgpu_kzalloc(g, sizeof(*priv));
	if (!priv) {
		err = -ENOMEM;
		goto fail;
	}

	buf = nvgpu_vidmem_user_alloc(g, bytes);
	if (IS_ERR(buf)) {
		err = PTR_ERR(buf);
		/* don't pass an ERR_PTR into the cleanup path below */
		buf = NULL;
		goto fail;
	}

	priv->dmabuf = gk20a_vidbuf_export(buf);
	if (IS_ERR(priv->dmabuf)) {
		err = PTR_ERR(priv->dmabuf);
		goto fail;
	}

	buf->priv = priv;

#ifdef CONFIG_NVGPU_USE_TEGRA_ALLOC_FD
	fd = tegra_alloc_fd(current->files, 1024, O_RDWR);
#else
	fd = get_unused_fd_flags(O_RDWR);
#endif
	if (fd < 0) {
		/* ->release frees what we have done */
		dma_buf_put(priv->dmabuf);
		return fd;
	}

	/* fclose() on this drops one ref, freeing the dma buf */
	fd_install(fd, priv->dmabuf->file);

	vidmem_dbg(g, "Alloced Linux VIDMEM buf: dmabuf=0x%p size=%zuKB",
		   priv->dmabuf, buf->mem->size >> 10);

	return fd;

fail:
	nvgpu_vidmem_buf_free(g, buf);
	nvgpu_kfree(g, priv);
	gk20a_put(g);

	vidmem_dbg(g, "Failed to alloc Linux VIDMEM buf: %d", err);
	return err;
}

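/*
 * Copy between a kernel buffer and an exported vidmem dma-buf on behalf
 * of the NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY ioctl. Buffers that do
 * not live in vidmem are rejected up front; @offset and @size are
 * assumed to have been validated by the caller.
 */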
int nvgpu_vidmem_buf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
		void *buffer, u64 offset, u64 size, u32 cmd)
{
	struct nvgpu_vidmem_buf *vidmem_buf;
	struct nvgpu_mem *mem;
	int err = 0;

	if (gk20a_dmabuf_aperture(g, dmabuf) != APERTURE_VIDMEM)
		return -EINVAL;

	vidmem_buf = dmabuf->priv;
	mem = vidmem_buf->mem;

	nvgpu_speculation_barrier();
	switch (cmd) {
	case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ:
		nvgpu_mem_rd_n(g, mem, offset, buffer, size);
		break;

	case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE:
		nvgpu_mem_wr_n(g, mem, offset, buffer, size);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

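/*
 * Free a vidmem allocation: recover the nvgpu_page_alloc pointer that
 * was tagged into the scatterlist, hand it back to the vidmem
 * allocator, then free the sg_table wrapper itself.
 */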
void __nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem)
{
	nvgpu_free(vidmem->allocator,
		   (u64)nvgpu_vidmem_get_page_alloc(vidmem->priv.sgt->sgl));
	nvgpu_free_sgtable(g, &vidmem->priv.sgt);
}