author    Joshua Bakita <bakitajoshua@gmail.com>  2024-09-25 16:09:09 -0400
committer Joshua Bakita <bakitajoshua@gmail.com>  2024-09-25 16:09:09 -0400
commit    f347fde22f1297e4f022600d201780d5ead78114 (patch)
tree      76be305d6187003a1e0486ff6e91efb1062ae118 /include/os/linux/nvgpu_mem.c
parent    8340d234d78a7d0f46c11a584de538148b78b7cb (diff)

Delete no-longer-needed nvgpu headers (HEAD, master, jbakita-wip)
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/linux/nvgpu_mem.c')
-rw-r--r--  include/os/linux/nvgpu_mem.c  | 348
1 file changed, 0 insertions(+), 348 deletions(-)
diff --git a/include/os/linux/nvgpu_mem.c b/include/os/linux/nvgpu_mem.c
deleted file mode 100644
index d6a3189..0000000
--- a/include/os/linux/nvgpu_mem.c
+++ /dev/null
@@ -1,348 +0,0 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <nvgpu/dma.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/page_allocator.h>
#include <nvgpu/log.h>
#include <nvgpu/bug.h>
#include <nvgpu/enabled.h>
#include <nvgpu/kmem.h>
#include <nvgpu/vidmem.h>
#include <nvgpu/gk20a.h>

#include <nvgpu/linux/dma.h>

#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

#include "os_linux.h"
#include "dmabuf_vidmem.h"

#include "gk20a/mm_gk20a.h"
#include "platform_gk20a.h"

static u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
{
        struct device *dev = dev_from_gk20a(g);
        struct gk20a_platform *platform = gk20a_get_platform(dev);
        u64 ipa = sg_phys((struct scatterlist *)sgl);

        if (platform->phys_addr)
                return platform->phys_addr(g, ipa);

        return ipa;
}

/*
 * Obtain a SYSMEM address from a Linux SGL. This should eventually go away
 * and/or become private to this file once all bad usages of Linux SGLs are
 * cleaned up in the driver.
 */
u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
{
        if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ||
            !nvgpu_iommuable(g))
                return g->ops.mm.gpu_phys_addr(g, NULL,
                                __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)sgl));

        if (sg_dma_address(sgl) == 0)
                return g->ops.mm.gpu_phys_addr(g, NULL,
                                __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)sgl));

        if (sg_dma_address(sgl) == DMA_ERROR_CODE)
                return 0;

        return nvgpu_mem_iommu_translate(g, sg_dma_address(sgl));
}
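
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * resolve the GPU-visible address of the first chunk of an already
 * DMA-mapped Linux sg_table. The "sgt" parameter is hypothetical; only
 * its first scatterlist entry is inspected. Depending on the checks
 * above, the result is a raw physical address, an IPA-translated
 * address, or an IOMMU (SMMU) address.
 */
static u64 example_first_chunk_addr(struct gk20a *g, struct sg_table *sgt)
{
        return nvgpu_mem_get_addr_sgl(g, sgt->sgl);
}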

/*
 * Obtain the address the GPU should use from the %mem assuming this is a
 * SYSMEM allocation.
 */
static u64 nvgpu_mem_get_addr_sysmem(struct gk20a *g, struct nvgpu_mem *mem)
{
        return nvgpu_mem_get_addr_sgl(g, mem->priv.sgt->sgl);
}

/*
 * Return the base address of %mem. Handles whether this is a VIDMEM or
 * SYSMEM allocation.
 *
 * Note: this API does not make sense to use for _VIDMEM_ buffers with more
 * than one scatterlist chunk. If there's more than one scatterlist chunk
 * then the buffer is not contiguous, so the base address alone isn't very
 * useful. The same is true for SYSMEM if there's no IOMMU.
 *
 * However! It _is_ OK to use this on discontiguous SYSMEM buffers _if_
 * there's an IOMMU present and enabled for the GPU.
 */
u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem)
{
        struct nvgpu_page_alloc *alloc;

        if (mem->aperture == APERTURE_SYSMEM)
                return nvgpu_mem_get_addr_sysmem(g, mem);

        /*
         * Otherwise get the vidmem address.
         */
        alloc = mem->vidmem_alloc;

        /* This API should not be used with > 1 chunks */
        WARN_ON(alloc->nr_chunks != 1);

        return alloc->base;
}
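
/*
 * Illustrative sketch, not part of the original file: allocate a SYSMEM
 * buffer via the nvgpu DMA API and query the address the GPU should use
 * for it. nvgpu_dma_alloc() and nvgpu_dma_free() are assumed to behave as
 * declared in <nvgpu/dma.h>; error handling is reduced to the minimum.
 */
static int example_query_sysmem_addr(struct gk20a *g)
{
        struct nvgpu_mem mem = { };
        u64 gpu_addr;

        if (nvgpu_dma_alloc(g, PAGE_SIZE, &mem))
                return -ENOMEM;

        /* SYSMEM aperture: dispatches to nvgpu_mem_get_addr_sysmem(). */
        gpu_addr = nvgpu_mem_get_addr(g, &mem);
        (void)gpu_addr;

        nvgpu_dma_free(g, &mem);
        return 0;
}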

/*
 * This should only be used on contiguous buffers regardless of whether
 * there's an IOMMU present/enabled. This applies to both SYSMEM and
 * VIDMEM.
 */
u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem)
{
        /*
         * For a VIDMEM buf, this is identical to simply get_addr() so just
         * fall back to that.
         */
        if (mem->aperture == APERTURE_VIDMEM)
                return nvgpu_mem_get_addr(g, mem);

        return __nvgpu_sgl_phys(g, (struct nvgpu_sgl *)mem->priv.sgt->sgl);
}

/*
 * Be careful how you use this! You are responsible for correctly freeing
 * this memory.
 */
int nvgpu_mem_create_from_mem(struct gk20a *g,
                              struct nvgpu_mem *dest, struct nvgpu_mem *src,
                              u64 start_page, int nr_pages)
{
        int ret;
        u64 start = start_page * PAGE_SIZE;
        u64 size = nr_pages * PAGE_SIZE;
        dma_addr_t new_iova;

        if (src->aperture != APERTURE_SYSMEM)
                return -EINVAL;

        /* Some silly things a caller might do... */
        if (size > src->size)
                return -EINVAL;
        if ((start + size) > src->size)
                return -EINVAL;

        dest->mem_flags = src->mem_flags | NVGPU_MEM_FLAG_SHADOW_COPY;
        dest->aperture = src->aperture;
        dest->skip_wmb = src->skip_wmb;
        dest->size = size;

        /*
         * Re-use the CPU mapping only if the mapping was made by the DMA API.
         *
         * Bug 2040115: the DMA API wrapper makes the mapping that we should
         * re-use.
         */
        if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) ||
            nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
                dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page);

        dest->priv.pages = src->priv.pages + start_page;
        dest->priv.flags = src->priv.flags;

        new_iova = sg_dma_address(src->priv.sgt->sgl) ?
                sg_dma_address(src->priv.sgt->sgl) + start : 0;

        /*
         * Make a new SG table that is based only on the subset of pages that
         * is passed to us. This table gets freed by the dma free routines.
         */
        if (src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)
                ret = nvgpu_get_sgtable_from_pages(g, &dest->priv.sgt,
                                src->priv.pages + start_page,
                                new_iova, size);
        else
                ret = nvgpu_get_sgtable(g, &dest->priv.sgt, dest->cpu_va,
                                new_iova, size);

        return ret;
}
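
/*
 * Illustrative sketch, not part of the original file: carve a one-page
 * window out of an existing SYSMEM buffer. Per the warning above, the
 * caller owns the resulting shadow copy; here we assume the DMA free
 * routines handle NVGPU_MEM_FLAG_SHADOW_COPY buffers, as the comment in
 * nvgpu_mem_create_from_mem() suggests. "parent" is a hypothetical,
 * already-allocated multi-page nvgpu_mem.
 */
static int example_sub_buffer(struct gk20a *g, struct nvgpu_mem *parent)
{
        struct nvgpu_mem window = { };
        int err;

        /* One page starting at page 1 of the parent buffer. */
        err = nvgpu_mem_create_from_mem(g, &window, parent, 1, 1);
        if (err)
                return err;

        /* ... use window ... then free it; the parent stays live. */
        nvgpu_dma_free(g, &window);
        return 0;
}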

int __nvgpu_mem_create_from_pages(struct gk20a *g, struct nvgpu_mem *dest,
                                  struct page **pages, int nr_pages)
{
        struct sg_table *sgt;
        struct page **our_pages =
                nvgpu_kmalloc(g, sizeof(struct page *) * nr_pages);

        if (!our_pages)
                return -ENOMEM;

        memcpy(our_pages, pages, sizeof(struct page *) * nr_pages);

        if (nvgpu_get_sgtable_from_pages(g, &sgt, pages, 0,
                                         nr_pages * PAGE_SIZE)) {
                nvgpu_kfree(g, our_pages);
                return -ENOMEM;
        }

        /*
         * If we are making an SGT from physical pages we can be reasonably
         * certain that this should bypass the SMMU - thus we set the DMA (aka
         * IOVA) address to 0. This tells the GMMU mapping code to not make a
         * mapping directed to the SMMU.
         */
        sg_dma_address(sgt->sgl) = 0;

        dest->mem_flags = __NVGPU_MEM_FLAG_NO_DMA;
        dest->aperture = APERTURE_SYSMEM;
        dest->skip_wmb = 0;
        dest->size = PAGE_SIZE * nr_pages;

        dest->priv.flags = 0;
        dest->priv.pages = our_pages;
        dest->priv.sgt = sgt;

        return 0;
}

#ifdef CONFIG_TEGRA_GK20A_NVHOST
int __nvgpu_mem_create_from_phys(struct gk20a *g, struct nvgpu_mem *dest,
                                 u64 src_phys, int nr_pages)
{
        struct page **pages =
                nvgpu_kmalloc(g, sizeof(struct page *) * nr_pages);
        int i, ret = 0;

        if (!pages)
                return -ENOMEM;

        for (i = 0; i < nr_pages; i++)
                pages[i] = phys_to_page(src_phys + PAGE_SIZE * i);

        ret = __nvgpu_mem_create_from_pages(g, dest, pages, nr_pages);
        nvgpu_kfree(g, pages);

        return ret;
}
#endif
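
#ifdef CONFIG_TEGRA_GK20A_NVHOST
/*
 * Illustrative sketch, not part of the original file: wrap a physically
 * contiguous region (e.g. a hypothetical syncpoint aperture at "sp_base")
 * in an nvgpu_mem. __nvgpu_mem_create_from_phys() expands the region into
 * a page array and defers to __nvgpu_mem_create_from_pages() above, so
 * the resulting buffer bypasses the SMMU (IOVA == 0).
 */
static int example_wrap_carveout(struct gk20a *g, struct nvgpu_mem *dest,
                                 u64 sp_base)
{
        /* Four contiguous pages starting at physical address sp_base. */
        return __nvgpu_mem_create_from_phys(g, dest, sp_base, 4);
}
#endif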

static struct nvgpu_sgl *nvgpu_mem_linux_sgl_next(struct nvgpu_sgl *sgl)
{
        return (struct nvgpu_sgl *)sg_next((struct scatterlist *)sgl);
}

static u64 nvgpu_mem_linux_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
{
        return (u64)__nvgpu_sgl_phys(g, sgl);
}

static u64 nvgpu_mem_linux_sgl_dma(struct nvgpu_sgl *sgl)
{
        return (u64)sg_dma_address((struct scatterlist *)sgl);
}

static u64 nvgpu_mem_linux_sgl_length(struct nvgpu_sgl *sgl)
{
        return (u64)((struct scatterlist *)sgl)->length;
}

static u64 nvgpu_mem_linux_sgl_gpu_addr(struct gk20a *g,
                                        struct nvgpu_sgl *sgl,
                                        struct nvgpu_gmmu_attrs *attrs)
{
        if (sg_dma_address((struct scatterlist *)sgl) == 0)
                return g->ops.mm.gpu_phys_addr(g, attrs,
                                __nvgpu_sgl_phys(g, sgl));

        if (sg_dma_address((struct scatterlist *)sgl) == DMA_ERROR_CODE)
                return 0;

        return nvgpu_mem_iommu_translate(g,
                        sg_dma_address((struct scatterlist *)sgl));
}

static bool nvgpu_mem_linux_sgt_iommuable(struct gk20a *g,
                                          struct nvgpu_sgt *sgt)
{
        if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG))
                return false;
        return true;
}

static void nvgpu_mem_linux_sgl_free(struct gk20a *g, struct nvgpu_sgt *sgt)
{
        /*
         * Free this SGT. All we do is free the passed SGT. The actual Linux
         * SGT/SGL needs to be freed separately.
         */
        nvgpu_kfree(g, sgt);
}

static const struct nvgpu_sgt_ops nvgpu_linux_sgt_ops = {
        .sgl_next = nvgpu_mem_linux_sgl_next,
        .sgl_phys = nvgpu_mem_linux_sgl_phys,
        .sgl_dma = nvgpu_mem_linux_sgl_dma,
        .sgl_length = nvgpu_mem_linux_sgl_length,
        .sgl_gpu_addr = nvgpu_mem_linux_sgl_gpu_addr,
        .sgt_iommuable = nvgpu_mem_linux_sgt_iommuable,
        .sgt_free = nvgpu_mem_linux_sgl_free,
};
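
/*
 * Illustrative sketch, not part of the original file: walk every chunk of
 * an nvgpu_sgt through the ops table above, summing the total length.
 * Calling through sgt->ops keeps the walker independent of whether the
 * SGT is backed by a Linux SGL or a vidmem page allocation.
 */
static u64 example_sgt_total_length(struct nvgpu_sgt *sgt)
{
        struct nvgpu_sgl *sgl;
        u64 total = 0;

        for (sgl = sgt->sgl; sgl; sgl = sgt->ops->sgl_next(sgl))
                total += sgt->ops->sgl_length(sgl);

        return total;
}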

static struct nvgpu_sgt *__nvgpu_mem_get_sgl_from_vidmem(
                struct gk20a *g,
                struct scatterlist *linux_sgl)
{
        struct nvgpu_page_alloc *vidmem_alloc;

        vidmem_alloc = nvgpu_vidmem_get_page_alloc(linux_sgl);
        if (!vidmem_alloc)
                return NULL;

        return &vidmem_alloc->sgt;
}

struct nvgpu_sgt *nvgpu_linux_sgt_create(struct gk20a *g, struct sg_table *sgt)
{
        struct nvgpu_sgt *nvgpu_sgt;
        struct scatterlist *linux_sgl = sgt->sgl;

        if (nvgpu_addr_is_vidmem_page_alloc(sg_dma_address(linux_sgl)))
                return __nvgpu_mem_get_sgl_from_vidmem(g, linux_sgl);

        nvgpu_sgt = nvgpu_kzalloc(g, sizeof(*nvgpu_sgt));
        if (!nvgpu_sgt)
                return NULL;

        nvgpu_log(g, gpu_dbg_sgl, "Making Linux SGL!");

        nvgpu_sgt->sgl = (struct nvgpu_sgl *)linux_sgl;
        nvgpu_sgt->ops = &nvgpu_linux_sgt_ops;

        return nvgpu_sgt;
}

struct nvgpu_sgt *nvgpu_sgt_create_from_mem(struct gk20a *g,
                                            struct nvgpu_mem *mem)
{
        return nvgpu_linux_sgt_create(g, mem->priv.sgt);
}
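
/*
 * Illustrative sketch, not part of the original file: create an nvgpu_sgt
 * view of an nvgpu_mem, use it, then release the wrapper. nvgpu_sgt_free()
 * is assumed here to be the generic helper that dispatches to .sgt_free
 * above; the underlying Linux sg_table stays owned by "mem".
 */
static int example_sgt_roundtrip(struct gk20a *g, struct nvgpu_mem *mem)
{
        struct nvgpu_sgt *sgt = nvgpu_sgt_create_from_mem(g, mem);

        if (!sgt)
                return -ENOMEM;

        /* ... walk sgt, e.g. with example_sgt_total_length() ... */

        nvgpu_sgt_free(g, sgt);
        return 0;
}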