author		Joshua Bakita <bakitajoshua@gmail.com>		2024-09-25 16:09:09 -0400
committer	Joshua Bakita <bakitajoshua@gmail.com>		2024-09-25 16:09:09 -0400
commit		f347fde22f1297e4f022600d201780d5ead78114 (patch)
tree		76be305d6187003a1e0486ff6e91efb1062ae118 /include/os/linux/vm.c
parent		8340d234d78a7d0f46c11a584de538148b78b7cb (diff)
Delete no-longer-needed nvgpu headers (HEAD, master, jbakita-wip)
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/linux/vm.c')
-rw-r--r--	include/os/linux/vm.c	358
1 file changed, 0 insertions(+), 358 deletions(-)
diff --git a/include/os/linux/vm.c b/include/os/linux/vm.c
deleted file mode 100644
index 8956cce..0000000
--- a/include/os/linux/vm.c
+++ /dev/null
@@ -1,358 +0,0 @@
/*
 * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/log.h>
#include <nvgpu/lock.h>
#include <nvgpu/rbtree.h>
#include <nvgpu/vm_area.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/page_allocator.h>
#include <nvgpu/vidmem.h>
#include <nvgpu/utils.h>
#include <nvgpu/gk20a.h>

#include <nvgpu/linux/vm.h>
#include <nvgpu/linux/nvgpu_mem.h>

#include "gk20a/mm_gk20a.h"

#include "platform_gk20a.h"
#include "os_linux.h"
#include "dmabuf.h"
#include "dmabuf_vidmem.h"

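/*
 * Translate the NVGPU_AS_MAP_BUFFER_FLAGS_* values from the Linux UAPI
 * into core NVGPU_VM_MAP_* flags. L3 allocation requests are honored only
 * when L3 support is enabled, and the deprecated MAPPABLE_COMPBITS flag
 * is dropped with a warning.
 */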
static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
{
	u32 core_flags = 0;

	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
		core_flags |= NVGPU_VM_MAP_FIXED_OFFSET;
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE)
		core_flags |= NVGPU_VM_MAP_CACHEABLE;
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT)
		core_flags |= NVGPU_VM_MAP_IO_COHERENT;
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE)
		core_flags |= NVGPU_VM_MAP_UNMAPPED_PTE;
	if (!nvgpu_is_enabled(g, NVGPU_DISABLE_L3_SUPPORT)) {
		if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC)
			core_flags |= NVGPU_VM_MAP_L3_ALLOC;
	}
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)
		core_flags |= NVGPU_VM_MAP_DIRECT_KIND_CTRL;
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC)
		core_flags |= NVGPU_VM_MAP_PLATFORM_ATOMIC;

	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS)
		nvgpu_warn(g, "Ignoring deprecated flag: "
			   "NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS");

	return core_flags;
}

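/*
 * Linearly walk the VM's mapped-buffer rbtree looking for an existing
 * mapping of the given dma_buf with a matching kind. Used for non-fixed
 * maps, where the GPU VA of any prior mapping is not known up front.
 */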
static struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_reverse(
	struct vm_gk20a *vm, struct dma_buf *dmabuf, u32 kind)
{
	struct nvgpu_rbtree_node *node = NULL;
	struct nvgpu_rbtree_node *root = vm->mapped_buffers;

	nvgpu_rbtree_enum_start(0, &node, root);

	while (node) {
		struct nvgpu_mapped_buf *mapped_buffer =
			mapped_buffer_from_rbtree_node(node);

		if (mapped_buffer->os_priv.dmabuf == dmabuf &&
		    mapped_buffer->kind == kind)
			return mapped_buffer;

		nvgpu_rbtree_enum_next(&node, node);
	}

	return NULL;
}

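/*
 * Resolve a GPU virtual address to the dma_buf backing it and the offset
 * of that address within the buffer. Returns -EINVAL if no mapping
 * covers gpu_va.
 */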
int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
		      struct dma_buf **dmabuf,
		      u64 *offset)
{
	struct nvgpu_mapped_buf *mapped_buffer;
	struct gk20a *g = gk20a_from_vm(vm);

	nvgpu_log_fn(g, "gpu_va=0x%llx", gpu_va);

	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

	mapped_buffer = __nvgpu_vm_find_mapped_buf_range(vm, gpu_va);
	if (!mapped_buffer) {
		nvgpu_mutex_release(&vm->update_gmmu_lock);
		return -EINVAL;
	}

	*dmabuf = mapped_buffer->os_priv.dmabuf;
	*offset = gpu_va - mapped_buffer->addr;

	nvgpu_mutex_release(&vm->update_gmmu_lock);

	return 0;
}

u64 nvgpu_os_buf_get_size(struct nvgpu_os_buffer *os_buf)
{
	return os_buf->dmabuf->size;
}

/*
 * vm->update_gmmu_lock must be held. This checks to see if we already have
 * mapped the passed buffer into this VM. If so, just return the existing
 * mapping address.
 */
struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
					       struct nvgpu_os_buffer *os_buf,
					       u64 map_addr,
					       u32 flags,
					       int kind)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct nvgpu_mapped_buf *mapped_buffer = NULL;

	if (flags & NVGPU_VM_MAP_FIXED_OFFSET) {
		mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, map_addr);
		if (!mapped_buffer)
			return NULL;

		if (mapped_buffer->os_priv.dmabuf != os_buf->dmabuf ||
		    mapped_buffer->kind != (u32)kind)
			return NULL;
	} else {
		mapped_buffer =
			__nvgpu_vm_find_mapped_buf_reverse(vm,
							   os_buf->dmabuf,
							   kind);
		if (!mapped_buffer)
			return NULL;
	}

	if (mapped_buffer->flags != flags)
		return NULL;

	/*
	 * If we find the mapping here then that means we have mapped it already
	 * and the prior pin and get must be undone.
	 */
	gk20a_mm_unpin(os_buf->dev, os_buf->dmabuf, os_buf->attachment,
		       mapped_buffer->os_priv.sgt);
	dma_buf_put(os_buf->dmabuf);

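	/* Log the reused mapping: GPU VA, DMA/physical address, page size,
	 * address space ID, flags, and aperture. */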
	nvgpu_log(g, gpu_dbg_map,
		  "gv: 0x%04x_%08x + 0x%-7zu "
		  "[dma: 0x%010llx, pa: 0x%010llx] "
		  "pgsz=%-3dKb as=%-2d "
		  "flags=0x%x apt=%s (reused)",
		  u64_hi32(mapped_buffer->addr), u64_lo32(mapped_buffer->addr),
		  os_buf->dmabuf->size,
		  (u64)sg_dma_address(mapped_buffer->os_priv.sgt->sgl),
		  (u64)sg_phys(mapped_buffer->os_priv.sgt->sgl),
		  vm->gmmu_page_sizes[mapped_buffer->pgsz_idx] >> 10,
		  vm_aspace_id(vm),
		  mapped_buffer->flags,
		  nvgpu_aperture_str(g,
				     gk20a_dmabuf_aperture(g, os_buf->dmabuf)));

	return mapped_buffer;
}

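/*
 * Pin the dma_buf, validate its aperture, wrap the resulting sg_table in
 * an nvgpu_sgt, and hand the buffer to the core nvgpu_vm_map(). On
 * success, the Linux-specific handles (dmabuf, attachment, sgt) are
 * stashed in the mapped buffer's os_priv for use at unmap time.
 */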
int nvgpu_vm_map_linux(struct vm_gk20a *vm,
		       struct dma_buf *dmabuf,
		       u64 map_addr,
		       u32 flags,
		       u32 page_size,
		       s16 compr_kind,
		       s16 incompr_kind,
		       int rw_flag,
		       u64 buffer_offset,
		       u64 mapping_size,
		       struct vm_gk20a_mapping_batch *batch,
		       u64 *gpu_va)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct device *dev = dev_from_gk20a(g);
	struct nvgpu_os_buffer os_buf;
	struct sg_table *sgt;
	struct nvgpu_sgt *nvgpu_sgt = NULL;
	struct nvgpu_mapped_buf *mapped_buffer = NULL;
	struct dma_buf_attachment *attachment;
	int err = 0;

	sgt = gk20a_mm_pin(dev, dmabuf, &attachment);
	if (IS_ERR(sgt)) {
		nvgpu_warn(g, "Failed to pin dma_buf!");
		return PTR_ERR(sgt);
	}
	os_buf.dmabuf = dmabuf;
	os_buf.attachment = attachment;
	os_buf.dev = dev;

	if (gk20a_dmabuf_aperture(g, dmabuf) == APERTURE_INVALID) {
		err = -EINVAL;
		goto clean_up;
	}

	nvgpu_sgt = nvgpu_linux_sgt_create(g, sgt);
	if (!nvgpu_sgt) {
		err = -ENOMEM;
		goto clean_up;
	}

	mapped_buffer = nvgpu_vm_map(vm,
				     &os_buf,
				     nvgpu_sgt,
				     map_addr,
				     mapping_size,
				     buffer_offset,
				     rw_flag,
				     flags,
				     compr_kind,
				     incompr_kind,
				     batch,
				     gk20a_dmabuf_aperture(g, dmabuf));

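	/*
	 * The nvgpu_sgt wrapper is only needed while nvgpu_vm_map() builds
	 * the mapping; the underlying sg_table stays pinned until unmap (or
	 * until the failure path below).
	 */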
	nvgpu_sgt_free(g, nvgpu_sgt);

	if (IS_ERR(mapped_buffer)) {
		err = PTR_ERR(mapped_buffer);
		goto clean_up;
	}

	mapped_buffer->os_priv.dmabuf = dmabuf;
	mapped_buffer->os_priv.attachment = attachment;
	mapped_buffer->os_priv.sgt = sgt;

	*gpu_va = mapped_buffer->addr;
	return 0;

clean_up:
	gk20a_mm_unpin(dev, dmabuf, attachment, sgt);

	return err;
}

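/*
 * IOCTL-facing entry point: convert the userspace fd into a dma_buf
 * reference, sanity-check the fixed/non-fixed argument combinations and
 * the buffer bounds, then delegate to nvgpu_vm_map_linux(). The dma_buf
 * reference taken here is dropped on every failure path; on success it
 * is held until the buffer is unmapped.
 */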
int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
			int dmabuf_fd,
			u64 *map_addr,
			u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/
			u32 page_size,
			s16 compr_kind,
			s16 incompr_kind,
			u64 buffer_offset,
			u64 mapping_size,
			struct vm_gk20a_mapping_batch *batch)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct dma_buf *dmabuf;
	u64 ret_va;
	int err = 0;

	/* get ref to the mem handle (released on unmap_locked) */
	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf)) {
		nvgpu_warn(g, "%s: fd %d is not a dmabuf",
			   __func__, dmabuf_fd);
		return PTR_ERR(dmabuf);
	}

	/*
	 * For regular maps we do not accept either an input address or a
	 * buffer_offset.
	 */
	if (!(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) &&
	    (buffer_offset || *map_addr)) {
		nvgpu_err(g,
			  "Regular map with addr/buf offset is not supported!");
		dma_buf_put(dmabuf);
		return -EINVAL;
	}

	/*
	 * Map size is always buffer size for non-fixed mappings. As such, map
	 * size should be left as zero by userspace for non-fixed maps.
	 */
	if (mapping_size && !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
		nvgpu_err(g, "map_size && non-fixed-mapping!");
		dma_buf_put(dmabuf);
		return -EINVAL;
	}

	/* verify that we're not overflowing the buffer, i.e.
	 * (buffer_offset + mapping_size) > dmabuf->size.
	 *
	 * Since buffer_offset + mapping_size could overflow, first check
	 * that mapping_size < dmabuf_size, at which point we can subtract
	 * mapping_size from both sides for the final comparison.
	 */
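	/*
	 * Concrete example: with dmabuf->size = 0x10000, buffer_offset =
	 * 0xfffffffffffff000 and mapping_size = 0x2000, the naive sum wraps
	 * to 0x1000 and would pass, while the reordered checks below
	 * correctly reject it.
	 */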
	if ((mapping_size > dmabuf->size) ||
	    (buffer_offset > (dmabuf->size - mapping_size))) {
		nvgpu_err(g,
			  "buf size %llx < (offset(%llx) + map_size(%llx))",
			  (u64)dmabuf->size, buffer_offset, mapping_size);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}

	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev_from_vm(vm));
	if (err) {
		dma_buf_put(dmabuf);
		return err;
	}

	err = nvgpu_vm_map_linux(vm, dmabuf, *map_addr,
				 nvgpu_vm_translate_linux_flags(g, flags),
				 page_size,
				 compr_kind, incompr_kind,
				 gk20a_mem_flag_none,
				 buffer_offset,
				 mapping_size,
				 batch,
				 &ret_va);

	if (!err)
		*map_addr = ret_va;
	else
		dma_buf_put(dmabuf);

	return err;
}

/*
 * This is the function call-back for freeing OS specific components of an
 * nvgpu_mapped_buf. This should most likely never be called outside of the
 * core MM framework!
 *
 * Note: the VM lock will be held.
 */
void nvgpu_vm_unmap_system(struct nvgpu_mapped_buf *mapped_buffer)
{
	struct vm_gk20a *vm = mapped_buffer->vm;

	gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->os_priv.dmabuf,
		       mapped_buffer->os_priv.attachment,
		       mapped_buffer->os_priv.sgt);

	dma_buf_put(mapped_buffer->os_priv.dmabuf);
}
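
For context, below is a minimal sketch of how nvgpu_vm_map_buffer() is typically driven from the address-space ioctl layer. It is illustrative only: the caller name and the nvgpu_as_map_buffer_ex_args field names are assumptions inferred from the parameter list above, not definitions from this file.

/* Hypothetical ioctl-layer caller; names here are illustrative assumptions. */
static int gk20a_as_ioctl_map_buffer_ex(struct gk20a_as_share *as_share,
					struct nvgpu_as_map_buffer_ex_args *args)
{
	/* For fixed-offset maps, args->offset carries the requested GPU VA
	 * in; on success it receives the resulting GPU VA. */
	u64 map_addr = args->offset;
	int err;

	err = nvgpu_vm_map_buffer(as_share->vm,
				  args->dmabuf_fd,
				  &map_addr,
				  args->flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_* */
				  args->page_size,
				  args->compr_kind,
				  args->incompr_kind,
				  args->buffer_offset,
				  args->mapping_size,
				  NULL); /* no mapping batch */
	if (!err)
		args->offset = map_addr;

	return err;
}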