Diffstat (limited to 'include/nvgpu/vm.h')
-rw-r--r--   include/nvgpu/vm.h   330
1 file changed, 0 insertions, 330 deletions
diff --git a/include/nvgpu/vm.h b/include/nvgpu/vm.h
deleted file mode 100644
index 3867c74..0000000
--- a/include/nvgpu/vm.h
+++ /dev/null
@@ -1,330 +0,0 @@
/*
 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_VM_H
#define NVGPU_VM_H

#include <nvgpu/kref.h>
#include <nvgpu/list.h>
#include <nvgpu/rbtree.h>
#include <nvgpu/types.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/allocator.h>

struct vm_gk20a;
struct nvgpu_vm_area;
struct gk20a_comptag_allocator;

/*
 * Defined by each OS. Allows the common VM code to do things to the OS
 * specific buffer structures.
 */
struct nvgpu_os_buffer;

#ifdef __KERNEL__
#include <nvgpu/linux/vm.h>
#elif defined(__NVGPU_POSIX__)
#include <nvgpu/posix/vm.h>
#else
/* QNX include goes here. */
#include <nvgpu_rmos/include/vm.h>
#endif

/**
 * This header contains the OS-agnostic APIs for dealing with VMs. Most of the
 * VM implementation is system specific - it must translate from a platform's
 * representation of DMA'able memory to our nvgpu_mem notion.
 *
 * However, some parts are platform agnostic. VM ref-counting and the VM struct
 * itself are platform agnostic. Also, the initialization and destruction of
 * VMs are the same across all platforms (for now).
 *
 * VM Architecture:
 * ----------------
 *
 * The VM management in nvgpu is split up as follows: a vm_gk20a struct
 * defines an address space. Each address space is a set of page tables and a
 * GPU Virtual Address (GVA) allocator. Any number of channels may bind to a VM.
 *
 *   +----+ +----+   +----+   +-----+   +-----+
 *   | C1 | | C2 |...| Cn |   | VM1 |...| VMn |
 *   +-+--+ +-+--+   +-+--+   +--+--+   +--+--+
 *     |      |        |         |         |
 *     |      |        +----->---+         |
 *     |      +-------------->---+         |
 *     +------------------------>----------+
 *
 * Each VM also manages a set of mapped buffers (struct nvgpu_mapped_buf)
 * which correspond to _user space_ buffers which have been mapped into this
 * VM. Kernel space mappings (created by nvgpu_gmmu_map()) are not tracked by
 * VMs. This may be an architectural bug, but for now it seems to be OK. VMs
 * can be closed in various ways - ref-counts hitting zero, direct calls to
 * the remove routine, etc. Note: this is going to change. VM cleanup is going
 * to be homogenized around ref-counts. When a VM is closed all mapped buffers
 * in the VM are unmapped from the GMMU. This means that those mappings will
 * no longer be valid and any subsequent access by the GPU will fault. That
 * means one must ensure the VM is not in use before closing it.
 *
 * VMs may also contain VM areas (struct nvgpu_vm_area) which are created for
 * the purpose of sparse and/or fixed mappings. If userspace wishes to create
 * a fixed mapping it must first create a VM area - either with a fixed
 * address or not. VM areas are reserved - other mapping operations will not
 * use the space. Userspace may then create fixed mappings within that VM
 * area.
 */
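
/*
 * Illustrative sketch, not part of the original header: the ref-counting
 * pattern described above. A consumer (here a made-up struct my_channel)
 * takes a reference on the VM it binds to and drops it again when it
 * unbinds; the VM is only torn down once the last reference is put, which
 * is how one ensures a VM is not in use before it is closed.
 * (nvgpu_vm_get()/nvgpu_vm_put() are declared further below.)
 */
struct my_channel {
        struct vm_gk20a *vm;
};

static void my_channel_bind_vm(struct my_channel *ch, struct vm_gk20a *vm)
{
        nvgpu_vm_get(vm);       /* hold the VM for the lifetime of the bind */
        ch->vm = vm;
}

static void my_channel_unbind_vm(struct my_channel *ch)
{
        nvgpu_vm_put(ch->vm);   /* may trigger VM teardown on the last put */
        ch->vm = NULL;
}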
94 | |||
95 | /* map/unmap batch state */ | ||
96 | struct vm_gk20a_mapping_batch { | ||
97 | bool gpu_l2_flushed; | ||
98 | bool need_tlb_invalidate; | ||
99 | }; | ||
100 | |||
101 | struct nvgpu_mapped_buf { | ||
102 | struct vm_gk20a *vm; | ||
103 | struct nvgpu_vm_area *vm_area; | ||
104 | |||
105 | struct nvgpu_ref ref; | ||
106 | |||
107 | struct nvgpu_rbtree_node node; | ||
108 | struct nvgpu_list_node buffer_list; | ||
109 | u64 addr; | ||
110 | u64 size; | ||
111 | |||
112 | u32 pgsz_idx; | ||
113 | |||
114 | u32 flags; | ||
115 | u32 kind; | ||
116 | bool va_allocated; | ||
117 | |||
        /*
         * Kept separate from the nvgpu_os_buffer struct to clearly
         * distinguish lifetime: a nvgpu_mapped_buf_priv will _always_ be
         * wrapped by a struct nvgpu_mapped_buf, whereas a struct
         * nvgpu_os_buffer may exist on its own. This aims to prevent
         * dangerous usage of container_of() or the like in OS code.
         */
        struct nvgpu_mapped_buf_priv os_priv;
};

static inline struct nvgpu_mapped_buf *
nvgpu_mapped_buf_from_buffer_list(struct nvgpu_list_node *node)
{
        return (struct nvgpu_mapped_buf *)
                ((uintptr_t)node - offsetof(struct nvgpu_mapped_buf,
                                            buffer_list));
}

static inline struct nvgpu_mapped_buf *
mapped_buffer_from_rbtree_node(struct nvgpu_rbtree_node *node)
{
        return (struct nvgpu_mapped_buf *)
                ((uintptr_t)node - offsetof(struct nvgpu_mapped_buf, node));
}
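
/*
 * Illustrative sketch, not part of the original header: the two helpers
 * above recover the containing nvgpu_mapped_buf from an embedded rbtree or
 * list node, in the style of container_of(). For example, given an rbtree
 * node obtained from a lookup in vm->mapped_buffers (the lookup itself is
 * not shown here), the mapping's GPU VA can be read back like this:
 */
static inline u64 my_mapped_buf_addr(struct nvgpu_rbtree_node *node)
{
        struct nvgpu_mapped_buf *buf = mapped_buffer_from_rbtree_node(node);

        return buf->addr;       /* GPU virtual address of the mapping */
}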
142 | |||
143 | struct vm_gk20a { | ||
144 | struct mm_gk20a *mm; | ||
145 | struct gk20a_as_share *as_share; /* as_share this represents */ | ||
146 | char name[20]; | ||
147 | |||
148 | u64 va_start; | ||
149 | u64 va_limit; | ||
150 | |||
151 | int num_user_mapped_buffers; | ||
152 | |||
153 | bool big_pages; /* enable large page support */ | ||
154 | bool enable_ctag; | ||
155 | bool guest_managed; /* whether the vm addr space is managed by guest */ | ||
156 | |||
157 | u32 big_page_size; | ||
158 | |||
159 | bool userspace_managed; | ||
160 | |||
161 | const struct gk20a_mmu_level *mmu_levels; | ||
162 | |||
163 | struct nvgpu_ref ref; | ||
164 | |||
165 | struct nvgpu_mutex update_gmmu_lock; | ||
166 | |||
167 | struct nvgpu_gmmu_pd pdb; | ||
168 | |||
        /*
         * These structs define the address spaces. In some cases it's
         * possible to merge address spaces (user and user_lp) and in other
         * cases it's not. vma[] allows the code to be agnostic to this by
         * always accessing the address spaces through this pointer array
         * (see the illustrative sketch after this struct).
         */
        struct nvgpu_allocator *vma[GMMU_NR_PAGE_SIZES];
        struct nvgpu_allocator kernel;
        struct nvgpu_allocator user;
        struct nvgpu_allocator user_lp;

        struct nvgpu_rbtree_node *mapped_buffers;

        struct nvgpu_list_node vm_area_list;

#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
        u64 handle;
#endif
        u32 gmmu_page_sizes[GMMU_NR_PAGE_SIZES];

        /*
         * If non-NULL, kref_put will use this batch when unmapping. Must
         * hold vm->update_gmmu_lock.
         */
        struct vm_gk20a_mapping_batch *kref_put_batch;

        /*
         * Each address space needs to have a semaphore pool.
         */
        struct nvgpu_semaphore_pool *sema_pool;

        /*
         * Create a read-only map for the sync point range. Channels sharing
         * the same VM also share the same sync point RO map.
         */
        u64 syncpt_ro_map_gpu_va;
        /* Protects allocation of the sync point map. */
        struct nvgpu_mutex syncpt_ro_map_lock;
};
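
/*
 * Illustrative sketch, not part of the original header: because vma[] may
 * alias the same allocator for several page sizes (when the user and
 * user_lp address spaces are merged), callers select an address space by
 * page-size index rather than by naming a specific allocator. The
 * GMMU_PAGE_SIZE_BIG index is assumed here to come from <nvgpu/gmmu.h>.
 */
static inline struct nvgpu_allocator *my_big_page_vma(struct vm_gk20a *vm)
{
        return vm->vma[GMMU_PAGE_SIZE_BIG];
}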
206 | |||
207 | /* | ||
208 | * Mapping flags. | ||
209 | */ | ||
210 | #define NVGPU_VM_MAP_FIXED_OFFSET BIT32(0) | ||
211 | #define NVGPU_VM_MAP_CACHEABLE BIT32(1) | ||
212 | #define NVGPU_VM_MAP_IO_COHERENT BIT32(2) | ||
213 | #define NVGPU_VM_MAP_UNMAPPED_PTE BIT32(3) | ||
214 | #define NVGPU_VM_MAP_DIRECT_KIND_CTRL BIT32(4) | ||
215 | #define NVGPU_VM_MAP_L3_ALLOC BIT32(5) | ||
216 | #define NVGPU_VM_MAP_PLATFORM_ATOMIC BIT32(6) | ||
217 | |||
#define NVGPU_KIND_INVALID      -1

void nvgpu_vm_get(struct vm_gk20a *vm);
void nvgpu_vm_put(struct vm_gk20a *vm);

int vm_aspace_id(struct vm_gk20a *vm);
bool nvgpu_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size);

int nvgpu_vm_pde_coverage_bit_count(struct vm_gk20a *vm);

/* batching eliminates redundant cache flushes and invalidates */
void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *batch);
void nvgpu_vm_mapping_batch_finish(
        struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *batch);
/* called when holding vm->update_gmmu_lock */
void nvgpu_vm_mapping_batch_finish_locked(
        struct vm_gk20a *vm, struct vm_gk20a_mapping_batch *batch);

/* get reference to all currently mapped buffers */
int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
                         struct nvgpu_mapped_buf ***mapped_buffers,
                         int *num_buffers);
/* put references on the given buffers */
void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
                          struct nvgpu_mapped_buf **mapped_buffers,
                          int num_buffers);
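
/*
 * Illustrative sketch, not part of the original header: snapshotting the
 * currently mapped buffers. nvgpu_vm_get_buffers() hands back a referenced
 * array that must be released again with nvgpu_vm_put_buffers(). The
 * my_inspect_buffer() callback is a made-up name for illustration.
 */
static int my_for_each_mapped_buf(struct vm_gk20a *vm,
                void (*my_inspect_buffer)(struct nvgpu_mapped_buf *buf))
{
        struct nvgpu_mapped_buf **bufs;
        int num, i, err;

        err = nvgpu_vm_get_buffers(vm, &bufs, &num);
        if (err != 0) {
                return err;
        }

        for (i = 0; i < num; i++) {
                my_inspect_buffer(bufs[i]);
        }

        nvgpu_vm_put_buffers(vm, bufs, num);
        return 0;
}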
244 | |||
245 | struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm, | ||
246 | struct nvgpu_os_buffer *os_buf, | ||
247 | u64 map_addr, | ||
248 | u32 flags, | ||
249 | int kind); | ||
250 | |||
251 | struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | ||
252 | struct nvgpu_os_buffer *os_buf, | ||
253 | struct nvgpu_sgt *sgt, | ||
254 | u64 map_addr, | ||
255 | u64 map_size, | ||
256 | u64 phys_offset, | ||
257 | int rw, | ||
258 | u32 flags, | ||
259 | s16 compr_kind, | ||
260 | s16 incompr_kind, | ||
261 | struct vm_gk20a_mapping_batch *batch, | ||
262 | enum nvgpu_aperture aperture); | ||
263 | |||
264 | void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset, | ||
265 | struct vm_gk20a_mapping_batch *batch); | ||
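
/*
 * Illustrative sketch, not part of the original header: using the mapping
 * batch declared above so that a series of unmaps results in a single L2
 * flush / TLB invalidate when the batch is finished, instead of one per
 * call. The addrs/count inputs are hypothetical.
 */
static void my_unmap_many(struct vm_gk20a *vm, const u64 *addrs, int count)
{
        struct vm_gk20a_mapping_batch batch;
        int i;

        nvgpu_vm_mapping_batch_start(&batch);

        for (i = 0; i < count; i++) {
                nvgpu_vm_unmap(vm, addrs[i], &batch);
        }

        nvgpu_vm_mapping_batch_finish(vm, &batch);
}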
266 | |||
267 | /* | ||
268 | * Implemented by each OS. Called from within the core VM code to handle OS | ||
269 | * specific components of an nvgpu_mapped_buf. | ||
270 | */ | ||
271 | void nvgpu_vm_unmap_system(struct nvgpu_mapped_buf *mapped_buffer); | ||
272 | |||
273 | /* | ||
274 | * Don't use this outside of the core VM code! | ||
275 | */ | ||
276 | void __nvgpu_vm_unmap_ref(struct nvgpu_ref *ref); | ||
277 | |||
278 | u64 nvgpu_os_buf_get_size(struct nvgpu_os_buffer *os_buf); | ||
279 | |||
280 | /* | ||
281 | * These all require the VM update lock to be held. | ||
282 | */ | ||
283 | struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf( | ||
284 | struct vm_gk20a *vm, u64 addr); | ||
285 | struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range( | ||
286 | struct vm_gk20a *vm, u64 addr); | ||
287 | struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than( | ||
288 | struct vm_gk20a *vm, u64 addr); | ||
289 | |||
290 | int nvgpu_insert_mapped_buf(struct vm_gk20a *vm, | ||
291 | struct nvgpu_mapped_buf *mapped_buffer); | ||
292 | void nvgpu_remove_mapped_buf(struct vm_gk20a *vm, | ||
293 | struct nvgpu_mapped_buf *mapped_buffer); | ||
294 | |||
295 | /* | ||
296 | * Initialize a preallocated vm | ||
297 | */ | ||
298 | int __nvgpu_vm_init(struct mm_gk20a *mm, | ||
299 | struct vm_gk20a *vm, | ||
300 | u32 big_page_size, | ||
301 | u64 low_hole, | ||
302 | u64 kernel_reserved, | ||
303 | u64 aperture_size, | ||
304 | bool big_pages, | ||
305 | bool userspace_managed, | ||
306 | char *name); | ||
307 | |||
308 | struct vm_gk20a *nvgpu_vm_init(struct gk20a *g, | ||
309 | u32 big_page_size, | ||
310 | u64 low_hole, | ||
311 | u64 kernel_reserved, | ||
312 | u64 aperture_size, | ||
313 | bool big_pages, | ||
314 | bool userspace_managed, | ||
315 | char *name); | ||
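
/*
 * Illustrative sketch, not part of the original header: creating a VM with
 * nvgpu_vm_init() and releasing it with nvgpu_vm_put(). The size values
 * below (64KB big pages, 64MB low hole, 4GB kernel reserved area, 128GB
 * aperture) are made-up example numbers, not recommendations.
 */
static struct vm_gk20a *my_create_example_vm(struct gk20a *g)
{
        return nvgpu_vm_init(g,
                             64U * 1024U,               /* big_page_size */
                             64ULL * 1024ULL * 1024ULL, /* low_hole */
                             4ULL << 30,                /* kernel_reserved */
                             128ULL << 30,              /* aperture_size */
                             true,                      /* big_pages */
                             false,                     /* userspace_managed */
                             "my_example_vm");
}

static void my_destroy_example_vm(struct vm_gk20a *vm)
{
        nvgpu_vm_put(vm);       /* drop the creation reference; teardown on last put */
}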
316 | |||
317 | /* | ||
318 | * These are private to the VM code but are unfortunately used by the vgpu code. | ||
319 | * It appears to be used for an optimization in reducing the number of server | ||
320 | * requests to the vgpu server. Basically the vgpu implementation of | ||
321 | * map_global_ctx_buffers() sends a bunch of VA ranges over to the RM server. | ||
322 | * Ideally the RM server can just batch mappings but until such a time this | ||
323 | * will be used by the vgpu code. | ||
324 | */ | ||
325 | u64 __nvgpu_vm_alloc_va(struct vm_gk20a *vm, u64 size, | ||
326 | u32 pgsz_idx); | ||
327 | int __nvgpu_vm_free_va(struct vm_gk20a *vm, u64 addr, | ||
328 | u32 pgsz_idx); | ||
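
/*
 * Illustrative sketch, not part of the original header: pairing the VA
 * allocation and free helpers above in the way the vgpu path does -- a GPU
 * VA range is reserved first and handed to the server, and released again
 * if the remote mapping fails. my_send_range_to_server() is a made-up
 * stand-in for the RM server call, and a zero return from
 * __nvgpu_vm_alloc_va() is assumed to signal allocation failure.
 */
static u64 my_vgpu_map_range(struct vm_gk20a *vm, u64 size, u32 pgsz_idx,
                             int (*my_send_range_to_server)(u64 va, u64 size))
{
        u64 va = __nvgpu_vm_alloc_va(vm, size, pgsz_idx);

        if (va == 0ULL) {
                return 0ULL;            /* VA allocation failed */
        }

        if (my_send_range_to_server(va, size) != 0) {
                __nvgpu_vm_free_va(vm, va, pgsz_idx);
                return 0ULL;            /* remote mapping failed */
        }

        return va;
}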
329 | |||
330 | #endif /* NVGPU_VM_H */ | ||