Diffstat (limited to 'include/nvgpu/allocator.h')
 -rw-r--r--  include/nvgpu/allocator.h  331
 1 file changed, 0 insertions, 331 deletions

diff --git a/include/nvgpu/allocator.h b/include/nvgpu/allocator.h
deleted file mode 100644
index c444543..0000000
--- a/include/nvgpu/allocator.h
+++ /dev/null
@@ -1,331 +0,0 @@
/*
 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_ALLOCATOR_H
#define NVGPU_ALLOCATOR_H

#ifdef __KERNEL__
/*
 * The Linux kernel has this notion of seq_files for printing info to userspace.
 * One of the allocator function pointers takes advantage of this and allows the
 * debug output to be directed either to nvgpu_log() or a seq_file.
 */
#include <linux/seq_file.h>
#endif

#include <nvgpu/log.h>
#include <nvgpu/lock.h>
#include <nvgpu/list.h>
#include <nvgpu/types.h>

/* #define ALLOCATOR_DEBUG_FINE */

struct nvgpu_allocator;
struct nvgpu_alloc_carveout;
struct vm_gk20a;
struct gk20a;

/*
 * Operations for an allocator to implement.
 */
struct nvgpu_allocator_ops {
        u64 (*alloc)(struct nvgpu_allocator *allocator, u64 len);
        u64 (*alloc_pte)(struct nvgpu_allocator *allocator, u64 len,
                         u32 page_size);
        void (*free)(struct nvgpu_allocator *allocator, u64 addr);

        /*
         * Special interface to allocate a memory region with a specific
         * starting address. Yikes. Note: if free() works for freeing both
         * regular and fixed allocations then free_fixed() does not need to
         * be implemented. This behavior exists for legacy reasons and should
         * not be propagated to new allocators.
         *
         * For allocators where the @page_size field is not applicable it can
         * be left as 0. Otherwise a valid page size should be passed (4k or
         * the large page size).
         */
        u64 (*alloc_fixed)(struct nvgpu_allocator *allocator,
                           u64 base, u64 len, u32 page_size);
        void (*free_fixed)(struct nvgpu_allocator *allocator,
                           u64 base, u64 len);

        /*
         * Allow allocators to reserve space for carveouts.
         */
        int (*reserve_carveout)(struct nvgpu_allocator *allocator,
                                struct nvgpu_alloc_carveout *co);
        void (*release_carveout)(struct nvgpu_allocator *allocator,
                                 struct nvgpu_alloc_carveout *co);

        /*
         * Returns info about the allocator.
         */
        u64 (*base)(struct nvgpu_allocator *allocator);
        u64 (*length)(struct nvgpu_allocator *allocator);
        u64 (*end)(struct nvgpu_allocator *allocator);
        bool (*inited)(struct nvgpu_allocator *allocator);
        u64 (*space)(struct nvgpu_allocator *allocator);

        /* Destructor. */
        void (*fini)(struct nvgpu_allocator *allocator);

#ifdef __KERNEL__
        /* Debugging. */
        void (*print_stats)(struct nvgpu_allocator *allocator,
                            struct seq_file *s, int lock);
#endif
};
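
/*
 * Illustration (not in the original header): the nvgpu_alloc()/nvgpu_free()
 * wrappers declared further down are expected to do little more than
 * dispatch through this ops table. A minimal sketch, assuming an op may
 * legitimately be left NULL:
 *
 *      u64 nvgpu_alloc(struct nvgpu_allocator *a, u64 len)
 *      {
 *              if (a->ops->alloc == NULL)
 *                      return 0;
 *              return a->ops->alloc(a, len);
 *      }
 */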

struct nvgpu_allocator {
        struct gk20a *g;

        char name[32];
        struct nvgpu_mutex lock;

        void *priv;
        const struct nvgpu_allocator_ops *ops;

        struct dentry *debugfs_entry;
        bool debug; /* Control for debug msgs. */
};

struct nvgpu_alloc_carveout {
        const char *name;
        u64 base;
        u64 length;

        struct nvgpu_allocator *allocator;

        /*
         * For usage by the allocator implementation.
         */
        struct nvgpu_list_node co_entry;
};

static inline struct nvgpu_alloc_carveout *
nvgpu_alloc_carveout_from_co_entry(struct nvgpu_list_node *node)
{
        return (struct nvgpu_alloc_carveout *)
                ((uintptr_t)node - offsetof(struct nvgpu_alloc_carveout,
                                            co_entry));
}
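
/*
 * Illustration (not in the original header): the helper above is the usual
 * container_of pattern, recovering a carveout from its embedded list node.
 * Assuming a hypothetical list head "co_list" kept in an allocator's
 * private data, iteration would look something like:
 *
 *      struct nvgpu_alloc_carveout *co;
 *
 *      nvgpu_list_for_each_entry(co, &priv->co_list,
 *                                nvgpu_alloc_carveout, co_entry) {
 *              inspect(co);    <-- hypothetical per-carveout work
 *      }
 */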

#define NVGPU_CARVEOUT(local_name, local_base, local_length)   \
        {                                                       \
                .name = (local_name),                           \
                .base = (local_base),                           \
                .length = (local_length)                        \
        }
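
/*
 * Illustration (hypothetical names and values): a carveout is typically
 * defined with the macro above and then reserved against an allocator:
 *
 *      static struct nvgpu_alloc_carveout bootstrap_co =
 *              NVGPU_CARVEOUT("bootstrap", 0x0, 0x100000);
 *
 *      err = nvgpu_alloc_reserve_carveout(&some_allocator, &bootstrap_co);
 */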

/*
 * These are the available allocator flags.
 *
 * GPU_ALLOC_GVA_SPACE
 *
 * This flag makes sense for the buddy allocator only. It specifies that the
 * allocator will be used for managing a GVA space. When managing GVA spaces
 * special care has to be taken to ensure that allocations of similar PTE
 * sizes are placed in the same PDE block. This allows the higher level
 * code to skip defining both small and large PTE tables for every PDE. That
 * can save considerable memory for address spaces that have a lot of
 * allocations.
 *
 * GPU_ALLOC_NO_ALLOC_PAGE
 *
 * For any allocator that needs to manage a resource in a latency critical
 * path this flag specifies that the allocator should not use any kmalloc()
 * or similar functions during normal operation. Initialization routines
 * may still use kmalloc(). This prevents the possibility of long waits for
 * pages when using alloc_page(). Currently only the bitmap allocator
 * implements this functionality.
 *
 * Also note that if you accept this flag then you must also define the
 * free_fixed() function. Since no metadata is allocated to help free
 * allocations you need to keep track of the metadata yourself (in this
 * case the base and length of the allocation, as opposed to just the base
 * of the allocation).
 *
 * GPU_ALLOC_4K_VIDMEM_PAGES
 *
 * We manage vidmem pages at a large page granularity for performance
 * reasons; however, this can lead to wasted memory. For page allocators
 * setting this flag will tell the allocator to manage pools of 4K pages
 * inside internally allocated large pages.
 *
 * Currently this flag is ignored since the only usage of the page allocator
 * uses a 4K block size already. However, this flag has been reserved since
 * it will be necessary in the future.
 *
 * GPU_ALLOC_FORCE_CONTIG
 *
 * Force allocations to be contiguous. Currently only relevant for page
 * allocators since all other allocators are naturally contiguous.
 *
 * GPU_ALLOC_NO_SCATTER_GATHER
 *
 * The page allocator normally returns a scatter gather data structure for
 * allocations (to handle discontiguous pages). However, at times that can
 * be annoying, so this flag forces the page allocator to return a u64
 * pointing to the allocation base (requires GPU_ALLOC_FORCE_CONTIG to be
 * set as well).
 */
#define GPU_ALLOC_GVA_SPACE             BIT64(0)
#define GPU_ALLOC_NO_ALLOC_PAGE         BIT64(1)
#define GPU_ALLOC_4K_VIDMEM_PAGES       BIT64(2)
#define GPU_ALLOC_FORCE_CONTIG          BIT64(3)
#define GPU_ALLOC_NO_SCATTER_GATHER     BIT64(4)
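
/*
 * Illustration (hypothetical usage): flags are OR'd together and passed to
 * an initializer, e.g. a page allocator that must hand back a plain base
 * address rather than a scatter-gather list:
 *
 *      err = nvgpu_page_allocator_init(g, &a, "vidmem", base, size, 4096ULL,
 *                                      GPU_ALLOC_FORCE_CONTIG |
 *                                      GPU_ALLOC_NO_SCATTER_GATHER);
 */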

static inline void alloc_lock(struct nvgpu_allocator *a)
{
        nvgpu_mutex_acquire(&a->lock);
}

static inline void alloc_unlock(struct nvgpu_allocator *a)
{
        nvgpu_mutex_release(&a->lock);
}
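
/*
 * Illustration (hypothetical allocator internals): implementations are
 * expected to guard their private state with the helpers above, e.g.:
 *
 *      static u64 my_alloc(struct nvgpu_allocator *na, u64 len)
 *      {
 *              u64 addr;
 *
 *              alloc_lock(na);
 *              addr = my_carve(na->priv, len);   <-- hypothetical helper
 *              alloc_unlock(na);
 *
 *              return addr;
 *      }
 */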

/*
 * Buddy allocator specific initializers.
 */
int nvgpu_buddy_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
                               struct vm_gk20a *vm, const char *name,
                               u64 base, u64 size, u64 blk_size,
                               u64 max_order, u64 flags);

/*
 * Bitmap initializers.
 */
int nvgpu_bitmap_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
                                const char *name, u64 base, u64 length,
                                u64 blk_size, u64 flags);

/*
 * Page allocator initializers.
 */
int nvgpu_page_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
                              const char *name, u64 base, u64 length,
                              u64 blk_size, u64 flags);

/*
 * Lockless allocator initializers.
 * Note: this allocator can only allocate fixed-size structures of a
 * pre-defined size.
 */
int nvgpu_lockless_allocator_init(struct gk20a *g, struct nvgpu_allocator *na,
                                  const char *name, u64 base, u64 length,
                                  u64 struct_size, u64 flags);

#define GPU_BALLOC_MAX_ORDER 31U

/*
 * Allocator APIs.
 */
u64 nvgpu_alloc(struct nvgpu_allocator *allocator, u64 len);
u64 nvgpu_alloc_pte(struct nvgpu_allocator *a, u64 len, u32 page_size);
void nvgpu_free(struct nvgpu_allocator *allocator, u64 addr);

u64 nvgpu_alloc_fixed(struct nvgpu_allocator *allocator, u64 base, u64 len,
                      u32 page_size);
void nvgpu_free_fixed(struct nvgpu_allocator *allocator, u64 base, u64 len);

int nvgpu_alloc_reserve_carveout(struct nvgpu_allocator *a,
                                 struct nvgpu_alloc_carveout *co);
void nvgpu_alloc_release_carveout(struct nvgpu_allocator *a,
                                  struct nvgpu_alloc_carveout *co);

u64 nvgpu_alloc_base(struct nvgpu_allocator *a);
u64 nvgpu_alloc_length(struct nvgpu_allocator *a);
u64 nvgpu_alloc_end(struct nvgpu_allocator *a);
bool nvgpu_alloc_initialized(struct nvgpu_allocator *a);
u64 nvgpu_alloc_space(struct nvgpu_allocator *a);

void nvgpu_alloc_destroy(struct nvgpu_allocator *allocator);
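
/*
 * Illustration (hypothetical usage): a typical life cycle through the
 * wrappers above, here with a buddy allocator backing a VA range and
 * assuming a return of 0 from nvgpu_alloc() indicates failure:
 *
 *      struct nvgpu_allocator va;
 *      u64 addr;
 *      int err;
 *
 *      err = nvgpu_buddy_allocator_init(g, &va, vm, "gva", base, size,
 *                                       4096ULL, 0ULL, GPU_ALLOC_GVA_SPACE);
 *      if (err != 0)
 *              return err;
 *
 *      addr = nvgpu_alloc(&va, 65536ULL);
 *      if (addr != 0ULL)
 *              nvgpu_free(&va, addr);
 *
 *      nvgpu_alloc_destroy(&va);
 */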

#ifdef __KERNEL__
void nvgpu_alloc_print_stats(struct nvgpu_allocator *a,
                             struct seq_file *s, int lock);
#endif

static inline struct gk20a *nvgpu_alloc_to_gpu(struct nvgpu_allocator *a)
{
        return a->g;
}

#ifdef CONFIG_DEBUG_FS
/*
 * Common functionality for the internals of the allocators.
 */
void nvgpu_init_alloc_debug(struct gk20a *g, struct nvgpu_allocator *a);
void nvgpu_fini_alloc_debug(struct nvgpu_allocator *a);
#endif

int nvgpu_alloc_common_init(struct nvgpu_allocator *a, struct gk20a *g,
                            const char *name, void *priv, bool dbg,
                            const struct nvgpu_allocator_ops *ops);
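
/*
 * Illustration (hypothetical allocator): a new allocator implementation
 * would wire its ops table up through nvgpu_alloc_common_init():
 *
 *      static const struct nvgpu_allocator_ops my_ops = {
 *              .alloc = my_alloc,
 *              .free = my_free,
 *              .fini = my_fini,
 *      };
 *
 *      err = nvgpu_alloc_common_init(na, g, "my_alloc", my_priv, false,
 *                                    &my_ops);
 */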

static inline void nvgpu_alloc_enable_dbg(struct nvgpu_allocator *a)
{
        a->debug = true;
}

static inline void nvgpu_alloc_disable_dbg(struct nvgpu_allocator *a)
{
        a->debug = false;
}

/*
 * Debug stuff.
 */
#ifdef __KERNEL__
#define __alloc_pstat(seq, allocator, fmt, arg...)              \
        do {                                                    \
                if (seq)                                        \
                        seq_printf(seq, fmt "\n", ##arg);       \
                else                                            \
                        alloc_dbg(allocator, fmt, ##arg);       \
        } while (0)
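
/*
 * Illustration (hypothetical usage): a print_stats() implementation can
 * target either sink through the macro above:
 *
 *      static void my_print_stats(struct nvgpu_allocator *na,
 *                                 struct seq_file *s, int lock)
 *      {
 *              __alloc_pstat(s, na, "Base: 0x%llx", nvgpu_alloc_base(na));
 *              __alloc_pstat(s, na, "Size: 0x%llx", nvgpu_alloc_length(na));
 *      }
 */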
#endif

#define do_alloc_dbg(a, fmt, arg...)                            \
        nvgpu_log((a)->g, gpu_dbg_alloc, "%25s " fmt, (a)->name, ##arg)

/*
 * This gives finer control over debugging messages. When the
 * ALLOCATOR_DEBUG_FINE macro is defined, prints for an allocator are only
 * made if that allocator's debug flag is set.
 *
 * Otherwise debugging is as normal: debug statements for all allocators
 * if the GPU debugging mask bit is set. Note: even when ALLOCATOR_DEBUG_FINE
 * is set, gpu_dbg_alloc must still also be enabled.
 */
#if defined(ALLOCATOR_DEBUG_FINE)
#define alloc_dbg(a, fmt, arg...)                       \
        do {                                            \
                if ((a)->debug)                         \
                        do_alloc_dbg((a), fmt, ##arg);  \
        } while (0)
#else
#define alloc_dbg(a, fmt, arg...) do_alloc_dbg(a, fmt, ##arg)
#endif

#endif /* NVGPU_ALLOCATOR_H */