diff options
Diffstat (limited to 'include/nvgpu/page_allocator.h')
-rw-r--r-- | include/nvgpu/page_allocator.h | 185 |
1 files changed, 0 insertions, 185 deletions
diff --git a/include/nvgpu/page_allocator.h b/include/nvgpu/page_allocator.h deleted file mode 100644 index a6e0205..0000000 --- a/include/nvgpu/page_allocator.h +++ /dev/null | |||
@@ -1,185 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef PAGE_ALLOCATOR_PRIV_H | ||
24 | #define PAGE_ALLOCATOR_PRIV_H | ||
25 | |||
26 | #include <nvgpu/allocator.h> | ||
27 | #include <nvgpu/nvgpu_mem.h> | ||
28 | #include <nvgpu/kmem.h> | ||
29 | #include <nvgpu/list.h> | ||
30 | #include <nvgpu/rbtree.h> | ||
31 | |||
32 | struct nvgpu_allocator; | ||
33 | |||
34 | /* | ||
35 | * This allocator implements the ability to do SLAB style allocation since the | ||
36 | * GPU has two page sizes available - 4k and 64k/128k. When the default | ||
37 | * granularity is the large page size (64k/128k) small allocations become very | ||
38 | * space inefficient. This is most notable in PDE and PTE blocks which are 4k | ||
39 | * in size. | ||
40 | * | ||
41 | * Thus we need the ability to suballocate in 64k pages. The way we do this for | ||
42 | * the GPU is as follows. We have several buckets for sub-64K allocations: | ||
43 | * | ||
 * B0 - 4k
 * B1 - 8k
 * B2 - 16k
 * B3 - 32k
 * B4 - 64k (for when large pages are 128k)
49 | * | ||
50 | * When an allocation comes in for less than the large page size (from now on | ||
51 | * assumed to be 64k) the allocation is satisfied by one of the buckets. | ||
52 | */ | ||
/*
 * One slab bucket for a single sub-page allocation size. Slab pages are
 * partitioned across the three lists below according to how many of their
 * objects are currently allocated.
 */
struct page_alloc_slab {
	struct nvgpu_list_node empty;	/* Slab pages with no objects in use. */
	struct nvgpu_list_node partial;	/* Slab pages partially allocated. */
	struct nvgpu_list_node full;	/* Slab pages fully allocated. */

	int nr_empty;	/* Length of the empty list. */
	int nr_partial;	/* Length of the partial list. */
	int nr_full;	/* Length of the full list. */

	u32 slab_size;	/* Object size this bucket serves (e.g. 4k, 8k, ...). */
};
64 | |||
/*
 * Fill state of a single slab page; mirrors which page_alloc_slab list the
 * page currently lives on.
 */
enum slab_page_state {
	SP_EMPTY,	/* No objects allocated from this page. */
	SP_PARTIAL,	/* Some, but not all, objects allocated. */
	SP_FULL,	/* All objects in this page are allocated. */
	SP_NONE		/* Not on any slab list — presumably transitional; confirm in the .c. */
};
71 | |||
/*
 * One large page carved up into equal-sized slab objects. Allocation state
 * of the individual objects is tracked in @bitmap.
 */
struct page_alloc_slab_page {
	unsigned long bitmap;	/* One bit per object; set bit = object in use. */
	u64 page_addr;		/* Base address of the underlying large page. */
	u32 slab_size;		/* Size of each object carved from this page. */

	u32 nr_objects;		/* Total objects this page can hold. */
	u32 nr_objects_alloced;	/* Objects currently handed out. */

	enum slab_page_state state;	/* Which slab list this page is on. */

	struct page_alloc_slab *owner;		/* Bucket this page belongs to. */
	struct nvgpu_list_node list_entry;	/* Link into owner's state list. */
};
85 | |||
86 | static inline struct page_alloc_slab_page * | ||
87 | page_alloc_slab_page_from_list_entry(struct nvgpu_list_node *node) | ||
88 | { | ||
89 | return (struct page_alloc_slab_page *) | ||
90 | ((uintptr_t)node - offsetof(struct page_alloc_slab_page, list_entry)); | ||
91 | }; | ||
92 | |||
/*
 * Struct to handle internal management of page allocation. It holds a list
 * of the chunks of pages that make up the overall allocation - much like a
 * scatter gather table.
 */
struct nvgpu_page_alloc {
	/*
	 * nvgpu_sgt for describing the actual allocation. Convenient for
	 * GMMU mapping.
	 */
	struct nvgpu_sgt sgt;

	int nr_chunks;	/* Number of chunks making up this allocation. */
	u64 length;	/* Total length of the allocation — presumably bytes; confirm in the .c. */

	/*
	 * Only useful for the RB tree - since the alloc may have discontiguous
	 * pages the base is essentially irrelevant except for the fact that it
	 * is guaranteed to be unique.
	 */
	u64 base;

	/* Link into the allocator's tree of outstanding allocations. */
	struct nvgpu_rbtree_node tree_entry;

	/*
	 * Set if this is a slab alloc. Points back to the slab page that owns
	 * this particular allocation. nr_chunks will always be 1 if this is
	 * set.
	 */
	struct page_alloc_slab_page *slab_page;
};
124 | |||
125 | static inline struct nvgpu_page_alloc * | ||
126 | nvgpu_page_alloc_from_rbtree_node(struct nvgpu_rbtree_node *node) | ||
127 | { | ||
128 | return (struct nvgpu_page_alloc *) | ||
129 | ((uintptr_t)node - offsetof(struct nvgpu_page_alloc, tree_entry)); | ||
130 | }; | ||
131 | |||
/*
 * State for one page allocator instance: the underlying buddy allocator,
 * the managed address range, slab buckets for sub-page allocations, lookup
 * structures for outstanding allocations, and statistics.
 */
struct nvgpu_page_allocator {
	struct nvgpu_allocator *owner;	/* Owner of this allocator. */

	/*
	 * Use a buddy allocator to manage the allocation of the underlying
	 * pages. This lets us abstract the discontiguous allocation handling
	 * out of the annoyingly complicated buddy allocator.
	 */
	struct nvgpu_allocator source_allocator;

	/*
	 * Page params.
	 */
	u64 base;	/* Start of the managed address range. */
	u64 length;	/* Size of the managed address range. */
	u64 page_size;	/* Granularity of the underlying allocations. */
	u32 page_shift;	/* log2(page_size) — assumed; confirm in the .c. */

	struct nvgpu_rbtree_node *allocs;	/* Outstanding allocations. */

	struct page_alloc_slab *slabs;	/* Array of slab buckets. */
	int nr_slabs;			/* Number of entries in slabs. */

	struct nvgpu_kmem_cache *alloc_cache;		/* Cache for nvgpu_page_alloc structs. */
	struct nvgpu_kmem_cache *slab_page_cache;	/* Cache for page_alloc_slab_page structs. */

	u64 flags;	/* Allocator behavior flags — semantics defined by the allocator core. */

	/*
	 * Stat tracking.
	 */
	u64 nr_allocs;
	u64 nr_frees;
	u64 nr_fixed_allocs;
	u64 nr_fixed_frees;
	u64 nr_slab_allocs;
	u64 nr_slab_frees;
	u64 pages_alloced;
	u64 pages_freed;
};
172 | |||
173 | static inline struct nvgpu_page_allocator *page_allocator( | ||
174 | struct nvgpu_allocator *a) | ||
175 | { | ||
176 | return (struct nvgpu_page_allocator *)(a)->priv; | ||
177 | } | ||
178 | |||
179 | static inline struct nvgpu_allocator *palloc_owner( | ||
180 | struct nvgpu_page_allocator *a) | ||
181 | { | ||
182 | return a->owner; | ||
183 | } | ||
184 | |||
185 | #endif | ||