Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mm.c')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_mm.c | 271
1 file changed, 271 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 000000000000..cdbb11eb701b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,271 @@
/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"

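/* Each nouveau_mm_node sits on two lists: nl_entry links every node in
 * address order, fl_entry links only the free nodes.  region_put drops
 * a node from both lists and frees it.
 */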
static inline void
region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
{
	list_del(&a->nl_entry);
	list_del(&a->fl_entry);
	kfree(a);
}

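/* Split "size" units off the front of "a".  Returns the new front node,
 * or "a" itself when it is already exactly "size" long.
 */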
static struct nouveau_mm_node *
region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
{
	struct nouveau_mm_node *b;

	if (a->length == size)
		return a;

	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (unlikely(b == NULL))
		return NULL;

	b->offset = a->offset;
	b->length = size;
	b->free = a->free;
	b->type = a->type;
	a->offset += size;
	a->length -= size;
	list_add_tail(&b->nl_entry, &a->nl_entry);
	if (b->free)
		list_add_tail(&b->fl_entry, &a->fl_entry);
	return b;
}

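/* Merge "this" with any free, identically typed neighbours in the
 * address-ordered node list, returning whichever node survives.
 */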
static struct nouveau_mm_node *
nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
	struct nouveau_mm_node *prev, *next;

	/* try to merge with free adjacent entries of same type */
	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
	if (this->nl_entry.prev != &rmm->nodes) {
		if (prev->free && prev->type == this->type) {
			prev->length += this->length;
			region_put(rmm, this);
			this = prev;
		}
	}

	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
	if (this->nl_entry.next != &rmm->nodes) {
		if (next->free && next->type == this->type) {
			next->offset = this->offset;
			next->length += this->length;
			region_put(rmm, this);
			this = next;
		}
	}

	return this;
}

void
nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
{
	u32 block_s, block_l;

	this->free = true;
	list_add(&this->fl_entry, &rmm->free);
	this = nouveau_mm_merge(rmm, this);

	/* any entirely free blocks now? we'll want to remove typing
	 * on them so they can be used for any memory allocation
	 */
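	/* e.g. with block_size 0x1000, freeing a node spanning
	 * 0x1800..0x4000 keeps 0x1800..0x2000 typed and strips the
	 * type from the whole blocks covering 0x2000..0x4000
	 */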
	block_s = roundup(this->offset, rmm->block_size);
	if (block_s + rmm->block_size > this->offset + this->length)
		return;

	/* split off any still-typed region at the start */
	if (block_s != this->offset) {
		if (!region_split(rmm, this, block_s - this->offset))
			return;
	}

	/* split off the soon-to-be-untyped block(s) */
	block_l = rounddown(this->length, rmm->block_size);
	if (block_l != this->length) {
		this = region_split(rmm, this, block_l);
		if (!this)
			return;
	}

	/* mark as having no type, and retry merge with any adjacent
	 * untyped blocks
	 */
	this->type = 0;
	nouveau_mm_merge(rmm, this);
}

int
nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
	       u32 align, struct nouveau_mm_node **pnode)
{
	struct nouveau_mm_node *this, *tmp, *next;
	u32 splitoff, avail, alloc;

	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
		if (this->nl_entry.next == &rmm->nodes)
			next = NULL;

		/* skip wrongly typed blocks */
		if (this->type && this->type != type)
			continue;

		/* account for alignment */
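		/* (align is assumed to be a power of two here) */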
		splitoff = this->offset & (align - 1);
		if (splitoff)
			splitoff = align - splitoff;

		if (this->length <= splitoff)
			continue;

		/* determine total memory available from this, and
		 * the next block (if appropriate)
		 */
		avail = this->length;
		if (next && next->free && (!next->type || next->type == type))
			avail += next->length;

		avail -= splitoff;

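		/* size_nc, when non-zero, permits a partial allocation:
		 * take whatever fits, rounded down to a size_nc multiple,
		 * instead of requiring the full size from one node
		 */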
		/* determine allocation size */
		if (size_nc) {
			alloc = min(avail, size);
			alloc = rounddown(alloc, size_nc);
			if (alloc == 0)
				continue;
		} else {
			alloc = size;
			if (avail < alloc)
				continue;
		}

		/* untyped block, split off a chunk that's a multiple
		 * of block_size and type it
		 */
		if (!this->type) {
			u32 block = roundup(alloc + splitoff, rmm->block_size);
			if (this->length < block)
				continue;

			this = region_split(rmm, this, block);
			if (!this)
				return -ENOMEM;

			this->type = type;
		}

		/* stealing memory from adjacent block */
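		/* (reached when "this" alone cannot cover alloc: grow it
		 * into the following free node, first typing a
		 * block-aligned chunk of that neighbour if it's untyped)
		 */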
		if (alloc > this->length) {
			u32 amount = alloc - (this->length - splitoff);

			if (!next->type) {
				amount = roundup(amount, rmm->block_size);

				next = region_split(rmm, next, amount);
				if (!next)
					return -ENOMEM;

				next->type = type;
			}

			this->length += amount;
			next->offset += amount;
			next->length -= amount;
			if (!next->length) {
				list_del(&next->nl_entry);
				list_del(&next->fl_entry);
				kfree(next);
			}
		}

		if (splitoff) {
			if (!region_split(rmm, this, splitoff))
				return -ENOMEM;
		}

		this = region_split(rmm, this, alloc);
		if (this == NULL)
			return -ENOMEM;

		this->free = false;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}

	return -ENOMEM;
}

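/* Create an allocator over [offset, offset + length), trimmed inward to
 * block alignment.  The whole range starts as one free, untyped node.
 */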
int
nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
{
	struct nouveau_mm *rmm;
	struct nouveau_mm_node *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return -ENOMEM;
	heap->free = true;
	heap->offset = roundup(offset, block);
	heap->length = rounddown(offset + length, block) - heap->offset;

	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
	if (!rmm) {
		kfree(heap);
		return -ENOMEM;
	}
	rmm->block_size = block;
	mutex_init(&rmm->mutex);
	INIT_LIST_HEAD(&rmm->nodes);
	INIT_LIST_HEAD(&rmm->free);
	list_add(&heap->nl_entry, &rmm->nodes);
	list_add(&heap->fl_entry, &rmm->free);

	*prmm = rmm;
	return 0;
}

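/* Tear the allocator down.  Fails with -EBUSY while anything is still
 * allocated, i.e. while more than the single heap node remains.
 */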
int
nouveau_mm_fini(struct nouveau_mm **prmm)
{
	struct nouveau_mm *rmm = *prmm;
	struct nouveau_mm_node *heap =
		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);

	if (!list_is_singular(&rmm->nodes))
		return -EBUSY;

	kfree(heap);
	kfree(rmm);
	*prmm = NULL;
	return 0;
}
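
For reference, a minimal usage sketch of the interface this commit adds. It is illustrative only, not part of the commit: the example function is hypothetical, the byte units are just one choice (offset/length/block units are whatever the caller decides), and the explicit locking follows from the fact that this file initialises rmm->mutex but never takes it, leaving serialisation to callers.

/* Illustrative only: manage a 16 MiB range in 64 KiB typed blocks,
 * carve out a 4 KiB-aligned, 16 KiB region of type 1, then release it.
 */
static int example_mm_usage(void)
{
	struct nouveau_mm *mm;
	struct nouveau_mm_node *node;
	int ret;

	ret = nouveau_mm_init(&mm, 0, 16 << 20, 64 << 10);
	if (ret)
		return ret;

	mutex_lock(&mm->mutex);
	ret = nouveau_mm_get(mm, 1, 0x4000, 0, 0x1000, &node);
	mutex_unlock(&mm->mutex);
	if (ret == 0) {
		/* node->offset is 0x1000-aligned, node->length == 0x4000 */
		mutex_lock(&mm->mutex);
		nouveau_mm_put(mm, node);
		mutex_unlock(&mm->mutex);
	}

	return nouveau_mm_fini(&mm);
}

Passing size_nc as 0 demands the full size from a single node; a non-zero size_nc would instead let nouveau_mm_get() return a smaller piece rounded down to a size_nc multiple.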