author     Jerome Glisse <glisse@freedesktop.org>    2009-04-08 11:11:16 -0400
committer  Dave Airlie <airlied@redhat.com>          2009-06-12 01:56:31 -0400
commit     249d6048ca98b5452105b0824abac1275661b8e3 (patch)
tree       5e8e89288ec6f6a278fcb819ea49d0d9984576fd /drivers/gpu
parent     715cbb05c935e8a4306a730d14a72d5af881523e (diff)
drm: Split out the mm declarations in a separate header. Add atomic operations.
This is a TTM preparation patch: it rearranges the mm code and adds the
operations needed to perform mm operations in atomic context.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
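
For context, a minimal usage sketch (not part of this patch) of how a driver
might combine the new entry points: drm_mm_pre_get() refills the unused-node
pool while sleeping is still allowed, and the allocation itself then happens
under a caller-held spinlock via drm_mm_get_block_atomic(). The example
function name, the 'lock' parameter, and the surrounding driver structure are
illustrative assumptions only.

#include "drmP.h"
#include "drm_mm.h"

/*
 * Usage sketch only: pre-fill the unused-node pool while we may still
 * sleep, then search and allocate under the caller's spinlock.
 */
static struct drm_mm_node *example_alloc_atomic(struct drm_mm *mm,
						spinlock_t *lock,
						unsigned long size,
						unsigned alignment)
{
	struct drm_mm_node *free_node, *node = NULL;

	/* May sleep; tops the pool up to MM_UNUSED_TARGET nodes. */
	if (drm_mm_pre_get(mm))
		return NULL;

	spin_lock(lock);	/* atomic context from here on */
	free_node = drm_mm_search_free(mm, size, alignment, 0);
	if (free_node)
		node = drm_mm_get_block_atomic(free_node, size, alignment);
	spin_unlock(lock);

	return node;
}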
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 165
1 file changed, 137 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 367c590ffbba..7819fd930a51 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -42,8 +42,11 @@
  */
 
 #include "drmP.h"
+#include "drm_mm.h"
 #include <linux/slab.h>
 
+#define MM_UNUSED_TARGET 4
+
 unsigned long drm_mm_tail_space(struct drm_mm *mm)
 {
 	struct list_head *tail_node;
@@ -74,16 +77,62 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
 	return 0;
 }
 
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+	struct drm_mm_node *child;
+
+	if (atomic)
+		child = kmalloc(sizeof(*child), GFP_ATOMIC);
+	else
+		child = kmalloc(sizeof(*child), GFP_KERNEL);
+
+	if (unlikely(child == NULL)) {
+		spin_lock(&mm->unused_lock);
+		if (list_empty(&mm->unused_nodes))
+			child = NULL;
+		else {
+			child =
+			    list_entry(mm->unused_nodes.next,
+				       struct drm_mm_node, fl_entry);
+			list_del(&child->fl_entry);
+			--mm->num_unused;
+		}
+		spin_unlock(&mm->unused_lock);
+	}
+	return child;
+}
+
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+	struct drm_mm_node *node;
+
+	spin_lock(&mm->unused_lock);
+	while (mm->num_unused < MM_UNUSED_TARGET) {
+		spin_unlock(&mm->unused_lock);
+		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		spin_lock(&mm->unused_lock);
+
+		if (unlikely(node == NULL)) {
+			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+			spin_unlock(&mm->unused_lock);
+			return ret;
+		}
+		++mm->num_unused;
+		list_add_tail(&node->fl_entry, &mm->unused_nodes);
+	}
+	spin_unlock(&mm->unused_lock);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_pre_get);
 
 static int drm_mm_create_tail_node(struct drm_mm *mm,
 				   unsigned long start,
-				   unsigned long size)
+				   unsigned long size, int atomic)
 {
 	struct drm_mm_node *child;
 
-	child = (struct drm_mm_node *)
-	    drm_alloc(sizeof(*child), DRM_MEM_MM);
-	if (!child)
+	child = drm_mm_kmalloc(mm, atomic);
+	if (unlikely(child == NULL))
 		return -ENOMEM;
 
 	child->free = 1;
@@ -97,8 +146,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
 	return 0;
 }
 
-
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
 {
 	struct list_head *tail_node;
 	struct drm_mm_node *entry;
@@ -106,20 +154,21 @@ int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
 	tail_node = mm->ml_entry.prev;
 	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
 	if (!entry->free) {
-		return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+		return drm_mm_create_tail_node(mm, entry->start + entry->size,
+					       size, atomic);
 	}
 	entry->size += size;
 	return 0;
 }
 
 static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-						 unsigned long size)
+						 unsigned long size,
+						 int atomic)
 {
 	struct drm_mm_node *child;
 
-	child = (struct drm_mm_node *)
-	    drm_alloc(sizeof(*child), DRM_MEM_MM);
-	if (!child)
+	child = drm_mm_kmalloc(parent->mm, atomic);
+	if (unlikely(child == NULL))
 		return NULL;
 
 	INIT_LIST_HEAD(&child->fl_entry);
@@ -151,8 +200,9 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 		tmp = parent->start % alignment;
 
 	if (tmp) {
-		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
-		if (!align_splitoff)
+		align_splitoff =
+		    drm_mm_split_at_start(parent, alignment - tmp, 0);
+		if (unlikely(align_splitoff == NULL))
 			return NULL;
 	}
 
@@ -161,7 +211,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 		parent->free = 0;
 		return parent;
 	} else {
-		child = drm_mm_split_at_start(parent, size);
+		child = drm_mm_split_at_start(parent, size, 0);
 	}
 
 	if (align_splitoff)
@@ -169,14 +219,49 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 
 	return child;
 }
+
 EXPORT_SYMBOL(drm_mm_get_block);
 
+struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+					    unsigned long size,
+					    unsigned alignment)
+{
+
+	struct drm_mm_node *align_splitoff = NULL;
+	struct drm_mm_node *child;
+	unsigned tmp = 0;
+
+	if (alignment)
+		tmp = parent->start % alignment;
+
+	if (tmp) {
+		align_splitoff =
+		    drm_mm_split_at_start(parent, alignment - tmp, 1);
+		if (unlikely(align_splitoff == NULL))
+			return NULL;
+	}
+
+	if (parent->size == size) {
+		list_del_init(&parent->fl_entry);
+		parent->free = 0;
+		return parent;
+	} else {
+		child = drm_mm_split_at_start(parent, size, 1);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return child;
+}
+EXPORT_SYMBOL(drm_mm_get_block_atomic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
  */
 
-void drm_mm_put_block(struct drm_mm_node * cur)
+void drm_mm_put_block(struct drm_mm_node *cur)
 {
 
 	struct drm_mm *mm = cur->mm;
@@ -188,21 +273,27 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 	int merged = 0;
 
 	if (cur_head->prev != root_head) {
-		prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+		prev_node =
+		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
 		if (prev_node->free) {
 			prev_node->size += cur->size;
 			merged = 1;
 		}
 	}
 	if (cur_head->next != root_head) {
-		next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+		next_node =
+		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
 		if (next_node->free) {
 			if (merged) {
 				prev_node->size += next_node->size;
 				list_del(&next_node->ml_entry);
 				list_del(&next_node->fl_entry);
-				drm_free(next_node, sizeof(*next_node),
-					 DRM_MEM_MM);
+				if (mm->num_unused < MM_UNUSED_TARGET) {
+					list_add(&next_node->fl_entry,
+						 &mm->unused_nodes);
+					++mm->num_unused;
+				} else
+					kfree(next_node);
 			} else {
 				next_node->size += cur->size;
 				next_node->start = cur->start;
@@ -215,14 +306,19 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 		list_add(&cur->fl_entry, &mm->fl_entry);
 	} else {
 		list_del(&cur->ml_entry);
-		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+		if (mm->num_unused < MM_UNUSED_TARGET) {
+			list_add(&cur->fl_entry, &mm->unused_nodes);
+			++mm->num_unused;
+		} else
+			kfree(cur);
 	}
 }
+
 EXPORT_SYMBOL(drm_mm_put_block);
 
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 				       unsigned long size,
 				       unsigned alignment, int best_match)
 {
 	struct list_head *list;
 	const struct list_head *free_stack = &mm->fl_entry;
@@ -247,7 +343,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 			wasted += alignment - tmp;
 		}
 
-
 		if (entry->size >= size + wasted) {
 			if (!best_match)
 				return entry;
@@ -260,6 +355,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 
 	return best;
 }
+EXPORT_SYMBOL(drm_mm_search_free);
 
 int drm_mm_clean(struct drm_mm * mm)
 {
@@ -267,14 +363,17 @@ int drm_mm_clean(struct drm_mm * mm)
 
 	return (head->next->next == head);
 }
-EXPORT_SYMBOL(drm_mm_search_free);
+EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
 	INIT_LIST_HEAD(&mm->ml_entry);
 	INIT_LIST_HEAD(&mm->fl_entry);
+	INIT_LIST_HEAD(&mm->unused_nodes);
+	mm->num_unused = 0;
+	spin_lock_init(&mm->unused_lock);
 
-	return drm_mm_create_tail_node(mm, start, size);
+	return drm_mm_create_tail_node(mm, start, size, 0);
 }
 EXPORT_SYMBOL(drm_mm_init);
 
@@ -282,6 +381,7 @@ void drm_mm_takedown(struct drm_mm * mm)
 {
 	struct list_head *bnode = mm->fl_entry.next;
 	struct drm_mm_node *entry;
+	struct drm_mm_node *next;
 
 	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
 
@@ -293,7 +393,16 @@ void drm_mm_takedown(struct drm_mm * mm)
 
 	list_del(&entry->fl_entry);
 	list_del(&entry->ml_entry);
+	kfree(entry);
+
+	spin_lock(&mm->unused_lock);
+	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
+		list_del(&entry->fl_entry);
+		kfree(entry);
+		--mm->num_unused;
+	}
+	spin_unlock(&mm->unused_lock);
 
-	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+	BUG_ON(mm->num_unused != 0);
 }
 EXPORT_SYMBOL(drm_mm_takedown);