author    Daniel Vetter <daniel.vetter@ffwll.ch>  2011-02-18 11:59:12 -0500
committer Dave Airlie <airlied@redhat.com>        2011-02-22 19:32:34 -0500
commit    ea7b1dd44867e9cd6bac67e7c9fc3f128b5b255c (patch)
tree      2feb9852ab18e2f726136ae460e414ef40425129
parent    31a5b8ce8f3bf20799eb68da9602de2bee58fdd3 (diff)
drm: mm: track free areas implicitly
The idea is to track free holes implicitly, by marking the allocation immediately preceding a hole. To avoid an ugly corner case, add a dummy head_node to struct drm_mm that tracks the hole spanning the complete allocation area when the memory manager is empty. To guarantee that there is always a preceding/following node (which might be marked with hole_follows == 1), move the mm->node_list list_head into the head_node.

The main allocator and fair-lru scan code actually become simpler. Only the debug code suffers slightly, because free areas are no longer explicit.

Also add drm_mm_for_each_node (which will be much more useful when struct drm_mm_node is embeddable).

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
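To make the scheme concrete, here is a minimal user-space sketch of the same idea (illustrative only, not the kernel code: struct node, the circular next pointer, and the [0, 100) managed range are assumptions made up for this example). A hole is never stored as a node of its own; it is recovered from the allocation marked hole_follows and the start of its successor, and the dummy head node's wrap-around size makes the empty-manager hole fall out of the same two helpers:

#include <stdio.h>

/* Toy model: an allocation marked hole_follows owns the gap between
 * its own end and the start of the next node in address order. */
struct node {
	unsigned long start;
	unsigned long size;
	int hole_follows;
	struct node *next;	/* circular list through a dummy head node */
};

static unsigned long hole_start(const struct node *n)
{
	return n->start + n->size;	/* hole begins where this node ends */
}

static unsigned long hole_end(const struct node *n)
{
	return n->next->start;		/* and runs to the next node's start */
}

int main(void)
{
	/* Manager spans [0, 100). As in the patch, the dummy head node sits
	 * at the area's end with a size that wraps around, so hole_start()
	 * yields 0 and the head's "hole" covers the whole area when the
	 * manager is empty. */
	struct node head = { .start = 100, .hole_follows = 1 };
	struct node a = { .start = 10, .size = 20, .hole_follows = 1,
			  .next = &head };

	head.size = 0UL - head.start;	/* unsigned wrap: start + size == 0 */
	head.next = &a;			/* one allocation at [10, 30) */

	for (struct node *n = &head; ; n = n->next) {
		if (n->hole_follows)
			printf("hole: [%lu, %lu)\n",
			       hole_start(n), hole_end(n));
		if (n->next == &head)
			break;
	}
	return 0;
}

Compiled as C99, this prints "hole: [0, 10)" and "hole: [30, 100)", the two free ranges around the single allocation, without either hole ever existing as a list node.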
-rw-r--r--  drivers/gpu/drm/drm_mm.c  | 464
-rw-r--r--  include/drm/drm_mm.h      |  21
2 files changed, 225 insertions, 260 deletions
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index c59515ba7e69..4fa33e1283af 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 	else {
 		child =
 		    list_entry(mm->unused_nodes.next,
-			       struct drm_mm_node, free_stack);
-		list_del(&child->free_stack);
+			       struct drm_mm_node, node_list);
+		list_del(&child->node_list);
 		--mm->num_unused;
 	}
 	spin_unlock(&mm->unused_lock);
@@ -94,126 +94,123 @@ int drm_mm_pre_get(struct drm_mm *mm)
 			return ret;
 		}
 		++mm->num_unused;
-		list_add_tail(&node->free_stack, &mm->unused_nodes);
+		list_add_tail(&node->node_list, &mm->unused_nodes);
 	}
 	spin_unlock(&mm->unused_lock);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
 
-static int drm_mm_create_tail_node(struct drm_mm *mm,
-				   unsigned long start,
-				   unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
-	struct drm_mm_node *child;
-
-	child = drm_mm_kmalloc(mm, atomic);
-	if (unlikely(child == NULL))
-		return -ENOMEM;
-
-	child->free = 1;
-	child->size = size;
-	child->start = start;
-	child->mm = mm;
-
-	list_add_tail(&child->node_list, &mm->node_list);
-	list_add_tail(&child->free_stack, &mm->free_stack);
-
-	return 0;
+	return hole_node->start + hole_node->size;
 }
 
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-						 unsigned long size,
-						 int atomic)
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
-	struct drm_mm_node *child;
-
-	child = drm_mm_kmalloc(parent->mm, atomic);
-	if (unlikely(child == NULL))
-		return NULL;
-
-	INIT_LIST_HEAD(&child->free_stack);
-
-	child->size = size;
-	child->start = parent->start;
-	child->mm = parent->mm;
+	struct drm_mm_node *next_node =
+		list_entry(hole_node->node_list.next, struct drm_mm_node,
+			   node_list);
 
-	list_add_tail(&child->node_list, &parent->node_list);
-	INIT_LIST_HEAD(&child->free_stack);
-
-	parent->size -= size;
-	parent->start += size;
-	return child;
+	return next_node->start;
 }
 
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 					     unsigned long size,
 					     unsigned alignment,
 					     int atomic)
 {
 
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
+	struct drm_mm_node *node;
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+	BUG_ON(!hole_node->hole_follows);
+
+	node = drm_mm_kmalloc(mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
 
 	if (alignment)
-		tmp = node->start % alignment;
+		tmp = hole_start % alignment;
 
-	if (tmp) {
-		align_splitoff =
-		    drm_mm_split_at_start(node, alignment - tmp, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
-	}
+	if (!tmp) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
+	} else
+		wasted = alignment - tmp;
+
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
 
-	if (node->size == size) {
-		list_del_init(&node->free_stack);
-		node->free = 0;
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > hole_end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
 	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
+		node->hole_follows = 0;
 	}
 
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
-
 	return node;
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
 						unsigned long size,
 						unsigned alignment,
 						unsigned long start,
 						unsigned long end,
 						int atomic)
 {
-	struct drm_mm_node *align_splitoff = NULL;
-	unsigned tmp = 0;
-	unsigned wasted = 0;
+	struct drm_mm_node *node;
+	struct drm_mm *mm = hole_node->mm;
+	unsigned long tmp = 0, wasted = 0;
+	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
-	if (node->start < start)
-		wasted += start - node->start;
+	BUG_ON(!hole_node->hole_follows);
+
+	node = drm_mm_kmalloc(mm, atomic);
+	if (unlikely(node == NULL))
+		return NULL;
+
+	if (hole_start < start)
+		wasted += start - hole_start;
 	if (alignment)
-		tmp = ((node->start + wasted) % alignment);
+		tmp = (hole_start + wasted) % alignment;
 
 	if (tmp)
 		wasted += alignment - tmp;
-	if (wasted) {
-		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
-		if (unlikely(align_splitoff == NULL))
-			return NULL;
+
+	if (!wasted) {
+		hole_node->hole_follows = 0;
+		list_del_init(&hole_node->hole_stack);
 	}
 
-	if (node->size == size) {
-		list_del_init(&node->free_stack);
-		node->free = 0;
+	node->start = hole_start + wasted;
+	node->size = size;
+	node->mm = mm;
+
+	INIT_LIST_HEAD(&node->hole_stack);
+	list_add(&node->node_list, &hole_node->node_list);
+
+	BUG_ON(node->start + node->size > hole_end);
+	BUG_ON(node->start + node->size > end);
+
+	if (node->start + node->size < hole_end) {
+		list_add(&node->hole_stack, &mm->hole_stack);
+		node->hole_follows = 1;
 	} else {
-		node = drm_mm_split_at_start(node, size, atomic);
+		node->hole_follows = 0;
 	}
 
-	if (align_splitoff)
-		drm_mm_put_block(align_splitoff);
-
 	return node;
 }
 EXPORT_SYMBOL(drm_mm_get_block_range_generic);
@@ -223,66 +220,41 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  * Otherwise add to the free stack.
  */
 
-void drm_mm_put_block(struct drm_mm_node *cur)
+void drm_mm_put_block(struct drm_mm_node *node)
 {
 
-	struct drm_mm *mm = cur->mm;
-	struct list_head *cur_head = &cur->node_list;
-	struct list_head *root_head = &mm->node_list;
-	struct drm_mm_node *prev_node = NULL;
-	struct drm_mm_node *next_node;
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node;
 
-	int merged = 0;
+	BUG_ON(node->scanned_block || node->scanned_prev_free
+	       || node->scanned_next_free);
 
-	BUG_ON(cur->scanned_block || cur->scanned_prev_free
-	       || cur->scanned_next_free);
+	prev_node =
+	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
-	if (cur_head->prev != root_head) {
-		prev_node =
-		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
-		if (prev_node->free) {
-			prev_node->size += cur->size;
-			merged = 1;
-		}
-	}
-	if (cur_head->next != root_head) {
-		next_node =
-		    list_entry(cur_head->next, struct drm_mm_node, node_list);
-		if (next_node->free) {
-			if (merged) {
-				prev_node->size += next_node->size;
-				list_del(&next_node->node_list);
-				list_del(&next_node->free_stack);
-				spin_lock(&mm->unused_lock);
-				if (mm->num_unused < MM_UNUSED_TARGET) {
-					list_add(&next_node->free_stack,
-						 &mm->unused_nodes);
-					++mm->num_unused;
-				} else
-					kfree(next_node);
-				spin_unlock(&mm->unused_lock);
-			} else {
-				next_node->size += cur->size;
-				next_node->start = cur->start;
-				merged = 1;
-			}
-		}
-	}
-	if (!merged) {
-		cur->free = 1;
-		list_add(&cur->free_stack, &mm->free_stack);
-	} else {
-		list_del(&cur->node_list);
-		spin_lock(&mm->unused_lock);
-		if (mm->num_unused < MM_UNUSED_TARGET) {
-			list_add(&cur->free_stack, &mm->unused_nodes);
-			++mm->num_unused;
-		} else
-			kfree(cur);
-		spin_unlock(&mm->unused_lock);
-	}
-}
+	if (node->hole_follows) {
+		BUG_ON(drm_mm_hole_node_start(node)
+				== drm_mm_hole_node_end(node));
+		list_del(&node->hole_stack);
+	} else
+		BUG_ON(drm_mm_hole_node_start(node)
+				!= drm_mm_hole_node_end(node));
 
+	if (!prev_node->hole_follows) {
+		prev_node->hole_follows = 1;
+		list_add(&prev_node->hole_stack, &mm->hole_stack);
+	} else
+		list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+	list_del(&node->node_list);
+	spin_lock(&mm->unused_lock);
+	if (mm->num_unused < MM_UNUSED_TARGET) {
+		list_add(&node->node_list, &mm->unused_nodes);
+		++mm->num_unused;
+	} else
+		kfree(node);
+	spin_unlock(&mm->unused_lock);
+}
 EXPORT_SYMBOL(drm_mm_put_block);
 
 static int check_free_hole(unsigned long start, unsigned long end,
@@ -319,8 +291,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->free_stack, free_stack) {
-		if (!check_free_hole(entry->start, entry->start + entry->size,
+	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+		BUG_ON(!entry->hole_follows);
+		if (!check_free_hole(drm_mm_hole_node_start(entry),
+				     drm_mm_hole_node_end(entry),
 				     size, alignment))
 			continue;
 
@@ -353,12 +327,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each_entry(entry, &mm->free_stack, free_stack) {
-		unsigned long adj_start = entry->start < start ?
-			start : entry->start;
-		unsigned long adj_end = entry->start + entry->size > end ?
-			end : entry->start + entry->size;
+	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+			start : drm_mm_hole_node_start(entry);
+		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+			end : drm_mm_hole_node_end(entry);
 
+		BUG_ON(!entry->hole_follows);
 		if (!check_free_hole(adj_start, adj_end, size, alignment))
 			continue;
 
@@ -430,70 +405,40 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 int drm_mm_scan_add_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
-	struct list_head *prev_free, *next_free;
-	struct drm_mm_node *prev_node, *next_node;
+	struct drm_mm_node *prev_node;
+	unsigned long hole_start, hole_end;
 	unsigned long adj_start;
 	unsigned long adj_end;
 
 	mm->scanned_blocks++;
 
-	prev_free = next_free = NULL;
-
-	BUG_ON(node->free);
+	BUG_ON(node->scanned_block);
 	node->scanned_block = 1;
-	node->free = 1;
-
-	if (node->node_list.prev != &mm->node_list) {
-		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
-				       node_list);
-
-		if (prev_node->free) {
-			list_del(&prev_node->node_list);
-
-			node->start = prev_node->start;
-			node->size += prev_node->size;
-
-			prev_node->scanned_prev_free = 1;
-
-			prev_free = &prev_node->free_stack;
-		}
-	}
-
-	if (node->node_list.next != &mm->node_list) {
-		next_node = list_entry(node->node_list.next, struct drm_mm_node,
-				       node_list);
-
-		if (next_node->free) {
-			list_del(&next_node->node_list);
-
-			node->size += next_node->size;
-
-			next_node->scanned_next_free = 1;
 
-			next_free = &next_node->free_stack;
-		}
-	}
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
 
-	/* The free_stack list is not used for allocated objects, so these two
-	 * pointers can be abused (as long as no allocations in this memory
-	 * manager happens). */
-	node->free_stack.prev = prev_free;
-	node->free_stack.next = next_free;
+	node->scanned_preceeds_hole = prev_node->hole_follows;
+	prev_node->hole_follows = 1;
+	list_del(&node->node_list);
+	node->node_list.prev = &prev_node->node_list;
 
+	hole_start = drm_mm_hole_node_start(prev_node);
+	hole_end = drm_mm_hole_node_end(prev_node);
 	if (mm->scan_check_range) {
-		adj_start = node->start < mm->scan_start ?
-			mm->scan_start : node->start;
-		adj_end = node->start + node->size > mm->scan_end ?
-			mm->scan_end : node->start + node->size;
+		adj_start = hole_start < mm->scan_start ?
+			mm->scan_start : hole_start;
+		adj_end = hole_end > mm->scan_end ?
+			mm->scan_end : hole_end;
 	} else {
-		adj_start = node->start;
-		adj_end = node->start + node->size;
+		adj_start = hole_start;
+		adj_end = hole_end;
 	}
 
 	if (check_free_hole(adj_start , adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
-		mm->scan_hit_start = node->start;
-		mm->scan_hit_size = node->size;
+		mm->scan_hit_start = hole_start;
+		mm->scan_hit_size = hole_end;
 
 		return 1;
 	}
@@ -519,39 +464,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
 int drm_mm_scan_remove_block(struct drm_mm_node *node)
 {
 	struct drm_mm *mm = node->mm;
-	struct drm_mm_node *prev_node, *next_node;
+	struct drm_mm_node *prev_node;
 
 	mm->scanned_blocks--;
 
 	BUG_ON(!node->scanned_block);
 	node->scanned_block = 0;
-	node->free = 0;
-
-	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
-			       free_stack);
-	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
-			       free_stack);
-
-	if (prev_node) {
-		BUG_ON(!prev_node->scanned_prev_free);
-		prev_node->scanned_prev_free = 0;
-
-		list_add_tail(&prev_node->node_list, &node->node_list);
 
-		node->start = prev_node->start + prev_node->size;
-		node->size -= prev_node->size;
-	}
-
-	if (next_node) {
-		BUG_ON(!next_node->scanned_next_free);
-		next_node->scanned_next_free = 0;
-
-		list_add(&next_node->node_list, &node->node_list);
-
-		node->size -= next_node->size;
-	}
+	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+			       node_list);
 
-	INIT_LIST_HEAD(&node->free_stack);
+	prev_node->hole_follows = node->scanned_preceeds_hole;
+	INIT_LIST_HEAD(&node->node_list);
+	list_add(&node->node_list, &prev_node->node_list);
 
 	/* Only need to check for containement because start&size for the
 	 * complete resulting free block (not just the desired part) is
@@ -568,7 +493,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
 int drm_mm_clean(struct drm_mm * mm)
 {
-	struct list_head *head = &mm->node_list;
+	struct list_head *head = &mm->head_node.node_list;
 
 	return (head->next->next == head);
 }
@@ -576,38 +501,40 @@ EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
-	INIT_LIST_HEAD(&mm->node_list);
-	INIT_LIST_HEAD(&mm->free_stack);
+	INIT_LIST_HEAD(&mm->hole_stack);
 	INIT_LIST_HEAD(&mm->unused_nodes);
 	mm->num_unused = 0;
 	mm->scanned_blocks = 0;
 	spin_lock_init(&mm->unused_lock);
 
-	return drm_mm_create_tail_node(mm, start, size, 0);
+	/* Clever trick to avoid a special case in the free hole tracking. */
+	INIT_LIST_HEAD(&mm->head_node.node_list);
+	INIT_LIST_HEAD(&mm->head_node.hole_stack);
+	mm->head_node.hole_follows = 1;
+	mm->head_node.scanned_block = 0;
+	mm->head_node.scanned_prev_free = 0;
+	mm->head_node.scanned_next_free = 0;
+	mm->head_node.mm = mm;
+	mm->head_node.start = start + size;
+	mm->head_node.size = start - mm->head_node.start;
+	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+	return 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-	struct list_head *bnode = mm->free_stack.next;
-	struct drm_mm_node *entry;
-	struct drm_mm_node *next;
+	struct drm_mm_node *entry, *next;
 
-	entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
-	if (entry->node_list.next != &mm->node_list ||
-	    entry->free_stack.next != &mm->free_stack) {
+	if (!list_empty(&mm->head_node.node_list)) {
 		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
 		return;
 	}
 
-	list_del(&entry->free_stack);
-	list_del(&entry->node_list);
-	kfree(entry);
-
 	spin_lock(&mm->unused_lock);
-	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
-		list_del(&entry->free_stack);
+	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+		list_del(&entry->node_list);
 		kfree(entry);
 		--mm->num_unused;
 	}
@@ -620,19 +547,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
 	struct drm_mm_node *entry;
-	int total_used = 0, total_free = 0, total = 0;
-
-	list_for_each_entry(entry, &mm->node_list, node_list) {
-		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+	unsigned long total_used = 0, total_free = 0, total = 0;
+	unsigned long hole_start, hole_end, hole_size;
+
+	hole_start = drm_mm_hole_node_start(&mm->head_node);
+	hole_end = drm_mm_hole_node_end(&mm->head_node);
+	hole_size = hole_end - hole_start;
+	if (hole_size)
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+			prefix, hole_start, hole_end,
+			hole_size);
+	total_free += hole_size;
+
+	drm_mm_for_each_node(entry, mm) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
 			prefix, entry->start, entry->start + entry->size,
-			entry->size, entry->free ? "free" : "used");
-		total += entry->size;
-		if (entry->free)
-			total_free += entry->size;
-		else
-			total_used += entry->size;
+			entry->size);
+		total_used += entry->size;
+
+		if (entry->hole_follows) {
+			hole_start = drm_mm_hole_node_start(entry);
+			hole_end = drm_mm_hole_node_end(entry);
+			hole_size = hole_end - hole_start;
+			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+				prefix, hole_start, hole_end,
+				hole_size);
+			total_free += hole_size;
+		}
 	}
-	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+	total = total_free + total_used;
+
+	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
 		total_used, total_free);
 }
 EXPORT_SYMBOL(drm_mm_debug_table);
@@ -641,17 +586,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
 	struct drm_mm_node *entry;
-	int total_used = 0, total_free = 0, total = 0;
-
-	list_for_each_entry(entry, &mm->node_list, node_list) {
-		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
-		total += entry->size;
-		if (entry->free)
-			total_free += entry->size;
-		else
-			total_used += entry->size;
+	unsigned long total_used = 0, total_free = 0, total = 0;
+	unsigned long hole_start, hole_end, hole_size;
+
+	hole_start = drm_mm_hole_node_start(&mm->head_node);
+	hole_end = drm_mm_hole_node_end(&mm->head_node);
+	hole_size = hole_end - hole_start;
+	if (hole_size)
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+				hole_start, hole_end, hole_size);
+	total_free += hole_size;
+
+	drm_mm_for_each_node(entry, mm) {
+		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+				entry->start, entry->start + entry->size,
+				entry->size);
+		total_used += entry->size;
+		if (entry->hole_follows) {
+			hole_start = drm_mm_hole_node_start(entry);
+			hole_end = drm_mm_hole_node_end(entry);
+			hole_size = hole_end - hole_start;
+			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+					hole_start, hole_end, hole_size);
+			total_free += hole_size;
+		}
 	}
-	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+	total = total_free + total_used;
+
+	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 0d791462f7b2..34fa36f2de70 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -42,23 +42,24 @@
 #endif
 
 struct drm_mm_node {
-	struct list_head free_stack;
 	struct list_head node_list;
-	unsigned free : 1;
+	struct list_head hole_stack;
+	unsigned hole_follows : 1;
 	unsigned scanned_block : 1;
 	unsigned scanned_prev_free : 1;
 	unsigned scanned_next_free : 1;
+	unsigned scanned_preceeds_hole : 1;
 	unsigned long start;
 	unsigned long size;
 	struct drm_mm *mm;
 };
 
 struct drm_mm {
-	/* List of free memory blocks, most recently freed ordered. */
-	struct list_head free_stack;
-	/* List of all memory nodes, ordered according to the (increasing) start
-	 * address of the memory node. */
-	struct list_head node_list;
+	/* List of all memory nodes that immediately precede a free hole. */
+	struct list_head hole_stack;
+	/* head_node.node_list is the list of all memory nodes, ordered
+	 * according to the (increasing) start address of the memory node. */
+	struct drm_mm_node head_node;
 	struct list_head unused_nodes;
 	int num_unused;
 	spinlock_t unused_lock;
@@ -74,9 +75,11 @@ struct drm_mm {
 
 static inline bool drm_mm_initialized(struct drm_mm *mm)
 {
-	return mm->free_stack.next;
+	return mm->hole_stack.next;
 }
-
+#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+						&(mm)->head_node.node_list, \
+						node_list)
 /*
  * Basic range manager support (drm_mm.c)
  */
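
The commit message notes that drm_mm_for_each_node becomes much more useful once struct drm_mm_node is embeddable. As a usage sketch only (hypothetical driver code, not part of this patch; the function name and printk format are made up, and the usual DRM headers are assumed), a caller can now walk every allocated node without touching the old node_list head directly:

#include "drmP.h"

static void sketch_dump_nodes(struct drm_mm *mm)
{
	struct drm_mm_node *entry;

	/* Visits each allocated node in address order; the dummy head node
	 * is the list head itself, so it is never handed to the body. Holes
	 * are implied by entry->hole_follows rather than stored as nodes. */
	drm_mm_for_each_node(entry, mm)
		printk(KERN_DEBUG "node [0x%08lx, 0x%08lx)%s\n",
		       entry->start, entry->start + entry->size,
		       entry->hole_follows ? " +hole" : "");
}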