 drivers/gpu/drm/drm_mm.c          | 359
 drivers/gpu/drm/i915/i915_gem.c   |   4
 drivers/gpu/drm/ttm/ttm_bo.c      |   6
 drivers/gpu/drm/ttm/ttm_bo_util.c |   2
 include/drm/drm_mm.h              |  27
 5 files changed, 254 insertions(+), 144 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2ac074c8f5d2..da99edc50888 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -48,44 +48,14 @@
 
 #define MM_UNUSED_TARGET 4
 
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
-{
-	struct list_head *tail_node;
-	struct drm_mm_node *entry;
-
-	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
-	if (!entry->free)
-		return 0;
-
-	return entry->size;
-}
-
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
-{
-	struct list_head *tail_node;
-	struct drm_mm_node *entry;
-
-	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
-	if (!entry->free)
-		return -ENOMEM;
-
-	if (entry->size <= size)
-		return -ENOMEM;
-
-	entry->size -= size;
-	return 0;
-}
-
 static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 {
 	struct drm_mm_node *child;
 
 	if (atomic)
-		child = kmalloc(sizeof(*child), GFP_ATOMIC);
+		child = kzalloc(sizeof(*child), GFP_ATOMIC);
 	else
-		child = kmalloc(sizeof(*child), GFP_KERNEL);
+		child = kzalloc(sizeof(*child), GFP_KERNEL);
 
 	if (unlikely(child == NULL)) {
 		spin_lock(&mm->unused_lock);
@@ -94,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 		else {
 			child =
 			    list_entry(mm->unused_nodes.next,
-				       struct drm_mm_node, fl_entry);
-			list_del(&child->fl_entry);
+				       struct drm_mm_node, free_stack);
+			list_del(&child->free_stack);
 			--mm->num_unused;
 		}
 		spin_unlock(&mm->unused_lock);
@@ -115,7 +85,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
 	spin_lock(&mm->unused_lock);
 	while (mm->num_unused < MM_UNUSED_TARGET) {
 		spin_unlock(&mm->unused_lock);
-		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
 		spin_lock(&mm->unused_lock);
 
 		if (unlikely(node == NULL)) {
@@ -124,7 +94,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
 			return ret;
 		}
 		++mm->num_unused;
-		list_add_tail(&node->fl_entry, &mm->unused_nodes);
+		list_add_tail(&node->free_stack, &mm->unused_nodes);
 	}
 	spin_unlock(&mm->unused_lock);
 	return 0;
@@ -146,27 +116,12 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
 	child->start = start;
 	child->mm = mm;
 
-	list_add_tail(&child->ml_entry, &mm->ml_entry);
-	list_add_tail(&child->fl_entry, &mm->fl_entry);
+	list_add_tail(&child->node_list, &mm->node_list);
+	list_add_tail(&child->free_stack, &mm->free_stack);
 
 	return 0;
 }
 
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
-{
-	struct list_head *tail_node;
-	struct drm_mm_node *entry;
-
-	tail_node = mm->ml_entry.prev;
-	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
-	if (!entry->free) {
-		return drm_mm_create_tail_node(mm, entry->start + entry->size,
-					       size, atomic);
-	}
-	entry->size += size;
-	return 0;
-}
-
 static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 						 unsigned long size,
 						 int atomic)
@@ -177,15 +132,14 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 	if (unlikely(child == NULL))
 		return NULL;
 
-	INIT_LIST_HEAD(&child->fl_entry);
+	INIT_LIST_HEAD(&child->free_stack);
 
-	child->free = 0;
 	child->size = size;
 	child->start = parent->start;
 	child->mm = parent->mm;
 
-	list_add_tail(&child->ml_entry, &parent->ml_entry);
-	INIT_LIST_HEAD(&child->fl_entry);
+	list_add_tail(&child->node_list, &parent->node_list);
+	INIT_LIST_HEAD(&child->free_stack);
 
 	parent->size -= size;
 	parent->start += size;
@@ -213,7 +167,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 	}
 
 	if (node->size == size) {
-		list_del_init(&node->fl_entry);
+		list_del_init(&node->free_stack);
 		node->free = 0;
 	} else {
 		node = drm_mm_split_at_start(node, size, atomic);
@@ -251,7 +205,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
 	}
 
 	if (node->size == size) {
-		list_del_init(&node->fl_entry);
+		list_del_init(&node->free_stack);
 		node->free = 0;
 	} else {
 		node = drm_mm_split_at_start(node, size, atomic);
@@ -273,16 +227,19 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 {
 
 	struct drm_mm *mm = cur->mm;
-	struct list_head *cur_head = &cur->ml_entry;
-	struct list_head *root_head = &mm->ml_entry;
+	struct list_head *cur_head = &cur->node_list;
+	struct list_head *root_head = &mm->node_list;
 	struct drm_mm_node *prev_node = NULL;
 	struct drm_mm_node *next_node;
 
 	int merged = 0;
 
+	BUG_ON(cur->scanned_block || cur->scanned_prev_free
+	       || cur->scanned_next_free);
+
 	if (cur_head->prev != root_head) {
 		prev_node =
-		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
 		if (prev_node->free) {
 			prev_node->size += cur->size;
 			merged = 1;
@@ -290,15 +247,15 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 	}
 	if (cur_head->next != root_head) {
 		next_node =
-		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+		    list_entry(cur_head->next, struct drm_mm_node, node_list);
 		if (next_node->free) {
 			if (merged) {
 				prev_node->size += next_node->size;
-				list_del(&next_node->ml_entry);
-				list_del(&next_node->fl_entry);
+				list_del(&next_node->node_list);
+				list_del(&next_node->free_stack);
 				spin_lock(&mm->unused_lock);
 				if (mm->num_unused < MM_UNUSED_TARGET) {
-					list_add(&next_node->fl_entry,
+					list_add(&next_node->free_stack,
 						 &mm->unused_nodes);
 					++mm->num_unused;
 				} else
@@ -313,12 +270,12 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 	}
 	if (!merged) {
 		cur->free = 1;
-		list_add(&cur->fl_entry, &mm->fl_entry);
+		list_add(&cur->free_stack, &mm->free_stack);
 	} else {
-		list_del(&cur->ml_entry);
+		list_del(&cur->node_list);
 		spin_lock(&mm->unused_lock);
 		if (mm->num_unused < MM_UNUSED_TARGET) {
-			list_add(&cur->fl_entry, &mm->unused_nodes);
+			list_add(&cur->free_stack, &mm->unused_nodes);
 			++mm->num_unused;
 		} else
 			kfree(cur);
@@ -328,40 +285,50 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 
 EXPORT_SYMBOL(drm_mm_put_block);
 
+static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
+			      unsigned alignment)
+{
+	unsigned wasted = 0;
+
+	if (entry->size < size)
+		return 0;
+
+	if (alignment) {
+		register unsigned tmp = entry->start % alignment;
+		if (tmp)
+			wasted = alignment - tmp;
+	}
+
+	if (entry->size >= size + wasted) {
+		return 1;
+	}
+
+	return 0;
+}
+
 struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 				       unsigned long size,
 				       unsigned alignment, int best_match)
 {
-	struct list_head *list;
-	const struct list_head *free_stack = &mm->fl_entry;
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
 	unsigned long best_size;
-	unsigned wasted;
+
+	BUG_ON(mm->scanned_blocks);
 
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each(list, free_stack) {
-		entry = list_entry(list, struct drm_mm_node, fl_entry);
-		wasted = 0;
-
-		if (entry->size < size)
+	list_for_each_entry(entry, &mm->free_stack, free_stack) {
+		if (!check_free_mm_node(entry, size, alignment))
 			continue;
 
-		if (alignment) {
-			register unsigned tmp = entry->start % alignment;
-			if (tmp)
-				wasted += alignment - tmp;
-		}
+		if (!best_match)
+			return entry;
 
-		if (entry->size >= size + wasted) {
-			if (!best_match)
-				return entry;
-			if (entry->size < best_size) {
-				best = entry;
-				best_size = entry->size;
-			}
+		if (entry->size < best_size) {
+			best = entry;
+			best_size = entry->size;
 		}
 	}
 
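(As a concrete illustration of the helper just added: for an entry with start == 0x1003 and alignment == 0x1000, tmp == 3 and wasted == 0xffd, so check_free_mm_node() accepts the block only when entry->size >= size + 0xffd, which places the aligned allocation start at 0x2000.)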
@@ -376,43 +343,28 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 					unsigned long end,
 					int best_match)
 {
-	struct list_head *list;
-	const struct list_head *free_stack = &mm->fl_entry;
 	struct drm_mm_node *entry;
 	struct drm_mm_node *best;
 	unsigned long best_size;
-	unsigned wasted;
+
+	BUG_ON(mm->scanned_blocks);
 
 	best = NULL;
 	best_size = ~0UL;
 
-	list_for_each(list, free_stack) {
-		entry = list_entry(list, struct drm_mm_node, fl_entry);
-		wasted = 0;
-
-		if (entry->size < size)
-			continue;
-
+	list_for_each_entry(entry, &mm->free_stack, free_stack) {
 		if (entry->start > end || (entry->start+entry->size) < start)
 			continue;
 
-		if (entry->start < start)
-			wasted += start - entry->start;
+		if (!check_free_mm_node(entry, size, alignment))
+			continue;
 
-		if (alignment) {
-			register unsigned tmp = (entry->start + wasted) % alignment;
-			if (tmp)
-				wasted += alignment - tmp;
-		}
+		if (!best_match)
+			return entry;
 
-		if (entry->size >= size + wasted &&
-		    (entry->start + wasted + size) <= end) {
-			if (!best_match)
-				return entry;
-			if (entry->size < best_size) {
-				best = entry;
-				best_size = entry->size;
-			}
+		if (entry->size < best_size) {
+			best = entry;
+			best_size = entry->size;
 		}
 	}
 
@@ -420,9 +372,161 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free_in_range);
 
+/**
+ * Initialize lru scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+		      unsigned alignment)
+{
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_size = 0;
+}
+EXPORT_SYMBOL(drm_mm_init_scan);
+
+/**
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+ * Returns non-zero if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct list_head *prev_free, *next_free;
+	struct drm_mm_node *prev_node, *next_node;
+
+	mm->scanned_blocks++;
+
+	prev_free = next_free = NULL;
+
+	BUG_ON(node->free);
+	node->scanned_block = 1;
+	node->free = 1;
+
+	if (node->node_list.prev != &mm->node_list) {
+		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+				       node_list);
+
+		if (prev_node->free) {
+			list_del(&prev_node->node_list);
+
+			node->start = prev_node->start;
+			node->size += prev_node->size;
+
+			prev_node->scanned_prev_free = 1;
+
+			prev_free = &prev_node->free_stack;
+		}
+	}
+
+	if (node->node_list.next != &mm->node_list) {
+		next_node = list_entry(node->node_list.next, struct drm_mm_node,
+				       node_list);
+
+		if (next_node->free) {
+			list_del(&next_node->node_list);
+
+			node->size += next_node->size;
+
+			next_node->scanned_next_free = 1;
+
+			next_free = &next_node->free_stack;
+		}
+	}
+
+	/* The free_stack list is not used for allocated objects, so these two
+	 * pointers can be abused (as long as no allocations in this memory
+	 * manager happen). */
+	node->free_stack.prev = prev_free;
+	node->free_stack.next = next_free;
+
+	if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
+		mm->scan_hit_start = node->start;
+		mm->scan_hit_size = node->size;
+
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_add_block);
+
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed in the exact same order from the scan list as they
+ * have been added, otherwise the internal state of the memory manager will be
+ * corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then return
+ * the just freed block (because it's at the top of the free_stack list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node, *next_node;
+
+	mm->scanned_blocks--;
+
+	BUG_ON(!node->scanned_block);
+	node->scanned_block = 0;
+	node->free = 0;
+
+	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
+			       free_stack);
+	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
+			       free_stack);
+
+	if (prev_node) {
+		BUG_ON(!prev_node->scanned_prev_free);
+		prev_node->scanned_prev_free = 0;
+
+		list_add_tail(&prev_node->node_list, &node->node_list);
+
+		node->start = prev_node->start + prev_node->size;
+		node->size -= prev_node->size;
+	}
+
+	if (next_node) {
+		BUG_ON(!next_node->scanned_next_free);
+		next_node->scanned_next_free = 0;
+
+		list_add(&next_node->node_list, &node->node_list);
+
+		node->size -= next_node->size;
+	}
+
+	INIT_LIST_HEAD(&node->free_stack);
+
+	/* Only need to check for containment because start & size for the
+	 * complete resulting free block (not just the desired part) is
+	 * stored. */
+	if (node->start >= mm->scan_hit_start &&
+	    node->start + node->size
+			<= mm->scan_hit_start + mm->scan_hit_size) {
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
+
 int drm_mm_clean(struct drm_mm * mm)
 {
-	struct list_head *head = &mm->ml_entry;
+	struct list_head *head = &mm->node_list;
 
 	return (head->next->next == head);
 }
@@ -430,10 +534,11 @@ EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
-	INIT_LIST_HEAD(&mm->ml_entry);
-	INIT_LIST_HEAD(&mm->fl_entry);
+	INIT_LIST_HEAD(&mm->node_list);
+	INIT_LIST_HEAD(&mm->free_stack);
 	INIT_LIST_HEAD(&mm->unused_nodes);
 	mm->num_unused = 0;
+	mm->scanned_blocks = 0;
 	spin_lock_init(&mm->unused_lock);
 
 	return drm_mm_create_tail_node(mm, start, size, 0);
@@ -442,25 +547,25 @@ EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-	struct list_head *bnode = mm->fl_entry.next;
+	struct list_head *bnode = mm->free_stack.next;
 	struct drm_mm_node *entry;
 	struct drm_mm_node *next;
 
-	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+	entry = list_entry(bnode, struct drm_mm_node, free_stack);
 
-	if (entry->ml_entry.next != &mm->ml_entry ||
-	    entry->fl_entry.next != &mm->fl_entry) {
+	if (entry->node_list.next != &mm->node_list ||
+	    entry->free_stack.next != &mm->free_stack) {
 		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
 		return;
 	}
 
-	list_del(&entry->fl_entry);
-	list_del(&entry->ml_entry);
+	list_del(&entry->free_stack);
+	list_del(&entry->node_list);
 	kfree(entry);
 
 	spin_lock(&mm->unused_lock);
-	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
-		list_del(&entry->fl_entry);
+	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
+		list_del(&entry->free_stack);
 		kfree(entry);
 		--mm->num_unused;
 	}
@@ -475,7 +580,7 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 	struct drm_mm_node *entry;
 	int total_used = 0, total_free = 0, total = 0;
 
-	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+	list_for_each_entry(entry, &mm->node_list, node_list) {
 		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
 		       prefix, entry->start, entry->start + entry->size,
 		       entry->size, entry->free ? "free" : "used");
@@ -496,7 +601,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 	struct drm_mm_node *entry;
 	int total_used = 0, total_free = 0, total = 0;
 
-	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+	list_for_each_entry(entry, &mm->node_list, node_list) {
 		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
 		total += entry->size;
 		if (entry->free)
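The scan helpers above are meant to be driven from a driver's eviction path. A minimal sketch of such a roam follows, assuming a hypothetical driver-side wrapper object with an LRU list, a scan_link for unwinding, and an unbind() callback that ends in drm_mm_put_block(); none of these names come from this patch:

#include <linux/list.h>
#include <drm/drm_mm.h>

/* Hypothetical driver-side wrapper; only struct drm_mm_node is real. */
struct evict_obj {
	struct list_head lru;		/* driver LRU, oldest entries first */
	struct list_head scan_link;	/* temporary unwind/evict link */
	struct drm_mm_node *mm_node;
	void (*unbind)(struct evict_obj *obj); /* calls drm_mm_put_block() */
};

static struct drm_mm_node *evict_and_alloc(struct drm_mm *mm,
					   struct list_head *lru,
					   unsigned long size,
					   unsigned alignment)
{
	struct evict_obj *obj, *tmp;
	struct drm_mm_node *hole;
	LIST_HEAD(unwind);
	LIST_HEAD(evict_list);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment);

	/* Roam the LRU; each added block is virtually coalesced with
	 * neighboring free space until a big enough hole shows up. */
	list_for_each_entry(obj, lru, lru) {
		list_add(&obj->scan_link, &unwind); /* LIFO */
		if (drm_mm_scan_add_block(obj->mm_node)) {
			found = 1;
			break;
		}
	}

	/* Restore the manager state by removing every block again, in
	 * exact reverse order of addition; the return value flags the
	 * blocks that overlap the hole. */
	list_for_each_entry_safe(obj, tmp, &unwind, scan_link) {
		if (drm_mm_scan_remove_block(obj->mm_node))
			list_move(&obj->scan_link, &evict_list);
		else
			list_del(&obj->scan_link);
	}

	if (!found)
		return NULL;

	/* Only now, with the scan list empty, may blocks be freed. */
	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link) {
		list_del(&obj->scan_link);
		obj->unbind(obj);
	}

	/* The freed blocks sit on top of the free_stack, so a first-fit
	 * search (best_match = 0) returns the hole just carved out. */
	hole = drm_mm_search_free(mm, size, alignment, 0);
	return hole ? drm_mm_get_block(hole, size, alignment) : NULL;
}

The unwind list is filled with list_add(), i.e. LIFO, so walking it forward yields exactly the reverse of the order in which blocks were added, as drm_mm_scan_remove_block() requires.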
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 074385882ccf..75061b305b8c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2633,10 +2633,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	if (free_space != NULL) {
 		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
 						       alignment);
-		if (obj_priv->gtt_space != NULL) {
-			obj_priv->gtt_space->private = obj;
+		if (obj_priv->gtt_space != NULL)
 			obj_priv->gtt_offset = obj_priv->gtt_space->start;
-		}
 	}
 	if (obj_priv->gtt_space == NULL) {
 		/* If the gtt is empty and we're still having trouble
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 555ebb12ace8..9763288c6b2d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -476,7 +476,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 			++put_count;
 	}
 	if (bo->mem.mm_node) {
-		bo->mem.mm_node->private = NULL;
 		drm_mm_put_block(bo->mem.mm_node);
 		bo->mem.mm_node = NULL;
 	}
@@ -670,7 +669,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
 		printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
 		spin_lock(&glob->lru_lock);
 		if (evict_mem.mm_node) {
-			evict_mem.mm_node->private = NULL;
 			drm_mm_put_block(evict_mem.mm_node);
 			evict_mem.mm_node = NULL;
 		}
@@ -929,8 +927,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		mem->mm_node = node;
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
-		if (node)
-			node->private = bo;
 		return 0;
 	}
 
@@ -973,7 +969,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 						interruptible, no_wait_reserve, no_wait_gpu);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
-			mem->mm_node->private = bo;
 			return 0;
 		}
 		if (ret == -ERESTARTSYS)
@@ -1029,7 +1024,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
-		mem.mm_node->private = NULL;
 		drm_mm_put_block(mem.mm_node);
 		spin_unlock(&glob->lru_lock);
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 13012a1f1486..7cffb3e04232 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,8 +353,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	fbo->vm_node = NULL;
 
 	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
-	if (fbo->mem.mm_node)
-		fbo->mem.mm_node->private = (void *)fbo;
 	kref_init(&fbo->list_kref);
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 4c10be39a43b..bf01531193d5 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -42,21 +42,31 @@
 #endif
 
 struct drm_mm_node {
-	struct list_head fl_entry;
-	struct list_head ml_entry;
-	int free;
+	struct list_head free_stack;
+	struct list_head node_list;
+	unsigned free : 1;
+	unsigned scanned_block : 1;
+	unsigned scanned_prev_free : 1;
+	unsigned scanned_next_free : 1;
 	unsigned long start;
 	unsigned long size;
 	struct drm_mm *mm;
-	void *private;
 };
 
 struct drm_mm {
-	struct list_head fl_entry;
-	struct list_head ml_entry;
+	/* List of free memory blocks, ordered most recently freed first. */
+	struct list_head free_stack;
+	/* List of all memory nodes, ordered according to the (increasing) start
+	 * address of the memory node. */
+	struct list_head node_list;
 	struct list_head unused_nodes;
 	int num_unused;
 	spinlock_t unused_lock;
+	unsigned scan_alignment;
+	unsigned long scan_size;
+	unsigned long scan_hit_start;
+	unsigned scan_hit_size;
+	unsigned scanned_blocks;
 };
 
 /*
@@ -133,6 +143,11 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
 	return block->mm;
 }
 
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+		      unsigned alignment);
+int drm_mm_scan_add_block(struct drm_mm_node *node);
+int drm_mm_scan_remove_block(struct drm_mm_node *node);
+
 extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 #ifdef CONFIG_DEBUG_FS
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
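For reference, the basic allocation lifecycle around these declarations is unchanged by this patch. A minimal sketch (the managed range, size and alignment values are arbitrary, and error handling is trimmed):

#include <linux/errno.h>
#include <drm/drm_mm.h>

static int drm_mm_lifecycle_example(void)
{
	struct drm_mm mm;
	struct drm_mm_node *free_block, *node;

	drm_mm_init(&mm, 0, 16 * 1024 * 1024);	/* manage [0, 16 MiB) */

	/* First-fit search, then carve the block out of the free list. */
	free_block = drm_mm_search_free(&mm, 4096, 4096, 0);
	if (!free_block)
		return -ENOSPC;
	node = drm_mm_get_block(free_block, 4096, 4096); /* may split */

	drm_mm_put_block(node);	/* merges with free neighbors again */
	drm_mm_takedown(&mm);	/* warns and bails if the mm is not clean */

	return 0;
}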