aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2010-07-02 10:02:16 -0400
committerDave Airlie <airlied@redhat.com>2010-07-06 22:29:51 -0400
commit709ea97145c125b3811ff70429e90ebdb0e832e5 (patch)
tree222c5cafcae50f06e0e2c2591ab09f3bd1e89827 /drivers/gpu
parent7a6b2896f261894dde287d3faefa4b432cddca53 (diff)
drm: implement helper functions for scanning lru list
These helper functions can be used to efficiently scan lru list for eviction. Eviction becomes a three stage process: 1. Scanning through the lru list until a suitable hole has been found. 2. Scan backwards to restore drm_mm consistency and find out which objects fall into the hole. 3. Evict the objects that fall into the hole. These helper functions don't allocate any memory (at the price of not allowing any other concurrent operations). Hence this can also be used for ttm (which does lru scanning under a spinlock). Evicting objects in this fashion should be more fair than the current approach by i915 (scan the lru for an object large enough to contain the new object). It's also more efficient than the current approach used by ttm (unconditionally evict objects from the lru until there's enough free space). Signed-Off-by: Daniel Vetter <daniel.vetter@ffwll.ch> Acked-by: Thomas Hellstrom <thellstrom@vmwgfx.com> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/drm_mm.c167
1 files changed, 163 insertions, 4 deletions
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index fd86a6c13aac..da99edc50888 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -53,9 +53,9 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
53 struct drm_mm_node *child; 53 struct drm_mm_node *child;
54 54
55 if (atomic) 55 if (atomic)
56 child = kmalloc(sizeof(*child), GFP_ATOMIC); 56 child = kzalloc(sizeof(*child), GFP_ATOMIC);
57 else 57 else
58 child = kmalloc(sizeof(*child), GFP_KERNEL); 58 child = kzalloc(sizeof(*child), GFP_KERNEL);
59 59
60 if (unlikely(child == NULL)) { 60 if (unlikely(child == NULL)) {
61 spin_lock(&mm->unused_lock); 61 spin_lock(&mm->unused_lock);
@@ -85,7 +85,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
85 spin_lock(&mm->unused_lock); 85 spin_lock(&mm->unused_lock);
86 while (mm->num_unused < MM_UNUSED_TARGET) { 86 while (mm->num_unused < MM_UNUSED_TARGET) {
87 spin_unlock(&mm->unused_lock); 87 spin_unlock(&mm->unused_lock);
88 node = kmalloc(sizeof(*node), GFP_KERNEL); 88 node = kzalloc(sizeof(*node), GFP_KERNEL);
89 spin_lock(&mm->unused_lock); 89 spin_lock(&mm->unused_lock);
90 90
91 if (unlikely(node == NULL)) { 91 if (unlikely(node == NULL)) {
@@ -134,7 +134,6 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
134 134
135 INIT_LIST_HEAD(&child->free_stack); 135 INIT_LIST_HEAD(&child->free_stack);
136 136
137 child->free = 0;
138 child->size = size; 137 child->size = size;
139 child->start = parent->start; 138 child->start = parent->start;
140 child->mm = parent->mm; 139 child->mm = parent->mm;
@@ -235,6 +234,9 @@ void drm_mm_put_block(struct drm_mm_node *cur)
235 234
236 int merged = 0; 235 int merged = 0;
237 236
237 BUG_ON(cur->scanned_block || cur->scanned_prev_free
238 || cur->scanned_next_free);
239
238 if (cur_head->prev != root_head) { 240 if (cur_head->prev != root_head) {
239 prev_node = 241 prev_node =
240 list_entry(cur_head->prev, struct drm_mm_node, node_list); 242 list_entry(cur_head->prev, struct drm_mm_node, node_list);
@@ -312,6 +314,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
312 struct drm_mm_node *best; 314 struct drm_mm_node *best;
313 unsigned long best_size; 315 unsigned long best_size;
314 316
317 BUG_ON(mm->scanned_blocks);
318
315 best = NULL; 319 best = NULL;
316 best_size = ~0UL; 320 best_size = ~0UL;
317 321
@@ -343,6 +347,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
343 struct drm_mm_node *best; 347 struct drm_mm_node *best;
344 unsigned long best_size; 348 unsigned long best_size;
345 349
350 BUG_ON(mm->scanned_blocks);
351
346 best = NULL; 352 best = NULL;
347 best_size = ~0UL; 353 best_size = ~0UL;
348 354
@@ -366,6 +372,158 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
366} 372}
367EXPORT_SYMBOL(drm_mm_search_free_in_range); 373EXPORT_SYMBOL(drm_mm_search_free_in_range);
368 374
375/**
376 * Initialize lru scanning.
377 *
378 * This simply sets up the scanning routines with the parameters for the desired
379 * hole.
380 *
381 * Warning: As long as the scan list is non-empty, no other operations than
382 * adding/removing nodes to/from the scan list are allowed.
383 */
384void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
385 unsigned alignment)
386{
387 mm->scan_alignment = alignment;
388 mm->scan_size = size;
389 mm->scanned_blocks = 0;
390 mm->scan_hit_start = 0;
391 mm->scan_hit_size = 0;
392}
393EXPORT_SYMBOL(drm_mm_init_scan);
394
395/**
396 * Add a node to the scan list that might be freed to make space for the desired
397 * hole.
398 *
399 * Returns non-zero, if a hole has been found, zero otherwise.
400 */
401int drm_mm_scan_add_block(struct drm_mm_node *node)
402{
403 struct drm_mm *mm = node->mm;
404 struct list_head *prev_free, *next_free;
405 struct drm_mm_node *prev_node, *next_node;
406
407 mm->scanned_blocks++;
408
409 prev_free = next_free = NULL;
410
411 BUG_ON(node->free);
412 node->scanned_block = 1;
413 node->free = 1;
414
415 if (node->node_list.prev != &mm->node_list) {
416 prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
417 node_list);
418
419 if (prev_node->free) {
420 list_del(&prev_node->node_list);
421
422 node->start = prev_node->start;
423 node->size += prev_node->size;
424
425 prev_node->scanned_prev_free = 1;
426
427 prev_free = &prev_node->free_stack;
428 }
429 }
430
431 if (node->node_list.next != &mm->node_list) {
432 next_node = list_entry(node->node_list.next, struct drm_mm_node,
433 node_list);
434
435 if (next_node->free) {
436 list_del(&next_node->node_list);
437
438 node->size += next_node->size;
439
440 next_node->scanned_next_free = 1;
441
442 next_free = &next_node->free_stack;
443 }
444 }
445
446 /* The free_stack list is not used for allocated objects, so these two
447 * pointers can be abused (as long as no allocations in this memory
448 * manager happens). */
449 node->free_stack.prev = prev_free;
450 node->free_stack.next = next_free;
451
452 if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
453 mm->scan_hit_start = node->start;
454 mm->scan_hit_size = node->size;
455
456 return 1;
457 }
458
459 return 0;
460}
461EXPORT_SYMBOL(drm_mm_scan_add_block);
462
463/**
464 * Remove a node from the scan list.
465 *
466 * Nodes _must_ be removed in the exact same order from the scan list as they
467 * have been added, otherwise the internal state of the memory manager will be
468 * corrupted.
469 *
470 * When the scan list is empty, the selected memory nodes can be freed. An
471 * immediately following drm_mm_search_free with best_match = 0 will then return
472 * the just freed block (because its at the top of the free_stack list).
473 *
474 * Returns one if this block should be evicted, zero otherwise. Will always
475 * return zero when no hole has been found.
476 */
477int drm_mm_scan_remove_block(struct drm_mm_node *node)
478{
479 struct drm_mm *mm = node->mm;
480 struct drm_mm_node *prev_node, *next_node;
481
482 mm->scanned_blocks--;
483
484 BUG_ON(!node->scanned_block);
485 node->scanned_block = 0;
486 node->free = 0;
487
488 prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
489 free_stack);
490 next_node = list_entry(node->free_stack.next, struct drm_mm_node,
491 free_stack);
492
493 if (prev_node) {
494 BUG_ON(!prev_node->scanned_prev_free);
495 prev_node->scanned_prev_free = 0;
496
497 list_add_tail(&prev_node->node_list, &node->node_list);
498
499 node->start = prev_node->start + prev_node->size;
500 node->size -= prev_node->size;
501 }
502
503 if (next_node) {
504 BUG_ON(!next_node->scanned_next_free);
505 next_node->scanned_next_free = 0;
506
507 list_add(&next_node->node_list, &node->node_list);
508
509 node->size -= next_node->size;
510 }
511
512 INIT_LIST_HEAD(&node->free_stack);
513
514 /* Only need to check for containment because start&size for the
515 * complete resulting free block (not just the desired part) is
516 * stored. */
517 if (node->start >= mm->scan_hit_start &&
518 node->start + node->size
519 <= mm->scan_hit_start + mm->scan_hit_size) {
520 return 1;
521 }
522
523 return 0;
524}
525EXPORT_SYMBOL(drm_mm_scan_remove_block);
526
369int drm_mm_clean(struct drm_mm * mm) 527int drm_mm_clean(struct drm_mm * mm)
370{ 528{
371 struct list_head *head = &mm->node_list; 529 struct list_head *head = &mm->node_list;
@@ -380,6 +538,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
380 INIT_LIST_HEAD(&mm->free_stack); 538 INIT_LIST_HEAD(&mm->free_stack);
381 INIT_LIST_HEAD(&mm->unused_nodes); 539 INIT_LIST_HEAD(&mm->unused_nodes);
382 mm->num_unused = 0; 540 mm->num_unused = 0;
541 mm->scanned_blocks = 0;
383 spin_lock_init(&mm->unused_lock); 542 spin_lock_init(&mm->unused_lock);
384 543
385 return drm_mm_create_tail_node(mm, start, size, 0); 544 return drm_mm_create_tail_node(mm, start, size, 0);