 drivers/gpu/drm/drm_mm.c | 167
 include/drm/drm_mm.h     |  15
 2 files changed, 177 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index fd86a6c13aac..da99edc50888 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -53,9 +53,9 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
 	struct drm_mm_node *child;
 
 	if (atomic)
-		child = kmalloc(sizeof(*child), GFP_ATOMIC);
+		child = kzalloc(sizeof(*child), GFP_ATOMIC);
 	else
-		child = kmalloc(sizeof(*child), GFP_KERNEL);
+		child = kzalloc(sizeof(*child), GFP_KERNEL);
 
 	if (unlikely(child == NULL)) {
 		spin_lock(&mm->unused_lock);
@@ -85,7 +85,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
 	spin_lock(&mm->unused_lock);
 	while (mm->num_unused < MM_UNUSED_TARGET) {
 		spin_unlock(&mm->unused_lock);
-		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		node = kzalloc(sizeof(*node), GFP_KERNEL);
 		spin_lock(&mm->unused_lock);
 
 		if (unlikely(node == NULL)) {
@@ -134,7 +134,6 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
 
 	INIT_LIST_HEAD(&child->free_stack);
 
-	child->free = 0;
 	child->size = size;
 	child->start = parent->start;
 	child->mm = parent->mm;
@@ -235,6 +234,9 @@ void drm_mm_put_block(struct drm_mm_node *cur)
 
 	int merged = 0;
 
+	BUG_ON(cur->scanned_block || cur->scanned_prev_free
+	       || cur->scanned_next_free);
+
 	if (cur_head->prev != root_head) {
 		prev_node =
 			list_entry(cur_head->prev, struct drm_mm_node, node_list);
@@ -312,6 +314,8 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 	struct drm_mm_node *best;
 	unsigned long best_size;
 
+	BUG_ON(mm->scanned_blocks);
+
 	best = NULL;
 	best_size = ~0UL;
 
@@ -343,6 +347,8 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 	struct drm_mm_node *best;
 	unsigned long best_size;
 
+	BUG_ON(mm->scanned_blocks);
+
 	best = NULL;
 	best_size = ~0UL;
 
@@ -366,6 +372,158 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free_in_range);
 
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the
+ * desired hole.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+		      unsigned alignment)
+{
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_size = 0;
+}
+EXPORT_SYMBOL(drm_mm_init_scan);
+
+/**
+ * Add a node to the scan list that might be freed to make space for the
+ * desired hole.
+ *
+ * Returns non-zero if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct list_head *prev_free, *next_free;
+	struct drm_mm_node *prev_node, *next_node;
+
+	mm->scanned_blocks++;
+
+	prev_free = next_free = NULL;
+
+	BUG_ON(node->free);
+	node->scanned_block = 1;
+	node->free = 1;
+
+	if (node->node_list.prev != &mm->node_list) {
+		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+				       node_list);
+
+		if (prev_node->free) {
+			list_del(&prev_node->node_list);
+
+			node->start = prev_node->start;
+			node->size += prev_node->size;
+
+			prev_node->scanned_prev_free = 1;
+
+			prev_free = &prev_node->free_stack;
+		}
+	}
+
+	if (node->node_list.next != &mm->node_list) {
+		next_node = list_entry(node->node_list.next, struct drm_mm_node,
+				       node_list);
+
+		if (next_node->free) {
+			list_del(&next_node->node_list);
+
+			node->size += next_node->size;
+
+			next_node->scanned_next_free = 1;
+
+			next_free = &next_node->free_stack;
+		}
+	}
+
+	/* The free_stack list is not used for allocated objects, so these two
+	 * pointers can be abused (as long as no allocations happen in this
+	 * memory manager). */
+	node->free_stack.prev = prev_free;
+	node->free_stack.next = next_free;
+
+	if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
+		mm->scan_hit_start = node->start;
+		mm->scan_hit_size = node->size;
+
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_add_block);
+
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed from the scan list in the reverse order in which
+ * they have been added, otherwise the internal state of the memory manager
+ * will be corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then
+ * return the just-freed block (because it is at the top of the free_stack
+ * list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+	struct drm_mm *mm = node->mm;
+	struct drm_mm_node *prev_node, *next_node;
+
+	mm->scanned_blocks--;
+
+	BUG_ON(!node->scanned_block);
+	node->scanned_block = 0;
+	node->free = 0;
+
+	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
+			       free_stack);
+	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
+			       free_stack);
+
+	if (prev_node) {
+		BUG_ON(!prev_node->scanned_prev_free);
+		prev_node->scanned_prev_free = 0;
+
+		list_add_tail(&prev_node->node_list, &node->node_list);
+
+		node->start = prev_node->start + prev_node->size;
+		node->size -= prev_node->size;
+	}
+
+	if (next_node) {
+		BUG_ON(!next_node->scanned_next_free);
+		next_node->scanned_next_free = 0;
+
+		list_add(&next_node->node_list, &node->node_list);
+
+		node->size -= next_node->size;
+	}
+
+	INIT_LIST_HEAD(&node->free_stack);
+
+	/* Only need to check for containment because the start and size of
+	 * the complete resulting free block (not just the desired part) are
+	 * stored. */
+	if (node->start >= mm->scan_hit_start &&
+	    node->start + node->size
+			<= mm->scan_hit_start + mm->scan_hit_size) {
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
+
 int drm_mm_clean(struct drm_mm * mm)
 {
 	struct list_head *head = &mm->node_list;
@@ -380,6 +538,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 	INIT_LIST_HEAD(&mm->free_stack);
 	INIT_LIST_HEAD(&mm->unused_nodes);
 	mm->num_unused = 0;
+	mm->scanned_blocks = 0;
 	spin_lock_init(&mm->unused_lock);
 
 	return drm_mm_create_tail_node(mm, start, size, 0);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index e8740cc185cf..bf01531193d5 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -44,7 +44,10 @@
 struct drm_mm_node {
 	struct list_head free_stack;
 	struct list_head node_list;
-	int free;
+	unsigned free : 1;
+	unsigned scanned_block : 1;
+	unsigned scanned_prev_free : 1;
+	unsigned scanned_next_free : 1;
 	unsigned long start;
 	unsigned long size;
 	struct drm_mm *mm;
@@ -59,6 +62,11 @@ struct drm_mm {
 	struct list_head unused_nodes;
 	int num_unused;
 	spinlock_t unused_lock;
+	unsigned scan_alignment;
+	unsigned long scan_size;
+	unsigned long scan_hit_start;
+	unsigned scan_hit_size;
+	unsigned scanned_blocks;
 };
 
 /*
@@ -135,6 +143,11 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
 	return block->mm;
 }
 
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+		      unsigned alignment);
+int drm_mm_scan_add_block(struct drm_mm_node *node);
+int drm_mm_scan_remove_block(struct drm_mm_node *node);
+
 extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 #ifdef CONFIG_DEBUG_FS
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
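
As a usage illustration (not part of the patch): a driver-side eviction helper would typically walk its LRU list, feed each block to drm_mm_scan_add_block() until a hole is reported, drain the scan list in reverse order of addition, and evict only the blocks for which drm_mm_scan_remove_block() returns 1. The sketch below shows that pattern under those assumptions; struct my_obj, lru_link, scan_link, evict_one() and evict_something() are hypothetical driver-side names, and only the drm_mm_* and list helpers come from the kernel.

#include <linux/list.h>
#include <linux/errno.h>
#include <drm/drm_mm.h>

/* Hypothetical driver object wrapping a drm_mm allocation. */
struct my_obj {
	struct drm_mm_node *node;	/* block allocated from the drm_mm */
	struct list_head lru_link;	/* position on the driver's LRU list */
	struct list_head scan_link;	/* temporary scan/unwind list */
};

/* Driver-specific eviction hook (stub): in a real driver this would unbind
 * the object before returning its block to the manager. */
static void evict_one(struct my_obj *obj)
{
	drm_mm_put_block(obj->node);
	obj->node = NULL;
}

static int evict_something(struct drm_mm *mm, struct list_head *lru,
			   unsigned long size, unsigned alignment)
{
	struct my_obj *obj, *tmp;
	LIST_HEAD(unwind);
	LIST_HEAD(eviction);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment);

	/* Scan the LRU, oldest first, until the scanned blocks would
	 * coalesce into a hole big enough for the request. */
	list_for_each_entry(obj, lru, lru_link) {
		/* Prepending builds the unwind list in reverse add order. */
		list_add(&obj->scan_link, &unwind);
		if (drm_mm_scan_add_block(obj->node)) {
			found = 1;
			break;
		}
	}

	/* Drain the scan list in reverse order of addition.  While it is
	 * non-empty no other drm_mm operation is allowed, so blocks to be
	 * evicted are only collected here and freed afterwards. */
	list_for_each_entry_safe(obj, tmp, &unwind, scan_link) {
		if (drm_mm_scan_remove_block(obj->node) && found)
			list_move(&obj->scan_link, &eviction);
		else
			list_del(&obj->scan_link);
	}

	/* The scan list is now empty, so the selected blocks may go. */
	list_for_each_entry_safe(obj, tmp, &eviction, scan_link) {
		list_del(&obj->scan_link);
		evict_one(obj);
	}

	return found ? 0 : -ENOSPC;
}

The two-pass drain mirrors the warning in drm_mm_init_scan(): eviction itself touches the memory manager, so it has to wait until every scanned block has been taken back off the scan list.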