-rw-r--r--   Documentation/DocBook/drm.tmpl |   5
-rw-r--r--   drivers/gpu/drm/drm_mm.c       | 144
-rw-r--r--   include/drm/drm_mm.h           | 154
3 files changed, 251 insertions, 52 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2ac018bfbddf..d68bb0a2dc06 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -931,6 +931,11 @@ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
 !Pdrivers/gpu/drm/drm_mm.c lru scan roaster
 </sect3>
 </sect2>
+<sect2>
+<title>DRM MM Range Allocator Function References</title>
+!Edrivers/gpu/drm/drm_mm.c
+!Iinclude/drm/drm_mm.h
+</sect2>
 </sect1>
 
 <!-- Internals: mode setting -->
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 276a7a27c166..a2d45b748f86 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -144,6 +144,20 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 }
 }
 
+/**
+ * drm_mm_reserve_node - insert a pre-initialized node
+ * @mm: drm_mm allocator to insert @node into
+ * @node: drm_mm_node to insert
+ *
+ * This function inserts an already set-up drm_mm_node into the allocator,
+ * meaning that start, size and color must be set by the caller. This is useful
+ * to initialize the allocator with preallocated objects which must be set-up
+ * before the range allocator can be set-up, e.g. when taking over a firmware
+ * framebuffer.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no hole where @node is.
+ */
 int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 {
 struct drm_mm_node *hole;
@@ -185,9 +199,18 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
 /**
- * Search for free space and insert a preallocated memory node. Returns
- * -ENOSPC if no suitable free area is available. The preallocated memory node
- * must be cleared.
+ * drm_mm_insert_node_generic - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @flags: flags to fine-tune the allocation
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
 */
 int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
 unsigned long size, unsigned alignment,
@@ -259,9 +282,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 }
 
 /**
- * Search for free space and insert a preallocated memory node. Returns
- * -ENOSPC if no suitable free area is available. This is for range
- * restricted allocations. The preallocated memory node must be cleared.
+ * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for this node
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @flags: flags to fine-tune the allocation
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
 */
 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
 unsigned long size, unsigned alignment, unsigned long color,
@@ -284,7 +318,12 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
 EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
 
 /**
- * Remove a memory node from the allocator.
+ * drm_mm_remove_node - Remove a memory node from the allocator.
+ * @node: drm_mm_node to remove
+ *
+ * This just removes a node from its drm_mm allocator. The node does not need to
+ * be cleared again before it can be re-inserted into this or any other drm_mm
+ * allocator. It is a bug to call this function on an un-allocated node.
 */
 void drm_mm_remove_node(struct drm_mm_node *node)
 {
@@ -421,7 +460,13 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
 }
 
 /**
- * Moves an allocation. To be used with embedded struct drm_mm_node.
+ * drm_mm_replace_node - move an allocation from @old to @new
+ * @old: drm_mm_node to remove from the allocator
+ * @new: drm_mm_node which should inherit @old's allocation
+ *
+ * This is useful for when drivers embed the drm_mm_node structure and hence
+ * can't move allocations by reassigning pointers. It's a combination of remove
+ * and insert with the guarantee that the allocation start will match.
 */
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
@@ -467,12 +512,18 @@ EXPORT_SYMBOL(drm_mm_replace_node);
 */
 
 /**
- * Initializa lru scanning.
+ * drm_mm_init_scan - initialize lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
- * hole.
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
 *
- * Warning: As long as the scan list is non-empty, no other operations than
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
 void drm_mm_init_scan(struct drm_mm *mm,
@@ -492,12 +543,20 @@ void drm_mm_init_scan(struct drm_mm *mm,
 EXPORT_SYMBOL(drm_mm_init_scan);
 
 /**
- * Initializa lru scanning.
+ * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
+ * @mm: drm_mm to scan
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @color: opaque tag value to use for the allocation
+ * @start: start of the allowed range for the allocation
+ * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the desired
- * hole. This version is for range-restricted scans.
+ * hole. Note that there's no need to specify allocation flags, since they only
+ * change the place a node is allocated from within a suitable hole.
 *
- * Warning: As long as the scan list is non-empty, no other operations than
+ * Warning:
+ * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
 void drm_mm_init_scan_with_range(struct drm_mm *mm,
@@ -521,12 +580,16 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
 EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 
 /**
+ * drm_mm_scan_add_block - add a node to the scan list
+ * @node: drm_mm_node to add
+ *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
- * Returns non-zero, if a hole has been found, zero otherwise.
+ * Returns:
+ * True if a hole has been found, false otherwise.
 */
-int drm_mm_scan_add_block(struct drm_mm_node *node)
+bool drm_mm_scan_add_block(struct drm_mm_node *node)
 {
 struct drm_mm *mm = node->mm;
 struct drm_mm_node *prev_node;
@@ -566,15 +629,16 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
 mm->scan_size, mm->scan_alignment)) {
 mm->scan_hit_start = hole_start;
 mm->scan_hit_end = hole_end;
-return 1;
+return true;
 }
 
-return 0;
+return false;
 }
 EXPORT_SYMBOL(drm_mm_scan_add_block);
 
 /**
- * Remove a node from the scan list.
+ * drm_mm_scan_remove_block - remove a node from the scan list
+ * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
@@ -584,10 +648,11 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because its at the top of the free_stack list).
 *
- * Returns one if this block should be evicted, zero otherwise. Will always
- * return zero when no hole has been found.
+ * Returns:
+ * True if this block should be evicted, false otherwise. Will always
+ * return false when no hole has been found.
 */
-int drm_mm_scan_remove_block(struct drm_mm_node *node)
+bool drm_mm_scan_remove_block(struct drm_mm_node *node)
 {
 struct drm_mm *mm = node->mm;
 struct drm_mm_node *prev_node;
@@ -608,7 +673,15 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
-int drm_mm_clean(struct drm_mm * mm)
+/**
+ * drm_mm_clean - checks whether an allocator is clean
+ * @mm: drm_mm allocator to check
+ *
+ * Returns:
+ * True if the allocator is completely free, false if there's still a node
+ * allocated in it.
+ */
+bool drm_mm_clean(struct drm_mm * mm)
 {
 struct list_head *head = &mm->head_node.node_list;
 
@@ -616,6 +689,14 @@ int drm_mm_clean(struct drm_mm * mm)
 }
 EXPORT_SYMBOL(drm_mm_clean);
 
+/**
+ * drm_mm_init - initialize a drm-mm allocator
+ * @mm: the drm_mm structure to initialize
+ * @start: start of the range managed by @mm
+ * @size: size of the range managed by @mm
+ *
+ * Note that @mm must be cleared to 0 before calling this function.
+ */
 void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
 INIT_LIST_HEAD(&mm->hole_stack);
@@ -637,6 +718,13 @@ void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 }
 EXPORT_SYMBOL(drm_mm_init);
 
+/**
+ * drm_mm_takedown - clean up a drm_mm allocator
+ * @mm: drm_mm allocator to clean up
+ *
+ * Note that it is a bug to call this function on an allocator which is not
+ * clean.
+ */
 void drm_mm_takedown(struct drm_mm * mm)
 {
 WARN(!list_empty(&mm->head_node.node_list),
@@ -662,6 +750,11 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
 return 0;
 }
 
+/**
+ * drm_mm_debug_table - dump allocator state to dmesg
+ * @mm: drm_mm allocator to dump
+ * @prefix: prefix to use for dumping to dmesg
+ */
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
 struct drm_mm_node *entry;
@@ -700,6 +793,11 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
 return 0;
 }
 
+/**
+ * drm_mm_dump_table - dump allocator state to a seq_file
+ * @m: seq_file to dump to
+ * @mm: drm_mm allocator to dump
+ */
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
 struct drm_mm_node *entry;
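
A minimal usage sketch (not part of the patch) may help put the new kerneldoc above in context. It only uses the documented calls drm_mm_init(), drm_mm_reserve_node(), drm_mm_insert_node_generic(), drm_mm_remove_node(), drm_mm_clean() and drm_mm_takedown(); the example_vram_mgr type, its fields and the 1 MiB firmware-framebuffer reservation are invented for illustration.

#include <linux/bug.h>
#include <linux/string.h>
#include <drm/drm_mm.h>

/* Hypothetical driver state: example_vram_mgr and its fields are made up. */
struct example_vram_mgr {
	struct drm_mm mm;
	struct drm_mm_node fw_fb;	/* pre-initialized reservation */
};

static int example_vram_mgr_init(struct example_vram_mgr *mgr,
				 unsigned long start, unsigned long size)
{
	int ret;

	/* @mm must be cleared to 0 before drm_mm_init(). */
	memset(&mgr->mm, 0, sizeof(mgr->mm));
	drm_mm_init(&mgr->mm, start, size);

	/* Take over a (made-up) 1 MiB firmware framebuffer at the start of
	 * the range: start, size and color are set up by the caller,
	 * drm_mm_reserve_node() only checks that a matching hole exists. */
	memset(&mgr->fw_fb, 0, sizeof(mgr->fw_fb));
	mgr->fw_fb.start = start;
	mgr->fw_fb.size = 1024 * 1024;
	ret = drm_mm_reserve_node(&mgr->mm, &mgr->fw_fb);
	if (ret)	/* -ENOSPC if no hole covers the reservation */
		drm_mm_takedown(&mgr->mm);
	return ret;
}

static int example_vram_alloc(struct example_vram_mgr *mgr,
			      struct drm_mm_node *node,
			      unsigned long size, unsigned alignment)
{
	/* @node must be cleared to 0; -ENOSPC if no suitable hole exists. */
	return drm_mm_insert_node_generic(&mgr->mm, node, size, alignment,
					  0, DRM_MM_SEARCH_DEFAULT);
}

static void example_vram_mgr_fini(struct example_vram_mgr *mgr)
{
	drm_mm_remove_node(&mgr->fw_fb);
	/* drm_mm_takedown() may only be called on a clean allocator. */
	WARN_ON(!drm_mm_clean(&mgr->mm));
	drm_mm_takedown(&mgr->mm);
}
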
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index cba67865d18f..8b6981ab3fcf 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -85,11 +85,31 @@ struct drm_mm {
 unsigned long *start, unsigned long *end);
 };
 
+/**
+ * drm_mm_node_allocated - checks whether a node is allocated
+ * @node: drm_mm_node to check
+ *
+ * Drivers should use this helper for proper encapsulation of drm_mm
+ * internals.
+ *
+ * Returns:
+ * True if the @node is allocated.
+ */
 static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
 {
 return node->allocated;
 }
 
+/**
+ * drm_mm_initialized - checks whether an allocator is initialized
+ * @mm: drm_mm to check
+ *
+ * Drivers should use this helper for proper encapsulation of drm_mm
+ * internals.
+ *
+ * Returns:
+ * True if the @mm is initialized.
+ */
 static inline bool drm_mm_initialized(struct drm_mm *mm)
 {
 return mm->hole_stack.next;
@@ -100,6 +120,17 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no
 return hole_node->start + hole_node->size;
 }
 
+/**
+ * drm_mm_hole_node_start - computes the start of the hole following @node
+ * @hole_node: drm_mm_node which implicitly tracks the following hole
+ *
+ * This is useful for driver-specific debug dumpers. Otherwise drivers should not
+ * inspect holes themselves. Drivers must check first whether a hole indeed
+ * follows by looking at node->hole_follows.
+ *
+ * Returns:
+ * Start of the subsequent hole.
+ */
 static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
 BUG_ON(!hole_node->hole_follows);
@@ -112,18 +143,49 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node
 struct drm_mm_node, node_list)->start;
 }
 
+/**
+ * drm_mm_hole_node_end - computes the end of the hole following @node
+ * @hole_node: drm_mm_node which implicitly tracks the following hole
+ *
+ * This is useful for driver-specific debug dumpers. Otherwise drivers should not
+ * inspect holes themselves. Drivers must check first whether a hole indeed
+ * follows by looking at node->hole_follows.
+ *
+ * Returns:
+ * End of the subsequent hole.
+ */
 static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
 return __drm_mm_hole_node_end(hole_node);
 }
 
+/**
+ * drm_mm_for_each_node - iterator to walk over all allocated nodes
+ * @entry: drm_mm_node structure to assign to in each iteration step
+ * @mm: drm_mm allocator to walk
+ *
+ * This iterator walks over all nodes in the range allocator. It is implemented
+ * with list_for_each, so not safe against removal of elements.
+ */
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
 &(mm)->head_node.node_list, \
 node_list)
 
-/* Note that we need to unroll list_for_each_entry in order to inline
- * setting hole_start and hole_end on each iteration and keep the
- * macro sane.
+/**
+ * drm_mm_for_each_hole - iterator to walk over all holes
+ * @entry: drm_mm_node used internally to track progress
+ * @mm: drm_mm allocator to walk
+ * @hole_start: ulong variable to assign the hole start to on each iteration
+ * @hole_end: ulong variable to assign the hole end to on each iteration
+ *
+ * This iterator walks over all holes in the range allocator. It is implemented
+ * with list_for_each, so not safe against removal of elements. @entry is used
+ * internally and will not reflect a real drm_mm_node for the very first hole.
+ * Hence users of this iterator may not access it.
+ *
+ * Implementation Note:
+ * We need to inline list_for_each_entry in order to be able to set hole_start
+ * and hole_end on each iteration while keeping the macro sane.
 */
 #define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
 for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
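
The iterator contract above is easiest to see in a small example (not part of the patch; example_dump_holes() is an invented name): @entry is only handed to the macro and never dereferenced by the caller.

#include <linux/printk.h>
#include <drm/drm_mm.h>

/* Invented debug helper: walk and print all holes of @mm. */
static void example_dump_holes(struct drm_mm *mm)
{
	struct drm_mm_node *entry;	/* used internally by the macro only */
	unsigned long hole_start, hole_end;

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
		pr_info("hole: 0x%08lx-0x%08lx (%lu bytes)\n",
			hole_start, hole_end, hole_end - hole_start);
}
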
@@ -136,14 +198,30 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 /*
 * Basic range manager support (drm_mm.c)
 */
-extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
 
-extern int drm_mm_insert_node_generic(struct drm_mm *mm,
+int drm_mm_insert_node_generic(struct drm_mm *mm,
 struct drm_mm_node *node,
 unsigned long size,
 unsigned alignment,
 unsigned long color,
 enum drm_mm_search_flags flags);
+/**
+ * drm_mm_insert_node - search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @flags: flags to fine-tune the allocation
+ *
+ * This is a simplified version of drm_mm_insert_node_generic() with @color set
+ * to 0.
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
 static inline int drm_mm_insert_node(struct drm_mm *mm,
 struct drm_mm_node *node,
 unsigned long size,
@@ -153,14 +231,32 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
 }
 
-extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
 struct drm_mm_node *node,
 unsigned long size,
 unsigned alignment,
 unsigned long color,
 unsigned long start,
 unsigned long end,
 enum drm_mm_search_flags flags);
+/**
+ * drm_mm_insert_node_in_range - ranged search for space and insert @node
+ * @mm: drm_mm to allocate from
+ * @node: preallocated node to insert
+ * @size: size of the allocation
+ * @alignment: alignment of the allocation
+ * @start: start of the allowed range for this node
+ * @end: end of the allowed range for this node
+ * @flags: flags to fine-tune the allocation
+ *
+ * This is a simplified version of drm_mm_insert_node_in_range_generic() with
+ * @color set to 0.
+ *
+ * The preallocated node must be cleared to 0.
+ *
+ * Returns:
+ * 0 on success, -ENOSPC if there's no suitable hole.
+ */
 static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 struct drm_mm_node *node,
 unsigned long size,
@@ -173,13 +269,13 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 0, start, end, flags);
 }
 
-extern void drm_mm_remove_node(struct drm_mm_node *node);
-extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern void drm_mm_init(struct drm_mm *mm,
-unsigned long start,
-unsigned long size);
-extern void drm_mm_takedown(struct drm_mm *mm);
-extern int drm_mm_clean(struct drm_mm *mm);
+void drm_mm_remove_node(struct drm_mm_node *node);
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
+void drm_mm_init(struct drm_mm *mm,
+unsigned long start,
+unsigned long size);
+void drm_mm_takedown(struct drm_mm *mm);
+bool drm_mm_clean(struct drm_mm *mm);
 
 void drm_mm_init_scan(struct drm_mm *mm,
 unsigned long size,
@@ -191,10 +287,10 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
 unsigned long color,
 unsigned long start,
 unsigned long end);
-int drm_mm_scan_add_block(struct drm_mm_node *node);
-int drm_mm_scan_remove_block(struct drm_mm_node *node);
+bool drm_mm_scan_add_block(struct drm_mm_node *node);
+bool drm_mm_scan_remove_block(struct drm_mm_node *node);
 
-extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
 #ifdef CONFIG_DEBUG_FS
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
 #endif
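
Finally, a rough sketch of how the now bool-returning eviction scan API is meant to be driven, loosely modelled on its in-tree users (not part of the patch; the example_buffer struct, its lists and the unbind step are invented for illustration).

#include <linux/list.h>
#include <linux/errno.h>
#include <drm/drm_mm.h>

/* Invented driver object: a buffer with a drm_mm_node plus list links. */
struct example_buffer {
	struct drm_mm_node node;
	struct list_head lru;		/* driver LRU of bound buffers */
	struct list_head scan_link;	/* temporary scan roster link */
};

static int example_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
				  unsigned long size, unsigned alignment)
{
	struct example_buffer *buf, *tmp;
	LIST_HEAD(scan_list);
	LIST_HEAD(eviction_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Add LRU blocks to the scan roster until a hole of the requested
	 * size/alignment could be assembled. */
	list_for_each_entry(buf, lru, lru) {
		list_add(&buf->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&buf->node)) {
			found = true;
			break;
		}
	}

	/* Every scanned block has to come off the roster again before any
	 * other drm_mm operation is allowed; remember which blocks overlap
	 * the hole that was found. */
	list_for_each_entry_safe(buf, tmp, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(&buf->node))
			list_move(&buf->scan_link, &eviction_list);
		else
			list_del_init(&buf->scan_link);
	}

	if (!found)
		return -ENOSPC;

	/* The roster is empty now, so the overlapping nodes may be freed. */
	list_for_each_entry_safe(buf, tmp, &eviction_list, scan_link) {
		list_del_init(&buf->scan_link);
		/* ... driver-specific unbind/flush of buf goes here ... */
		drm_mm_remove_node(&buf->node);
	}

	return 0;
}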