aboutsummaryrefslogtreecommitdiffstats
path: root/include/drm/drm_mm.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/drm/drm_mm.h')
-rw-r--r--include/drm/drm_mm.h154
1 files changed, 125 insertions, 29 deletions
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index cba67865d18f..8b6981ab3fcf 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -85,11 +85,31 @@ struct drm_mm {
85 unsigned long *start, unsigned long *end); 85 unsigned long *start, unsigned long *end);
86}; 86};
87 87
88/**
89 * drm_mm_node_allocated - checks whether a node is allocated
90 * @node: drm_mm_node to check
91 *
92 * Drivers should use this helper for proper encapsulation of drm_mm
93 * internals.
94 *
95 * Returns:
96 * True if the @node is allocated.
97 */
88static inline bool drm_mm_node_allocated(struct drm_mm_node *node) 98static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
89{ 99{
90 return node->allocated; 100 return node->allocated;
91} 101}
92 102
103/**
104 * drm_mm_initialized - checks whether an allocator is initialized
105 * @mm: drm_mm to check
106 *
107 * Drivers should use this helper for proper encapsulation of drm_mm
108 * internals.
109 *
110 * Returns:
111 * True if the @mm is initialized.
112 */
93static inline bool drm_mm_initialized(struct drm_mm *mm) 113static inline bool drm_mm_initialized(struct drm_mm *mm)
94{ 114{
95 return mm->hole_stack.next; 115 return mm->hole_stack.next;
@@ -100,6 +120,17 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no
100 return hole_node->start + hole_node->size; 120 return hole_node->start + hole_node->size;
101} 121}
102 122
123/**
124 * drm_mm_hole_node_start - computes the start of the hole following @node
125 * @hole_node: drm_mm_node which implicitly tracks the following hole
126 *
127 * This is useful for driver-specific debug dumpers. Otherwise drivers should not
128 * inspect holes themselves. Drivers must check first whether a hole indeed
129 * follows by looking at node->hole_follows.
130 *
131 * Returns:
132 * Start of the subsequent hole.
133 */
103static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) 134static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
104{ 135{
105 BUG_ON(!hole_node->hole_follows); 136 BUG_ON(!hole_node->hole_follows);
@@ -112,18 +143,49 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node
112 struct drm_mm_node, node_list)->start; 143 struct drm_mm_node, node_list)->start;
113} 144}
114 145
146/**
147 * drm_mm_hole_node_end - computes the end of the hole following @node
148 * @hole_node: drm_mm_node which implicitly tracks the following hole
149 *
150 * This is useful for driver-specific debug dumpers. Otherwise drivers should not
151 * inspect holes themselves. Drivers must check first whether a hole indeed
152 * follows by looking at node->hole_follows.
153 *
154 * Returns:
155 * End of the subsequent hole.
156 */
115static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) 157static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
116{ 158{
117 return __drm_mm_hole_node_end(hole_node); 159 return __drm_mm_hole_node_end(hole_node);
118} 160}
119 161
162/**
163 * drm_mm_for_each_node - iterator to walk over all allocated nodes
164 * @entry: drm_mm_node structure to assign to in each iteration step
165 * @mm: drm_mm allocator to walk
166 *
167 * This iterator walks over all nodes in the range allocator. It is implemented
168 * with list_for_each, so it is not safe against removal of elements.
169 */
120#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ 170#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
121 &(mm)->head_node.node_list, \ 171 &(mm)->head_node.node_list, \
122 node_list) 172 node_list)
123 173
124/* Note that we need to unroll list_for_each_entry in order to inline 174/**
125 * setting hole_start and hole_end on each iteration and keep the 175 * drm_mm_for_each_hole - iterator to walk over all holes
126 * macro sane. 176 * @entry: drm_mm_node used internally to track progress
177 * @mm: drm_mm allocator to walk
178 * @hole_start: ulong variable to assign the hole start to on each iteration
179 * @hole_end: ulong variable to assign the hole end to on each iteration
180 *
181 * This iterator walks over all holes in the range allocator. It is implemented
182 * with list_for_each, so it is not safe against removal of elements. @entry is used
183 * internally and will not reflect a real drm_mm_node for the very first hole.
184 * Hence users of this iterator may not access it.
185 *
186 * Implementation Note:
187 * We need to inline list_for_each_entry in order to be able to set hole_start
188 * and hole_end on each iteration while keeping the macro sane.
127 */ 189 */
128#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ 190#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
129 for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ 191 for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
@@ -136,14 +198,30 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
136/* 198/*
137 * Basic range manager support (drm_mm.c) 199 * Basic range manager support (drm_mm.c)
138 */ 200 */
139extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); 201int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
140 202
141extern int drm_mm_insert_node_generic(struct drm_mm *mm, 203int drm_mm_insert_node_generic(struct drm_mm *mm,
142 struct drm_mm_node *node, 204 struct drm_mm_node *node,
143 unsigned long size, 205 unsigned long size,
144 unsigned alignment, 206 unsigned alignment,
145 unsigned long color, 207 unsigned long color,
146 enum drm_mm_search_flags flags); 208 enum drm_mm_search_flags flags);
209/**
210 * drm_mm_insert_node - search for space and insert @node
211 * @mm: drm_mm to allocate from
212 * @node: preallocated node to insert
213 * @size: size of the allocation
214 * @alignment: alignment of the allocation
215 * @flags: flags to fine-tune the allocation
216 *
217 * This is a simplified version of drm_mm_insert_node_generic() with @color set
218 * to 0.
219 *
220 * The preallocated node must be cleared to 0.
221 *
222 * Returns:
223 * 0 on success, -ENOSPC if there's no suitable hole.
224 */
147static inline int drm_mm_insert_node(struct drm_mm *mm, 225static inline int drm_mm_insert_node(struct drm_mm *mm,
148 struct drm_mm_node *node, 226 struct drm_mm_node *node,
149 unsigned long size, 227 unsigned long size,
@@ -153,14 +231,32 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
153 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags); 231 return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
154} 232}
155 233
156extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, 234int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
157 struct drm_mm_node *node, 235 struct drm_mm_node *node,
158 unsigned long size, 236 unsigned long size,
159 unsigned alignment, 237 unsigned alignment,
160 unsigned long color, 238 unsigned long color,
161 unsigned long start, 239 unsigned long start,
162 unsigned long end, 240 unsigned long end,
163 enum drm_mm_search_flags flags); 241 enum drm_mm_search_flags flags);
242/**
243 * drm_mm_insert_node_in_range - ranged search for space and insert @node
244 * @mm: drm_mm to allocate from
245 * @node: preallocated node to insert
246 * @size: size of the allocation
247 * @alignment: alignment of the allocation
248 * @start: start of the allowed range for this node
249 * @end: end of the allowed range for this node
250 * @flags: flags to fine-tune the allocation
251 *
252 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
253 * @color set to 0.
254 *
255 * The preallocated node must be cleared to 0.
256 *
257 * Returns:
258 * 0 on success, -ENOSPC if there's no suitable hole.
259 */
164static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, 260static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
165 struct drm_mm_node *node, 261 struct drm_mm_node *node,
166 unsigned long size, 262 unsigned long size,
@@ -173,13 +269,13 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
173 0, start, end, flags); 269 0, start, end, flags);
174} 270}
175 271
176extern void drm_mm_remove_node(struct drm_mm_node *node); 272void drm_mm_remove_node(struct drm_mm_node *node);
177extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); 273void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
178extern void drm_mm_init(struct drm_mm *mm, 274void drm_mm_init(struct drm_mm *mm,
179 unsigned long start, 275 unsigned long start,
180 unsigned long size); 276 unsigned long size);
181extern void drm_mm_takedown(struct drm_mm *mm); 277void drm_mm_takedown(struct drm_mm *mm);
182extern int drm_mm_clean(struct drm_mm *mm); 278bool drm_mm_clean(struct drm_mm *mm);
183 279
184void drm_mm_init_scan(struct drm_mm *mm, 280void drm_mm_init_scan(struct drm_mm *mm,
185 unsigned long size, 281 unsigned long size,
@@ -191,10 +287,10 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
191 unsigned long color, 287 unsigned long color,
192 unsigned long start, 288 unsigned long start,
193 unsigned long end); 289 unsigned long end);
194int drm_mm_scan_add_block(struct drm_mm_node *node); 290bool drm_mm_scan_add_block(struct drm_mm_node *node);
195int drm_mm_scan_remove_block(struct drm_mm_node *node); 291bool drm_mm_scan_remove_block(struct drm_mm_node *node);
196 292
197extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); 293void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
198#ifdef CONFIG_DEBUG_FS 294#ifdef CONFIG_DEBUG_FS
199int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); 295int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
200#endif 296#endif