author	Daniel Vetter <daniel.vetter@ffwll.ch>	2011-02-18 11:59:14 -0500
committer	Dave Airlie <airlied@redhat.com>	2011-02-22 19:32:51 -0500
commit	b0b7af1884b7d807a3504804f9825d472de78708 (patch)
tree	ba9e2dde9575caab3ae6f4b0a1d1ef15c09caf67 /drivers/gpu/drm/drm_mm.c
parent	9fc935debb33d90bf302ba42f7234b78e322f195 (diff)
drm: mm: add api for embedding struct drm_mm_node
The old api has a two-step process: First search for a suitable free hole, then allocate from that specific hole. No user used this to do anything clever. So drop it for the embeddable variant of the drm_mm api (the old one retains this ability, for the time being).

With struct drm_mm_node embedded, we cannot track allocations anymore by checking for a NULL pointer. So keep track of this and add a small helper drm_mm_node_allocated. Also add a function to move allocations between different struct drm_mm_node.

v2: Implement suggestions by Chris Wilson.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
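For illustration, a minimal sketch of how a driver is expected to use the embeddable variant (struct my_buffer, my_buffer_bind() and my_buffer_unbind() are hypothetical names, not part of this patch): the drm_mm_node lives inside the driver's own object, drm_mm_insert_node()/drm_mm_remove_node() replace the get_block/put_block pair, and the drm_mm_node_allocated() helper mentioned above replaces the old NULL-pointer check.

#include <linux/string.h>
#include <drm/drm_mm.h>

/* Hypothetical driver object: the drm_mm_node is embedded, not kmalloc'ed. */
struct my_buffer {
	struct drm_mm_node node;
	/* ... driver-private state ... */
};

static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
			  unsigned long size, unsigned alignment)
{
	/* The node must be cleared before drm_mm_insert_node() is called. */
	memset(&buf->node, 0, sizeof(buf->node));
	return drm_mm_insert_node(mm, &buf->node, size, alignment);
}

static void my_buffer_unbind(struct my_buffer *buf)
{
	/* node.allocated tracks ownership now that NULL checks are gone. */
	if (drm_mm_node_allocated(&buf->node))
		drm_mm_remove_node(&buf->node);
}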
Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r--	drivers/gpu/drm/drm_mm.c	93
1 file changed, 84 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index fecb4063c018..d6432f9e49c1 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -124,6 +124,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
 	if (alignment)
 		tmp = hole_start % alignment;
 
@@ -136,6 +138,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
 	node->start = hole_start + wasted;
 	node->size = size;
 	node->mm = mm;
+	node->allocated = 1;
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
@@ -157,8 +160,6 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 {
 	struct drm_mm_node *node;
 
-	BUG_ON(!hole_node->hole_follows);
-
 	node = drm_mm_kmalloc(hole_node->mm, atomic);
 	if (unlikely(node == NULL))
 		return NULL;
@@ -169,6 +170,26 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. The preallocated memory node
+ * must be cleared.
+ */
+int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
+		       unsigned long size, unsigned alignment)
+{
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free(mm, size, alignment, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper(hole_node, node, size, alignment);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node);
+
 static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 					struct drm_mm_node *node,
 					unsigned long size, unsigned alignment,
@@ -179,6 +200,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
 	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
+	BUG_ON(!hole_node->hole_follows || node->allocated);
+
 	if (hole_start < start)
 		wasted += start - hole_start;
 	if (alignment)
@@ -195,6 +218,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 	node->start = hole_start + wasted;
 	node->size = size;
 	node->mm = mm;
+	node->allocated = 1;
 
 	INIT_LIST_HEAD(&node->hole_stack);
 	list_add(&node->node_list, &hole_node->node_list);
@@ -219,8 +243,6 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
 {
 	struct drm_mm_node *node;
 
-	BUG_ON(!hole_node->hole_follows);
-
 	node = drm_mm_kmalloc(hole_node->mm, atomic);
 	if (unlikely(node == NULL))
 		return NULL;
@@ -232,14 +254,34 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node
 }
 EXPORT_SYMBOL(drm_mm_get_block_range_generic);
 
-/*
- * Put a block. Merge with the previous and / or next block if they are free.
- * Otherwise add to the free stack.
+/**
+ * Search for free space and insert a preallocated memory node. Returns
+ * -ENOSPC if no suitable free area is available. This is for range
+ * restricted allocations. The preallocated memory node must be cleared.
  */
-
-void drm_mm_put_block(struct drm_mm_node *node)
+int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
+				unsigned long size, unsigned alignment,
+				unsigned long start, unsigned long end)
 {
+	struct drm_mm_node *hole_node;
+
+	hole_node = drm_mm_search_free_in_range(mm, size, alignment,
+						start, end, 0);
+	if (!hole_node)
+		return -ENOSPC;
+
+	drm_mm_insert_helper_range(hole_node, node, size, alignment,
+				   start, end);
 
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_insert_node_in_range);
+
+/**
+ * Remove a memory node from the allocator.
+ */
+void drm_mm_remove_node(struct drm_mm_node *node)
+{
 	struct drm_mm *mm = node->mm;
 	struct drm_mm_node *prev_node;
 
@@ -264,6 +306,22 @@ void drm_mm_put_block(struct drm_mm_node *node)
 	list_move(&prev_node->hole_stack, &mm->hole_stack);
 
 	list_del(&node->node_list);
+	node->allocated = 0;
+}
+EXPORT_SYMBOL(drm_mm_remove_node);
+
+/*
+ * Remove a memory node from the allocator and free the allocated struct
+ * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
+ * drm_mm_get_block functions.
+ */
+void drm_mm_put_block(struct drm_mm_node *node)
+{
+
+	struct drm_mm *mm = node->mm;
+
+	drm_mm_remove_node(node);
+
 	spin_lock(&mm->unused_lock);
 	if (mm->num_unused < MM_UNUSED_TARGET) {
 		list_add(&node->node_list, &mm->unused_nodes);
@@ -368,6 +426,23 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 EXPORT_SYMBOL(drm_mm_search_free_in_range);
 
 /**
+ * Moves an allocation. To be used with embedded struct drm_mm_node.
+ */
+void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
+{
+	list_replace(&old->node_list, &new->node_list);
+	list_replace(&old->node_list, &new->hole_stack);
+	new->hole_follows = old->hole_follows;
+	new->mm = old->mm;
+	new->start = old->start;
+	new->size = old->size;
+
+	old->allocated = 0;
+	new->allocated = 1;
+}
+EXPORT_SYMBOL(drm_mm_replace_node);
+
+/**
  * Initializa lru scanning.
  *
  * This simply sets up the scanning routines with the parameters for the desired
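
To round out the picture, a similarly hedged sketch of the new drm_mm_replace_node() helper added above (my_buffer and my_buffer_adopt_node() are hypothetical names, as in the earlier example): it hands an existing allocation over from one embedded node to another without freeing and re-allocating the range.

/* Hypothetical sketch: move an allocation into another embedded node. */
static void my_buffer_adopt_node(struct my_buffer *buf,
				 struct drm_mm_node *old_node)
{
	memset(&buf->node, 0, sizeof(buf->node));
	/* buf->node takes over old_node's place; old_node is marked unallocated. */
	drm_mm_replace_node(old_node, &buf->node);
}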