path: root/drivers/gpu/drm/drm_mm.c
author     Linus Torvalds <torvalds@linux-foundation.org>   2009-06-12 21:09:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-06-12 21:09:18 -0400
commit     6b702462cbe5b6f372966a53f4465d745d86b65c (patch)
tree       19a8d090b284bb804e8a2ffa38fa51b58118db6a /drivers/gpu/drm/drm_mm.c
parent     947ec0b0c1e7e80eef4fe64f7763a06d0cf04d2e (diff)
parent     3c24475c1e4e8d10e50df161d8c4f1d382997a7c (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (50 commits)
  drm: include kernel list header file in hashtab header
  drm: Export hash table functionality.
  drm: Split out the mm declarations in a separate header. Add atomic operations.
  drm/radeon: add support for RV790.
  drm/radeon: add rv740 drm support.
  drm_calloc_large: check right size, check integer overflow, use GFP_ZERO
  drm: Eliminate magic I2C frobbing when reading EDID
  drm/i915: duplicate desired mode for use by fbcon.
  drm/via: vfree() no need checking before calling it
  drm: Replace DRM_DEBUG with DRM_DEBUG_DRIVER in i915 driver
  drm: Replace DRM_DEBUG with DRM_DEBUG_MODE in drm_mode
  drm/i915: Replace DRM_DEBUG with DRM_DEBUG_KMS in intel_sdvo
  drm/i915: replace DRM_DEBUG with DRM_DEBUG_KMS in intel_lvds
  drm: add separate drm debugging levels
  radeon: remove _DRM_DRIVER from the preadded sarea map
  drm: don't associate _DRM_DRIVER maps with a master
  drm: simplify kcalloc() call to kzalloc().
  intelfb: fix spelling of "CLOCK"
  drm: fix LOCK_TEST_WITH_RETURN macro
  drm/i915: Hook connector to encoder during load detection (fixes tv/vga detect)
  ...
Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r--  drivers/gpu/drm/drm_mm.c  165
1 files changed, 137 insertions, 28 deletions
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 367c590ffbba..7819fd930a51 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -42,8 +42,11 @@
  */
 
 #include "drmP.h"
+#include "drm_mm.h"
 #include <linux/slab.h>
 
+#define MM_UNUSED_TARGET 4
+
 unsigned long drm_mm_tail_space(struct drm_mm *mm)
 {
 	struct list_head *tail_node;
@@ -74,16 +77,62 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
 	return 0;
 }
 
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+	struct drm_mm_node *child;
+
+	if (atomic)
+		child = kmalloc(sizeof(*child), GFP_ATOMIC);
+	else
+		child = kmalloc(sizeof(*child), GFP_KERNEL);
+
+	if (unlikely(child == NULL)) {
+		spin_lock(&mm->unused_lock);
+		if (list_empty(&mm->unused_nodes))
+			child = NULL;
+		else {
+			child =
+			    list_entry(mm->unused_nodes.next,
+				       struct drm_mm_node, fl_entry);
+			list_del(&child->fl_entry);
+			--mm->num_unused;
+		}
+		spin_unlock(&mm->unused_lock);
+	}
+	return child;
+}
+
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+	struct drm_mm_node *node;
+
+	spin_lock(&mm->unused_lock);
+	while (mm->num_unused < MM_UNUSED_TARGET) {
+		spin_unlock(&mm->unused_lock);
+		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		spin_lock(&mm->unused_lock);
+
+		if (unlikely(node == NULL)) {
+			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+			spin_unlock(&mm->unused_lock);
+			return ret;
+		}
+		++mm->num_unused;
+		list_add_tail(&node->fl_entry, &mm->unused_nodes);
+	}
+	spin_unlock(&mm->unused_lock);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_pre_get);
 
 static int drm_mm_create_tail_node(struct drm_mm *mm,
 				   unsigned long start,
-				   unsigned long size)
+				   unsigned long size, int atomic)
 {
 	struct drm_mm_node *child;
 
-	child = (struct drm_mm_node *)
-		drm_alloc(sizeof(*child), DRM_MEM_MM);
-	if (!child)
+	child = drm_mm_kmalloc(mm, atomic);
+	if (unlikely(child == NULL))
 		return -ENOMEM;
 
 	child->free = 1;
@@ -97,8 +146,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
 	return 0;
 }
 
-
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
 {
 	struct list_head *tail_node;
 	struct drm_mm_node *entry;
@@ -106,20 +154,21 @@ int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
 	tail_node = mm->ml_entry.prev;
 	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
 	if (!entry->free) {
-		return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+		return drm_mm_create_tail_node(mm, entry->start + entry->size,
+					       size, atomic);
 	}
 	entry->size += size;
 	return 0;
 }
 
 static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-						 unsigned long size)
+						 unsigned long size,
+						 int atomic)
 {
 	struct drm_mm_node *child;
 
-	child = (struct drm_mm_node *)
-		drm_alloc(sizeof(*child), DRM_MEM_MM);
-	if (!child)
+	child = drm_mm_kmalloc(parent->mm, atomic);
+	if (unlikely(child == NULL))
 		return NULL;
 
 	INIT_LIST_HEAD(&child->fl_entry);
@@ -151,8 +200,9 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 		tmp = parent->start % alignment;
 
 	if (tmp) {
-		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
-		if (!align_splitoff)
+		align_splitoff =
+		    drm_mm_split_at_start(parent, alignment - tmp, 0);
+		if (unlikely(align_splitoff == NULL))
 			return NULL;
 	}
 
@@ -161,7 +211,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 		parent->free = 0;
 		return parent;
 	} else {
-		child = drm_mm_split_at_start(parent, size);
+		child = drm_mm_split_at_start(parent, size, 0);
 	}
 
 	if (align_splitoff)
@@ -169,14 +219,49 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 
 	return child;
 }
+
 EXPORT_SYMBOL(drm_mm_get_block);
 
+struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+					    unsigned long size,
+					    unsigned alignment)
+{
+
+	struct drm_mm_node *align_splitoff = NULL;
+	struct drm_mm_node *child;
+	unsigned tmp = 0;
+
+	if (alignment)
+		tmp = parent->start % alignment;
+
+	if (tmp) {
+		align_splitoff =
+		    drm_mm_split_at_start(parent, alignment - tmp, 1);
+		if (unlikely(align_splitoff == NULL))
+			return NULL;
+	}
+
+	if (parent->size == size) {
+		list_del_init(&parent->fl_entry);
+		parent->free = 0;
+		return parent;
+	} else {
+		child = drm_mm_split_at_start(parent, size, 1);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return child;
+}
+EXPORT_SYMBOL(drm_mm_get_block_atomic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
  */
 
-void drm_mm_put_block(struct drm_mm_node * cur)
+void drm_mm_put_block(struct drm_mm_node *cur)
 {
 
 	struct drm_mm *mm = cur->mm;
@@ -188,21 +273,27 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 	int merged = 0;
 
 	if (cur_head->prev != root_head) {
-		prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+		prev_node =
+		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
 		if (prev_node->free) {
 			prev_node->size += cur->size;
 			merged = 1;
 		}
 	}
 	if (cur_head->next != root_head) {
-		next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+		next_node =
+		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
 		if (next_node->free) {
 			if (merged) {
 				prev_node->size += next_node->size;
 				list_del(&next_node->ml_entry);
 				list_del(&next_node->fl_entry);
-				drm_free(next_node, sizeof(*next_node),
-					 DRM_MEM_MM);
+				if (mm->num_unused < MM_UNUSED_TARGET) {
+					list_add(&next_node->fl_entry,
+						 &mm->unused_nodes);
+					++mm->num_unused;
+				} else
+					kfree(next_node);
 			} else {
 				next_node->size += cur->size;
 				next_node->start = cur->start;
@@ -215,14 +306,19 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 		list_add(&cur->fl_entry, &mm->fl_entry);
 	} else {
 		list_del(&cur->ml_entry);
-		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+		if (mm->num_unused < MM_UNUSED_TARGET) {
+			list_add(&cur->fl_entry, &mm->unused_nodes);
+			++mm->num_unused;
+		} else
+			kfree(cur);
 	}
 }
+
 EXPORT_SYMBOL(drm_mm_put_block);
 
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 				       unsigned long size,
 				       unsigned alignment, int best_match)
 {
 	struct list_head *list;
 	const struct list_head *free_stack = &mm->fl_entry;
@@ -247,7 +343,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 				wasted += alignment - tmp;
 		}
 
-
 		if (entry->size >= size + wasted) {
 			if (!best_match)
 				return entry;
@@ -260,6 +355,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 
 	return best;
 }
+EXPORT_SYMBOL(drm_mm_search_free);
 
 int drm_mm_clean(struct drm_mm * mm)
 {
@@ -267,14 +363,17 @@ int drm_mm_clean(struct drm_mm * mm)
 
 	return (head->next->next == head);
 }
-EXPORT_SYMBOL(drm_mm_search_free);
+EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
 	INIT_LIST_HEAD(&mm->ml_entry);
 	INIT_LIST_HEAD(&mm->fl_entry);
+	INIT_LIST_HEAD(&mm->unused_nodes);
+	mm->num_unused = 0;
+	spin_lock_init(&mm->unused_lock);
 
-	return drm_mm_create_tail_node(mm, start, size);
+	return drm_mm_create_tail_node(mm, start, size, 0);
 }
 EXPORT_SYMBOL(drm_mm_init);
 
@@ -282,6 +381,7 @@ void drm_mm_takedown(struct drm_mm * mm)
 {
 	struct list_head *bnode = mm->fl_entry.next;
 	struct drm_mm_node *entry;
+	struct drm_mm_node *next;
 
 	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
 
@@ -293,7 +393,16 @@ void drm_mm_takedown(struct drm_mm * mm)
 
 	list_del(&entry->fl_entry);
 	list_del(&entry->ml_entry);
+	kfree(entry);
+
+	spin_lock(&mm->unused_lock);
+	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
+		list_del(&entry->fl_entry);
+		kfree(entry);
+		--mm->num_unused;
+	}
+	spin_unlock(&mm->unused_lock);
 
-	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+	BUG_ON(mm->num_unused != 0);
 }
 EXPORT_SYMBOL(drm_mm_takedown);
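
For context, a minimal usage sketch of the atomic path this patch adds: drm_mm_pre_get() is called in sleepable context to top up the per-mm cache of preallocated nodes (up to MM_UNUSED_TARGET), so that drm_mm_get_block_atomic() can split blocks under a driver-held spinlock without sleeping. Only the drm_mm_* calls below come from this diff; the foo_manager structure and the foo_* helpers are hypothetical, and the exact locking discipline is an assumption for illustration.

/* Hypothetical driver-side usage sketch, not part of this patch. */
#include "drmP.h"
#include "drm_mm.h"

struct foo_manager {			/* hypothetical driver state */
	struct drm_mm mm;		/* initialised with drm_mm_init() */
	spinlock_t lock;		/* protects the drm_mm lists */
};

static struct drm_mm_node *foo_alloc_range(struct foo_manager *man,
					   unsigned long size,
					   unsigned alignment)
{
	struct drm_mm_node *hole, *node = NULL;

	/* May sleep: refill mm->unused_nodes with GFP_KERNEL allocations. */
	if (drm_mm_pre_get(&man->mm))
		return NULL;

	spin_lock(&man->lock);
	hole = drm_mm_search_free(&man->mm, size, alignment, 0);
	if (hole)
		/* Atomic variant: splits via GFP_ATOMIC or the node cache. */
		node = drm_mm_get_block_atomic(hole, size, alignment);
	spin_unlock(&man->lock);

	return node;
}

static void foo_free_range(struct foo_manager *man, struct drm_mm_node *node)
{
	spin_lock(&man->lock);
	/* Merges with free neighbours or returns the node to the cache. */
	drm_mm_put_block(node);
	spin_unlock(&man->lock);
}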