about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/drm_mm.c
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2013-01-17 05:34:08 -0500
committerDave Airlie <airlied@redhat.com>2013-01-17 05:34:08 -0500
commitb5cc6c0387b2f8d269c1df1e68c97c958dd22fed (patch)
tree697f2335b3a10f55e0ea226dcd044ee4ff3f0f7f /drivers/gpu/drm/drm_mm.c
parent9931faca02c604c22335f5a935a501bb2ace6e20 (diff)
parentc0c36b941b6f0be6ac74f340040cbb29d6a0b06c (diff)
Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Daniel writes:
- seqno wrap fixes and debug infrastructure from Mika Kuoppala and Chris Wilson
- some leftover kill-agp on gen6+ patches from Ben
- hotplug improvements from Damien
- clear fb when allocated from stolen, avoids dirt on the fbcon (Chris)
- Stolen mem support from Chris Wilson, one of the many steps to get to real fastboot support.
- Some DDI code cleanups from Paulo.
- Some refactorings around lvds and dp code.
- some random little bits&pieces

* tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm-intel: (93 commits)
  drm/i915: Return the real error code from intel_set_mode()
  drm/i915: Make GSM void
  drm/i915: Move GSM mapping into dev_priv
  drm/i915: Move even more gtt code to i915_gem_gtt
  drm/i915: Make next_seqno debugs entry to use i915_gem_set_seqno
  drm/i915: Introduce i915_gem_set_seqno()
  drm/i915: Always clear semaphore mboxes on seqno wrap
  drm/i915: Initialize hardware semaphore state on ring init
  drm/i915: Introduce ring set_seqno
  drm/i915: Missed conversion to gtt_pte_t
  drm/i915: Bug on unsupported swizzled platforms
  drm/i915: BUG() if fences are used on unsupported platform
  drm/i915: fixup overlay stolen memory leak
  drm/i915: clean up PIPECONF bpc #defines
  drm/i915: add intel_dp_set_signal_levels
  drm/i915: remove leftover display.update_wm assignment
  drm/i915: check for the PCH when setting pch_transcoder
  drm/i915: Clear the stolen fb before enabling
  drm/i915: Access to snooped system memory through the GTT is incoherent
  drm/i915: Remove stale comment about intel_dp_detect()
  ...

Conflicts:
	drivers/gpu/drm/i915/intel_display.c
Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r--  drivers/gpu/drm/drm_mm.c  96
1 file changed, 63 insertions, 33 deletions
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2bf9670ba29b..86272f04b82f 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
102} 102}
103EXPORT_SYMBOL(drm_mm_pre_get); 103EXPORT_SYMBOL(drm_mm_pre_get);
104 104
105static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
106{
107 return hole_node->start + hole_node->size;
108}
109
110static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
111{
112 struct drm_mm_node *next_node =
113 list_entry(hole_node->node_list.next, struct drm_mm_node,
114 node_list);
115
116 return next_node->start;
117}
118
119static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 105static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
120 struct drm_mm_node *node, 106 struct drm_mm_node *node,
121 unsigned long size, unsigned alignment, 107 unsigned long size, unsigned alignment,
@@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
127 unsigned long adj_start = hole_start; 113 unsigned long adj_start = hole_start;
128 unsigned long adj_end = hole_end; 114 unsigned long adj_end = hole_end;
129 115
130 BUG_ON(!hole_node->hole_follows || node->allocated); 116 BUG_ON(node->allocated);
131 117
132 if (mm->color_adjust) 118 if (mm->color_adjust)
133 mm->color_adjust(hole_node, color, &adj_start, &adj_end); 119 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@@ -155,12 +141,57 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
155 BUG_ON(node->start + node->size > adj_end); 141 BUG_ON(node->start + node->size > adj_end);
156 142
157 node->hole_follows = 0; 143 node->hole_follows = 0;
158 if (node->start + node->size < hole_end) { 144 if (__drm_mm_hole_node_start(node) < hole_end) {
159 list_add(&node->hole_stack, &mm->hole_stack); 145 list_add(&node->hole_stack, &mm->hole_stack);
160 node->hole_follows = 1; 146 node->hole_follows = 1;
161 } 147 }
162} 148}
163 149
150struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
151 unsigned long start,
152 unsigned long size,
153 bool atomic)
154{
155 struct drm_mm_node *hole, *node;
156 unsigned long end = start + size;
157 unsigned long hole_start;
158 unsigned long hole_end;
159
160 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
161 if (hole_start > start || hole_end < end)
162 continue;
163
164 node = drm_mm_kmalloc(mm, atomic);
165 if (unlikely(node == NULL))
166 return NULL;
167
168 node->start = start;
169 node->size = size;
170 node->mm = mm;
171 node->allocated = 1;
172
173 INIT_LIST_HEAD(&node->hole_stack);
174 list_add(&node->node_list, &hole->node_list);
175
176 if (start == hole_start) {
177 hole->hole_follows = 0;
178 list_del_init(&hole->hole_stack);
179 }
180
181 node->hole_follows = 0;
182 if (end != hole_end) {
183 list_add(&node->hole_stack, &mm->hole_stack);
184 node->hole_follows = 1;
185 }
186
187 return node;
188 }
189
190 WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
191 return NULL;
192}
193EXPORT_SYMBOL(drm_mm_create_block);
194
164struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, 195struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
165 unsigned long size, 196 unsigned long size,
166 unsigned alignment, 197 unsigned alignment,
@@ -251,7 +282,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
251 BUG_ON(node->start + node->size > end); 282 BUG_ON(node->start + node->size > end);
252 283
253 node->hole_follows = 0; 284 node->hole_follows = 0;
254 if (node->start + node->size < hole_end) { 285 if (__drm_mm_hole_node_start(node) < hole_end) {
255 list_add(&node->hole_stack, &mm->hole_stack); 286 list_add(&node->hole_stack, &mm->hole_stack);
256 node->hole_follows = 1; 287 node->hole_follows = 1;
257 } 288 }
@@ -325,12 +356,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
325 list_entry(node->node_list.prev, struct drm_mm_node, node_list); 356 list_entry(node->node_list.prev, struct drm_mm_node, node_list);
326 357
327 if (node->hole_follows) { 358 if (node->hole_follows) {
328 BUG_ON(drm_mm_hole_node_start(node) 359 BUG_ON(__drm_mm_hole_node_start(node) ==
329 == drm_mm_hole_node_end(node)); 360 __drm_mm_hole_node_end(node));
330 list_del(&node->hole_stack); 361 list_del(&node->hole_stack);
331 } else 362 } else
332 BUG_ON(drm_mm_hole_node_start(node) 363 BUG_ON(__drm_mm_hole_node_start(node) !=
333 != drm_mm_hole_node_end(node)); 364 __drm_mm_hole_node_end(node));
365
334 366
335 if (!prev_node->hole_follows) { 367 if (!prev_node->hole_follows) {
336 prev_node->hole_follows = 1; 368 prev_node->hole_follows = 1;
@@ -388,6 +420,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
388{ 420{
389 struct drm_mm_node *entry; 421 struct drm_mm_node *entry;
390 struct drm_mm_node *best; 422 struct drm_mm_node *best;
423 unsigned long adj_start;
424 unsigned long adj_end;
391 unsigned long best_size; 425 unsigned long best_size;
392 426
393 BUG_ON(mm->scanned_blocks); 427 BUG_ON(mm->scanned_blocks);
@@ -395,17 +429,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
395 best = NULL; 429 best = NULL;
396 best_size = ~0UL; 430 best_size = ~0UL;
397 431
398 list_for_each_entry(entry, &mm->hole_stack, hole_stack) { 432 drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
399 unsigned long adj_start = drm_mm_hole_node_start(entry);
400 unsigned long adj_end = drm_mm_hole_node_end(entry);
401
402 if (mm->color_adjust) { 433 if (mm->color_adjust) {
403 mm->color_adjust(entry, color, &adj_start, &adj_end); 434 mm->color_adjust(entry, color, &adj_start, &adj_end);
404 if (adj_end <= adj_start) 435 if (adj_end <= adj_start)
405 continue; 436 continue;
406 } 437 }
407 438
408 BUG_ON(!entry->hole_follows);
409 if (!check_free_hole(adj_start, adj_end, size, alignment)) 439 if (!check_free_hole(adj_start, adj_end, size, alignment))
410 continue; 440 continue;
411 441
@@ -432,6 +462,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
432{ 462{
433 struct drm_mm_node *entry; 463 struct drm_mm_node *entry;
434 struct drm_mm_node *best; 464 struct drm_mm_node *best;
465 unsigned long adj_start;
466 unsigned long adj_end;
435 unsigned long best_size; 467 unsigned long best_size;
436 468
437 BUG_ON(mm->scanned_blocks); 469 BUG_ON(mm->scanned_blocks);
@@ -439,13 +471,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
439 best = NULL; 471 best = NULL;
440 best_size = ~0UL; 472 best_size = ~0UL;
441 473
442 list_for_each_entry(entry, &mm->hole_stack, hole_stack) { 474 drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
443 unsigned long adj_start = drm_mm_hole_node_start(entry) < start ? 475 if (adj_start < start)
444 start : drm_mm_hole_node_start(entry); 476 adj_start = start;
445 unsigned long adj_end = drm_mm_hole_node_end(entry) > end ? 477 if (adj_end > end)
446 end : drm_mm_hole_node_end(entry); 478 adj_end = end;
447
448 BUG_ON(!entry->hole_follows);
449 479
450 if (mm->color_adjust) { 480 if (mm->color_adjust) {
451 mm->color_adjust(entry, color, &adj_start, &adj_end); 481 mm->color_adjust(entry, color, &adj_start, &adj_end);