path: root/drivers/gpu/drm/drm_mm.c
author     Rodrigo Vivi <rodrigo.vivi@intel.com>  2018-07-23 12:13:12 -0400
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>  2018-07-23 12:13:12 -0400
commit     c74a7469f97c0f40b46e82ee979f9fb1bb6e847c (patch)
tree       f2690a1a916b73ef94657fbf0e0141ae57701825 /drivers/gpu/drm/drm_mm.c
parent     6f15a7de86c8cf2dc09fc9e6d07047efa40ef809 (diff)
parent     500775074f88d9cf5416bed2ca19592812d62c41 (diff)
Merge drm/drm-next into drm-intel-next-queued
We need a backmerge to get DP_DPCD_REV_14 before we push other i915
changes to dinq that could break compilation.

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Diffstat (limited to 'drivers/gpu/drm/drm_mm.c')
-rw-r--r--  drivers/gpu/drm/drm_mm.c  91
1 file changed, 64 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 3166026a1874..3cc5fbd78ee2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -239,6 +239,32 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 #define HOLE_SIZE(NODE) ((NODE)->hole_size)
 #define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
 
+static u64 rb_to_hole_size(struct rb_node *rb)
+{
+        return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
+}
+
+static void insert_hole_size(struct rb_root_cached *root,
+                             struct drm_mm_node *node)
+{
+        struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
+        u64 x = node->hole_size;
+        bool first = true;
+
+        while (*link) {
+                rb = *link;
+                if (x > rb_to_hole_size(rb)) {
+                        link = &rb->rb_left;
+                } else {
+                        link = &rb->rb_right;
+                        first = false;
+                }
+        }
+
+        rb_link_node(&node->rb_hole_size, rb, link);
+        rb_insert_color_cached(&node->rb_hole_size, root, first);
+}
+
 static void add_hole(struct drm_mm_node *node)
 {
         struct drm_mm *mm = node->mm;
@@ -247,7 +273,7 @@ static void add_hole(struct drm_mm_node *node)
                 __drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
         DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
-        RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
+        insert_hole_size(&mm->holes_size, node);
         RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
 
         list_add(&node->hole_stack, &mm->hole_stack);
@@ -258,7 +284,7 @@ static void rm_hole(struct drm_mm_node *node)
         DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 
         list_del(&node->hole_stack);
-        rb_erase(&node->rb_hole_size, &node->mm->holes_size);
+        rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
         rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
         node->hole_size = 0;
 
@@ -282,38 +308,39 @@ static inline u64 rb_hole_size(struct rb_node *rb)
 
 static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 {
-        struct rb_node *best = NULL;
-        struct rb_node **link = &mm->holes_size.rb_node;
+        struct rb_node *rb = mm->holes_size.rb_root.rb_node;
+        struct drm_mm_node *best = NULL;
 
-        while (*link) {
-                struct rb_node *rb = *link;
+        do {
+                struct drm_mm_node *node =
+                        rb_entry(rb, struct drm_mm_node, rb_hole_size);
 
-                if (size <= rb_hole_size(rb)) {
-                        link = &rb->rb_left;
-                        best = rb;
+                if (size <= node->hole_size) {
+                        best = node;
+                        rb = rb->rb_right;
                 } else {
-                        link = &rb->rb_right;
+                        rb = rb->rb_left;
                 }
-        }
+        } while (rb);
 
-        return rb_hole_size_to_node(best);
+        return best;
 }
 
 static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
 {
+        struct rb_node *rb = mm->holes_addr.rb_node;
         struct drm_mm_node *node = NULL;
-        struct rb_node **link = &mm->holes_addr.rb_node;
 
-        while (*link) {
+        while (rb) {
                 u64 hole_start;
 
-                node = rb_hole_addr_to_node(*link);
+                node = rb_hole_addr_to_node(rb);
                 hole_start = __drm_mm_hole_node_start(node);
 
                 if (addr < hole_start)
-                        link = &node->rb_hole_addr.rb_left;
+                        rb = node->rb_hole_addr.rb_left;
                 else if (addr > hole_start + node->hole_size)
-                        link = &node->rb_hole_addr.rb_right;
+                        rb = node->rb_hole_addr.rb_right;
                 else
                         break;
         }
@@ -326,9 +353,6 @@ first_hole(struct drm_mm *mm,
            u64 start, u64 end, u64 size,
            enum drm_mm_insert_mode mode)
 {
-        if (RB_EMPTY_ROOT(&mm->holes_size))
-                return NULL;
-
         switch (mode) {
         default:
         case DRM_MM_INSERT_BEST:
@@ -355,7 +379,7 @@ next_hole(struct drm_mm *mm,
         switch (mode) {
         default:
         case DRM_MM_INSERT_BEST:
-                return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
+                return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
 
         case DRM_MM_INSERT_LOW:
                 return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
@@ -426,6 +450,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 }
 EXPORT_SYMBOL(drm_mm_reserve_node);
 
+static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
+{
+        return rb ? rb_to_hole_size(rb) : 0;
+}
+
 /**
  * drm_mm_insert_node_in_range - ranged search for space and insert @node
  * @mm: drm_mm to allocate from
@@ -451,18 +480,26 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 {
         struct drm_mm_node *hole;
         u64 remainder_mask;
+        bool once;
 
         DRM_MM_BUG_ON(range_start >= range_end);
 
         if (unlikely(size == 0 || range_end - range_start < size))
                 return -ENOSPC;
 
+        if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
+                return -ENOSPC;
+
         if (alignment <= 1)
                 alignment = 0;
 
+        once = mode & DRM_MM_INSERT_ONCE;
+        mode &= ~DRM_MM_INSERT_ONCE;
+
         remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
-        for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
-             hole = next_hole(mm, hole, mode)) {
+        for (hole = first_hole(mm, range_start, range_end, size, mode);
+             hole;
+             hole = once ? NULL : next_hole(mm, hole, mode)) {
                 u64 hole_start = __drm_mm_hole_node_start(hole);
                 u64 hole_end = hole_start + hole->hole_size;
                 u64 adj_start, adj_end;
@@ -587,9 +624,9 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 
         if (drm_mm_hole_follows(old)) {
                 list_replace(&old->hole_stack, &new->hole_stack);
-                rb_replace_node(&old->rb_hole_size,
-                                &new->rb_hole_size,
-                                &mm->holes_size);
+                rb_replace_node_cached(&old->rb_hole_size,
+                                       &new->rb_hole_size,
+                                       &mm->holes_size);
                 rb_replace_node(&old->rb_hole_addr,
                                 &new->rb_hole_addr,
                                 &mm->holes_addr);
@@ -885,7 +922,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 
         INIT_LIST_HEAD(&mm->hole_stack);
         mm->interval_tree = RB_ROOT_CACHED;
-        mm->holes_size = RB_ROOT;
+        mm->holes_size = RB_ROOT_CACHED;
         mm->holes_addr = RB_ROOT;
 
         /* Clever trick to avoid a special case in the free hole tracking. */
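
The heart of the drm_mm.c change pulled in by this merge is the switch of the by-size hole tree to an rb_root_cached kept in descending size order: the cached leftmost node is then always the largest free hole, so drm_mm_insert_node_in_range() can reject an impossible request with a single comparison via rb_first_cached(), and best_hole() finds the smallest hole that still fits by walking rightwards. Below is a minimal standalone sketch of that pattern, assuming nothing from the kernel: every name in it (struct cached_root, hole_insert, largest_hole, best_fit) is hypothetical, and a plain unbalanced binary search tree stands in for the kernel's red-black tree, so only the caching and search logic mirror the patch.

/*
 * Standalone sketch (not kernel code) of the cached-leftmost pattern
 * that the drm_mm.c hunks above adopt via rb_root_cached.  All names
 * are hypothetical; no rebalancing is done.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct hole {
        uint64_t size;
        struct hole *left, *right;
};

/* Analogue of rb_root_cached: the root plus a cached pointer to the
 * leftmost node.  Holes are sorted by descending size, so the leftmost
 * node is always the largest free hole. */
struct cached_root {
        struct hole *root;
        struct hole *leftmost;
};

/* Analogue of insert_hole_size(): bigger holes go left; "first" stays
 * true only while we descend the leftmost spine, exactly like the
 * "first" argument handed to rb_insert_color_cached(). */
static void hole_insert(struct cached_root *tree, struct hole *node)
{
        struct hole **link = &tree->root;
        bool first = true;

        while (*link) {
                if (node->size > (*link)->size) {
                        link = &(*link)->left;
                } else {
                        link = &(*link)->right;
                        first = false;
                }
        }
        node->left = node->right = NULL;
        *link = node;
        if (first)              /* new node became the leftmost/largest */
                tree->leftmost = node;
}

/* Analogue of rb_to_hole_size_or_zero(rb_first_cached(...)): the early
 * -ENOSPC check in drm_mm_insert_node_in_range() reduces to a single
 * comparison against this cached value. */
static uint64_t largest_hole(const struct cached_root *tree)
{
        return tree->leftmost ? tree->leftmost->size : 0;
}

/* Analogue of the rewritten best_hole(): in a descending-size tree the
 * smallest hole that still fits is found by recording each fitting node
 * and continuing right, toward smaller holes. */
static struct hole *best_fit(const struct cached_root *tree, uint64_t size)
{
        struct hole *node = tree->root, *best = NULL;

        while (node) {
                if (size <= node->size) {
                        best = node;
                        node = node->right;
                } else {
                        node = node->left;
                }
        }
        return best;
}

int main(void)
{
        struct cached_root tree = { NULL, NULL };
        static const uint64_t sizes[] = { 4096, 65536, 512, 16384 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                struct hole *h = malloc(sizeof(*h));
                h->size = sizes[i];
                hole_insert(&tree, h);
        }

        printf("largest hole: %llu\n",
               (unsigned long long)largest_hole(&tree));         /* 65536 */
        printf("best fit for 1000: %llu\n",
               (unsigned long long)best_fit(&tree, 1000)->size); /* 4096 */
        printf("fits 131072? %s\n",
               largest_hole(&tree) >= 131072 ? "yes" : "no");    /* no */
        return 0;
}

The O(1) largest_hole() check is also why the hunk above can drop the RB_EMPTY_ROOT() test from first_hole(): once drm_mm_insert_node_in_range() has rejected undersized requests up front, an empty tree never reaches best_hole(), so its do/while may assume a non-empty tree, whereas the sketch's plain while handles the empty case itself.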