Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 177 ++++++++++++++++++++++++++++++---
 1 file changed, 161 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index bac0d06c52ac..b85fb83d7ae8 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -44,6 +44,9 @@ struct radeon_object {
 	uint64_t			gpu_addr;
 	void				*kptr;
 	bool				is_iomem;
+	uint32_t			tiling_flags;
+	uint32_t			pitch;
+	int				surface_reg;
 };
 
 int radeon_ttm_init(struct radeon_device *rdev);
@@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
 
 	robj = container_of(tobj, struct radeon_object, tobj);
 	list_del_init(&robj->list);
+	radeon_object_clear_surface_reg(robj);
 	kfree(robj);
 }
 
@@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
 {
 	uint32_t flags = 0;
 	if (domain & RADEON_GEM_DOMAIN_VRAM) {
-		flags |= TTM_PL_FLAG_VRAM;
+		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
 	}
 	if (domain & RADEON_GEM_DOMAIN_GTT) {
-		flags |= TTM_PL_FLAG_TT;
+		flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	}
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
-		flags |= TTM_PL_FLAG_SYSTEM;
+		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
 	}
 	if (!flags) {
-		flags |= TTM_PL_FLAG_SYSTEM;
+		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
 	}
 	return flags;
 }
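
The hunk above widens each domain's allowed caching modes: VRAM placements may be write-combined or uncached (never cached), while GTT and system placements accept any caching mode via TTM_PL_MASK_CACHING. A minimal standalone sketch of that mapping follows; the PL_* and GEM_DOMAIN_* bit values below are invented stand-ins for illustration, not TTM's or radeon's real constants.

/* Illustrative-only model of the domain -> placement-flag mapping above.
 * The bit values are invented for this example; the real TTM_PL_FLAG_*
 * constants live in drm/ttm/ttm_placement.h. */
#include <stdint.h>
#include <stdio.h>

#define PL_FLAG_SYSTEM   (1u << 0)   /* stand-in for TTM_PL_FLAG_SYSTEM */
#define PL_FLAG_TT       (1u << 1)   /* stand-in for TTM_PL_FLAG_TT */
#define PL_FLAG_VRAM     (1u << 2)   /* stand-in for TTM_PL_FLAG_VRAM */
#define PL_FLAG_CACHED   (1u << 16)  /* stand-in for TTM_PL_FLAG_CACHED */
#define PL_FLAG_UNCACHED (1u << 17)  /* stand-in for TTM_PL_FLAG_UNCACHED */
#define PL_FLAG_WC       (1u << 18)  /* stand-in for TTM_PL_FLAG_WC */
#define PL_MASK_CACHING  (PL_FLAG_CACHED | PL_FLAG_UNCACHED | PL_FLAG_WC)

#define GEM_DOMAIN_CPU   (1u << 0)
#define GEM_DOMAIN_GTT   (1u << 1)
#define GEM_DOMAIN_VRAM  (1u << 2)

static uint32_t flags_from_domain(uint32_t domain)
{
	uint32_t flags = 0;

	/* VRAM must never be cacheable: offer only WC or uncached. */
	if (domain & GEM_DOMAIN_VRAM)
		flags |= PL_FLAG_VRAM | PL_FLAG_WC | PL_FLAG_UNCACHED;
	/* GTT and system placements may use any caching mode. */
	if (domain & GEM_DOMAIN_GTT)
		flags |= PL_FLAG_TT | PL_MASK_CACHING;
	if (domain & GEM_DOMAIN_CPU)
		flags |= PL_FLAG_SYSTEM | PL_MASK_CACHING;
	/* Fall back to system memory if no domain was requested. */
	if (!flags)
		flags |= PL_FLAG_SYSTEM | PL_MASK_CACHING;
	return flags;
}

int main(void)
{
	printf("VRAM|GTT -> 0x%08x\n",
	       (unsigned)flags_from_domain(GEM_DOMAIN_VRAM | GEM_DOMAIN_GTT));
	return 0;
}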
@@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev,
 	}
 	robj->rdev = rdev;
 	robj->gobj = gobj;
+	robj->surface_reg = -1;
 	INIT_LIST_HEAD(&robj->list);
 
 	flags = radeon_object_flags_from_domain(domain);
@@ -304,7 +309,26 @@ int radeon_object_wait(struct radeon_object *robj)
 	}
 	spin_lock(&robj->tobj.lock);
 	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, false, false);
+		r = ttm_bo_wait(&robj->tobj, true, true, false);
+	}
+	spin_unlock(&robj->tobj.lock);
+	radeon_object_unreserve(robj);
+	return r;
+}
+
+int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
+{
+	int r = 0;
+
+	r = radeon_object_reserve(robj, true);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
+		return r;
+	}
+	spin_lock(&robj->tobj.lock);
+	*cur_placement = robj->tobj.mem.mem_type;
+	if (robj->tobj.sync_obj) {
+		r = ttm_bo_wait(&robj->tobj, true, true, true);
 	}
 	spin_unlock(&robj->tobj.lock);
 	radeon_object_unreserve(robj);
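
radeon_object_busy_domain() is the non-blocking counterpart of radeon_object_wait(): the final ttm_bo_wait() argument (no_wait) is true, so it returns -EBUSY while a fence is outstanding and additionally reports where the BO currently resides. A hedged sketch of a caller, in the shape of a GEM busy ioctl, follows; the function name and body are illustrative, not the in-tree handler.

/* Hypothetical caller: report whether a BO is still busy and which GEM
 * domain it currently occupies. Illustrative only. */
static int example_gem_busy(struct radeon_object *robj, uint32_t *domain)
{
	uint32_t cur_placement;
	int r;

	r = radeon_object_busy_domain(robj, &cur_placement);
	if (r && r != -EBUSY)
		return r;	/* reserve failed; placement was not read */

	switch (cur_placement) {
	case TTM_PL_VRAM:
		*domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		*domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		*domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		*domain = 0;
		break;
	}
	return r;	/* 0 if idle, -EBUSY if still busy */
}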
@@ -403,7 +427,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
 	struct radeon_object *robj;
 	struct radeon_fence *old_fence = NULL;
 	struct list_head *i;
-	uint32_t flags;
 	int r;
 
 	r = radeon_object_list_reserve(head);
@@ -414,27 +437,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
 	list_for_each(i, head) {
 		lobj = list_entry(i, struct radeon_object_list, list);
 		robj = lobj->robj;
-		if (lobj->wdomain) {
-			flags = radeon_object_flags_from_domain(lobj->wdomain);
-			flags |= TTM_PL_FLAG_TT;
-		} else {
-			flags = radeon_object_flags_from_domain(lobj->rdomain);
-			flags |= TTM_PL_FLAG_TT;
-			flags |= TTM_PL_FLAG_VRAM;
-		}
 		if (!robj->pin_count) {
-			robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
+			if (lobj->wdomain) {
+				robj->tobj.proposed_placement =
+					radeon_object_flags_from_domain(lobj->wdomain);
+			} else {
+				robj->tobj.proposed_placement =
+					radeon_object_flags_from_domain(lobj->rdomain);
+			}
 			r = ttm_buffer_object_validate(&robj->tobj,
 						       robj->tobj.proposed_placement,
 						       true, false);
 			if (unlikely(r)) {
-				radeon_object_list_unreserve(head);
 				DRM_ERROR("radeon: failed to validate.\n");
 				return r;
 			}
 			radeon_object_gpu_addr(robj);
 		}
 		lobj->gpu_offset = robj->gpu_addr;
+		lobj->tiling_flags = robj->tiling_flags;
 		if (fence) {
 			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
 			robj->tobj.sync_obj = radeon_fence_ref(fence);
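
Note the new lobj->tiling_flags snapshot: once the list validates, each reloc entry carries both the BO's GPU offset and its tiling flags, so later stages that only walk the reloc list can honour tiling without re-reserving the object. A small hedged sketch of such a consumer (the function name is invented, and it assumes struct radeon_object_list gained a tiling_flags field in the matching header change):

/* Hypothetical consumer of the snapshot above: after validation each
 * reloc carries the BO's GPU offset and tiling flags, so a checker can
 * inspect both without touching robj again. Illustrative only. */
static void example_dump_relocs(struct list_head *head)
{
	struct radeon_object_list *lobj;

	list_for_each_entry(lobj, head, list)
		DRM_DEBUG("reloc at 0x%016llx tiling 0x%08x\n",
			  (unsigned long long)lobj->gpu_offset,
			  lobj->tiling_flags);
}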
@@ -479,3 +500,127 @@ unsigned long radeon_object_size(struct radeon_object *robj)
 {
 	return robj->tobj.num_pages << PAGE_SHIFT;
 }
+
+int radeon_object_get_surface_reg(struct radeon_object *robj)
+{
+	struct radeon_device *rdev = robj->rdev;
+	struct radeon_surface_reg *reg;
+	struct radeon_object *old_object;
+	int steal;
+	int i;
+
+	if (!robj->tiling_flags)
+		return 0;
+
+	if (robj->surface_reg >= 0) {
+		reg = &rdev->surface_regs[robj->surface_reg];
+		i = robj->surface_reg;
+		goto out;
+	}
+
+	steal = -1;
+	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+
+		reg = &rdev->surface_regs[i];
+		if (!reg->robj)
+			break;
+
+		old_object = reg->robj;
+		if (old_object->pin_count == 0)
+			steal = i;
+	}
+
+	/* if we are all out */
+	if (i == RADEON_GEM_MAX_SURFACES) {
+		if (steal == -1)
+			return -ENOMEM;
+		/* find someone with a surface reg and nuke their BO */
+		reg = &rdev->surface_regs[steal];
+		old_object = reg->robj;
+		/* blow away the mapping */
+		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
+		ttm_bo_unmap_virtual(&old_object->tobj);
+		old_object->surface_reg = -1;
+		i = steal;
+	}
+
+	robj->surface_reg = i;
+	reg->robj = robj;
+
+out:
+	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
+			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
+			       robj->tobj.num_pages << PAGE_SHIFT);
+	return 0;
+}
+
+void radeon_object_clear_surface_reg(struct radeon_object *robj)
+{
+	struct radeon_device *rdev = robj->rdev;
+	struct radeon_surface_reg *reg;
+
+	if (robj->surface_reg == -1)
+		return;
+
+	reg = &rdev->surface_regs[robj->surface_reg];
+	radeon_clear_surface_reg(rdev, robj->surface_reg);
+
+	reg->robj = NULL;
+	robj->surface_reg = -1;
+}
+
+void radeon_object_set_tiling_flags(struct radeon_object *robj,
+				uint32_t tiling_flags, uint32_t pitch)
+{
+	robj->tiling_flags = tiling_flags;
+	robj->pitch = pitch;
+}
+
+void radeon_object_get_tiling_flags(struct radeon_object *robj,
+				uint32_t *tiling_flags,
+				uint32_t *pitch)
+{
+	if (tiling_flags)
+		*tiling_flags = robj->tiling_flags;
+	if (pitch)
+		*pitch = robj->pitch;
+}
+
+int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
+				bool force_drop)
+{
+	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+		return 0;
+
+	if (force_drop) {
+		radeon_object_clear_surface_reg(robj);
+		return 0;
+	}
+
+	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+		if (!has_moved)
+			return 0;
+
+		if (robj->surface_reg >= 0)
+			radeon_object_clear_surface_reg(robj);
+		return 0;
+	}
+
+	if ((robj->surface_reg >= 0) && !has_moved)
+		return 0;
+
+	return radeon_object_get_surface_reg(robj);
+}
+
+void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+			struct ttm_mem_reg *mem)
+{
+	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
+	radeon_object_check_tiling(robj, 0, 1);
+}
+
+void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
+	radeon_object_check_tiling(robj, 0, 0);
+}
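
Taken together the new helpers form a small state machine: userspace records tiling via radeon_object_set_tiling_flags(), radeon_object_check_tiling() reconciles that state on every move or CPU fault (the two notify hooks above), and surface registers are allocated lazily, stealing a slot from an unpinned BO when all RADEON_GEM_MAX_SURFACES entries are taken. A hedged sketch of the set-tiling path follows; the function name and body are illustrative, and the in-tree ioctl handler does more argument validation.

/* Hypothetical set-tiling path: record the new flags, then force the
 * surface-register state to match the BO's current placement.
 * Illustrative only; not the in-tree ioctl handler. */
static int example_set_tiling(struct radeon_object *robj,
			      uint32_t tiling_flags, uint32_t pitch)
{
	int r;

	r = radeon_object_reserve(robj, false);
	if (unlikely(r != 0))
		return r;
	radeon_object_set_tiling_flags(robj, tiling_flags, pitch);
	/* has_moved=true: allocate (or drop) a surface reg for wherever
	 * the BO currently lives; force_drop=false keeps a valid one. */
	r = radeon_object_check_tiling(robj, true, false);
	radeon_object_unreserve(robj);
	return r;
}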