author     Christian König <christian.koenig@amd.com>   2014-08-22 08:25:55 -0400
committer  Alex Deucher <alexander.deucher@amd.com>     2014-08-27 22:46:23 -0400
commit     feba9b0bcf492ba991d7fbfc211dd49ebbc95a4b (patch)
tree       ef2896a8e05be32c2d62911da795dc5106f17030
parent     3852752ca89ca00aa13f12a9b9450fd97ff437d4 (diff)
drm/radeon: preallocate mem for UVD create/destroy msg
Allocating memory for the UVD create and destroy messages can fail, which is rather annoying when it happens in the middle of a GPU reset. Try to avoid this condition by preallocating a page for those dummy messages.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
 drivers/gpu/drm/radeon/radeon_uvd.c | 101
 1 file changed, 26 insertions(+), 75 deletions(-)
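
For illustration only, a minimal sketch of the idea, assembled from the hunks below rather than an additional change: the vcpu BO is grown by one GPU page at init time, and the create/destroy helpers later derive the CPU and GPU addresses of that last page instead of allocating a temporary BO for each dummy message.

        /* radeon_uvd_init(): reserve one extra GPU page at the end of the vcpu BO */
        bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
                  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
                  RADEON_GPU_PAGE_SIZE;

        /* radeon_uvd_get_create_msg() / radeon_uvd_get_destroy_msg():
         * reuse that last page for the dummy message, no allocation needed */
        uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) - RADEON_GPU_PAGE_SIZE;
        uint32_t *msg = rdev->uvd.cpu_addr + offs;      /* CPU mapping of the page */
        uint64_t addr = rdev->uvd.gpu_addr + offs;      /* GPU address of the page */
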
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1dedadd8f5df..5729e9bebd9d 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -138,7 +138,8 @@ int radeon_uvd_init(struct radeon_device *rdev)
 	}
 
 	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
-		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
+		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE +
+		  RADEON_GPU_PAGE_SIZE;
 	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
 	if (r) {
@@ -647,38 +648,16 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 }
 
 static int radeon_uvd_send_msg(struct radeon_device *rdev,
-			       int ring, struct radeon_bo *bo,
+			       int ring, uint64_t addr,
 			       struct radeon_fence **fence)
 {
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head head;
 	struct radeon_ib ib;
-	uint64_t addr;
 	int i, r;
 
-	memset(&tv, 0, sizeof(tv));
-	tv.bo = &bo->tbo;
-
-	INIT_LIST_HEAD(&head);
-	list_add(&tv.head, &head);
-
-	r = ttm_eu_reserve_buffers(&ticket, &head);
-	if (r)
-		return r;
-
-	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
-	radeon_uvd_force_into_uvd_segment(bo, RADEON_GEM_DOMAIN_VRAM);
-
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
-	if (r)
-		goto err;
-
 	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
 	if (r)
-		goto err;
+		return r;
 
-	addr = radeon_bo_gpu_offset(bo);
 	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
 	ib.ptr[1] = addr;
 	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
@@ -690,19 +669,11 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,
 	ib.length_dw = 16;
 
 	r = radeon_ib_schedule(rdev, &ib, NULL, false);
-	if (r)
-		goto err;
-	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
 
 	if (fence)
 		*fence = radeon_fence_ref(ib.fence);
 
 	radeon_ib_free(rdev, &ib);
-	radeon_bo_unref(&bo);
-	return 0;
-
-err:
-	ttm_eu_backoff_reservation(&ticket, &head);
 	return r;
 }
 
@@ -712,27 +683,18 @@ err:
 int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 			      uint32_t handle, struct radeon_fence **fence)
 {
-	struct radeon_bo *bo;
-	uint32_t *msg;
-	int r, i;
+	/* we use the last page of the vcpu bo for the UVD message */
+	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+		RADEON_GPU_PAGE_SIZE;
 
-	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
-	if (r)
-		return r;
+	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint64_t addr = rdev->uvd.gpu_addr + offs;
 
-	r = radeon_bo_reserve(bo, false);
-	if (r) {
-		radeon_bo_unref(&bo);
-		return r;
-	}
+	int r, i;
 
-	r = radeon_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		radeon_bo_unreserve(bo);
-		radeon_bo_unref(&bo);
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+	if (r)
 		return r;
-	}
 
 	/* stitch together an UVD create msg */
 	msg[0] = cpu_to_le32(0x00000de4);
@@ -749,36 +711,26 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
 	for (i = 11; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	radeon_bo_kunmap(bo);
-	radeon_bo_unreserve(bo);
-
-	return radeon_uvd_send_msg(rdev, ring, bo, fence);
+	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	return r;
 }
 
 int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 			       uint32_t handle, struct radeon_fence **fence)
 {
-	struct radeon_bo *bo;
-	uint32_t *msg;
-	int r, i;
+	/* we use the last page of the vcpu bo for the UVD message */
+	uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
+		RADEON_GPU_PAGE_SIZE;
 
-	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
-			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
-	if (r)
-		return r;
+	uint32_t *msg = rdev->uvd.cpu_addr + offs;
+	uint64_t addr = rdev->uvd.gpu_addr + offs;
 
-	r = radeon_bo_reserve(bo, false);
-	if (r) {
-		radeon_bo_unref(&bo);
-		return r;
-	}
+	int r, i;
 
-	r = radeon_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		radeon_bo_unreserve(bo);
-		radeon_bo_unref(&bo);
+	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
+	if (r)
 		return r;
-	}
 
 	/* stitch together an UVD destroy msg */
 	msg[0] = cpu_to_le32(0x00000de4);
@@ -788,10 +740,9 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
 	for (i = 4; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	radeon_bo_kunmap(bo);
-	radeon_bo_unreserve(bo);
-
-	return radeon_uvd_send_msg(rdev, ring, bo, fence);
+	r = radeon_uvd_send_msg(rdev, ring, addr, fence);
+	radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+	return r;
 }
 
 /**