Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c'):

 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 100 +++++++++++++++++++++-------
 1 file changed, 86 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1949d8aedf49..0b9332e65a4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -109,10 +109,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 {
 	uint32_t ret;
 
-	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
-		BUG_ON(in_interrupt());
+	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 		return amdgpu_virt_kiq_rreg(adev, reg);
-	}
 
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
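Note: dropping BUG_ON(in_interrupt()) means a KIQ register read no longer panics the kernel based on calling context; presumably the KIQ path is now safe, or at least recoverable, in more contexts. Callers that need to bypass the KIQ proxy entirely can still do so through the access flags. A minimal sketch, assuming a valid adev and register offset:

	/* Force a direct MMIO read even under SR-IOV runtime. */
	uint32_t val = amdgpu_mm_rreg(adev, reg, AMDGPU_REGS_NO_KIQ);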
@@ -137,10 +135,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		adev->last_mm_index = v;
 	}
 
-	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
-		BUG_ON(in_interrupt());
+	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
 		return amdgpu_virt_kiq_wreg(adev, reg, v);
-	}
 
 	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
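Note: the write path mirrors the read-path change above. Under SR-IOV runtime, register accesses are proxied through the KIQ (Kernel Interface Queue) ring instead of touching MMIO directly. A comment-only sketch of that flow, as an assumption about amdgpu_virt_kiq_wreg() rather than a description of its actual code:

	static void kiq_wreg_flow_sketch(struct amdgpu_device *adev, u32 reg, u32 v)
	{
		/* 1. reserve space on the KIQ ring and emit a register-write packet */
		/* 2. fence the submission */
		/* 3. wait for completion so the write has landed before returning */
	}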
@@ -658,6 +654,81 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
 }
 
 /*
+ * Firmware Reservation functions
+ */
+/**
+ * amdgpu_fw_reserve_vram_fini - free fw reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free fw reserved vram if it has been reserved.
+ */
+void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
+{
+	amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
+		NULL, &adev->fw_vram_usage.va);
+}
+
+/**
+ * amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from fw.
+ */
+int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+	int r = 0;
+	u64 gpu_addr;
+	u64 vram_size = adev->mc.visible_vram_size;
+
+	adev->fw_vram_usage.va = NULL;
+	adev->fw_vram_usage.reserved_bo = NULL;
+
+	if (adev->fw_vram_usage.size > 0 &&
+		adev->fw_vram_usage.size <= vram_size) {
+
+		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
+			PAGE_SIZE, true, 0,
+			AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
+			&adev->fw_vram_usage.reserved_bo);
+		if (r)
+			goto error_create;
+
+		r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+		if (r)
+			goto error_reserve;
+		r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+			AMDGPU_GEM_DOMAIN_VRAM,
+			adev->fw_vram_usage.start_offset,
+			(adev->fw_vram_usage.start_offset +
+			adev->fw_vram_usage.size), &gpu_addr);
+		if (r)
+			goto error_pin;
+		r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+			&adev->fw_vram_usage.va);
+		if (r)
+			goto error_kmap;
+
+		amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+	}
+	return r;
+
+error_kmap:
+	amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+error_pin:
+	amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+error_reserve:
+	amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+error_create:
+	adev->fw_vram_usage.va = NULL;
+	adev->fw_vram_usage.reserved_bo = NULL;
+	return r;
+}
+
+
+/*
  * GPU helpers function.
  */
 /**
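Note: amdgpu_fw_reserve_vram_init() pins a CPU-visible, contiguous buffer at exactly the VRAM range the firmware requested (start_offset through start_offset + size) and kmaps it into adev->fw_vram_usage.va; the goto ladder unwinds kmap/pin/reserve/create in reverse order on any failure. Only the teardown call is wired up later in this patch (in amdgpu_device_fini()); the init call site is not shown here. A hedged usage sketch:

	/* Sketch only: where the init call lands is not part of this diff. */
	r = amdgpu_fw_reserve_vram_init(adev);
	if (r)
		return r;
	/* ... firmware data is reachable through adev->fw_vram_usage.va ... */
	amdgpu_fw_reserve_vram_fini(adev); /* no-op if nothing was reserved */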
@@ -1604,7 +1675,6 @@ static int amdgpu_init(struct amdgpu_device *adev)
 			return r;
 		}
 		adev->ip_blocks[i].status.sw = true;
-
 		/* need to do gmc hw init early so we can allocate gpu mem */
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			r = amdgpu_vram_scratch_init(adev);
@@ -1635,11 +1705,6 @@ static int amdgpu_init(struct amdgpu_device *adev)
 		}
 	}
 
-	mutex_lock(&adev->firmware.mutex);
-	if (amdgpu_ucode_init_bo(adev))
-		adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
-	mutex_unlock(&adev->firmware.mutex);
-
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.sw)
 			continue;
@@ -1775,8 +1840,6 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 
 		adev->ip_blocks[i].status.hw = false;
 	}
-	if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
-		amdgpu_ucode_fini_bo(adev);
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.sw)
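Note: together with the hunk above, this removes the firmware-BO lifecycle (amdgpu_ucode_init_bo()/amdgpu_ucode_fini_bo()) from the generic IP-block init/fini loops; the replacement call sites are not in this diff and presumably move into the component that owns firmware loading. For reference, the semantics of the removed fallback were:

	/* If the ucode BO could not be set up, fall back from front-door
	 * firmware loading to direct register-based loading. */
	if (amdgpu_ucode_init_bo(adev))
		adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;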
@@ -2019,6 +2082,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->vm_manager.vm_pte_num_rings = 0;
 	adev->gart.gart_funcs = NULL;
 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
 	adev->smc_rreg = &amdgpu_invalid_rreg;
 	adev->smc_wreg = &amdgpu_invalid_wreg;
@@ -2047,6 +2111,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->pm.mutex);
 	mutex_init(&adev->gfx.gpu_clock_mutex);
 	mutex_init(&adev->srbm_mutex);
+	mutex_init(&adev->gfx.pipe_reserve_mutex);
 	mutex_init(&adev->grbm_idx_mutex);
 	mutex_init(&adev->mn_lock);
 	mutex_init(&adev->virt.vf_errors.lock);
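Note: pipe_reserve_bitmap (zeroed in the earlier hunk) and pipe_reserve_mutex are new per-device gfx state, presumably for reserving compute pipes on behalf of high-priority submissions. Their declarations are not part of this file; a guess at what this initialization assumes, labeled as such:

	/* Assumed additions to the gfx struct (not shown in this diff): */
	struct mutex	pipe_reserve_mutex;
	DECLARE_BITMAP(pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);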
@@ -2223,6 +2288,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
 
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_init_data_exchange(adev);
+
 	amdgpu_fbdev_init(adev);
 
 	r = amdgpu_pm_sysfs_init(adev);
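Note: for SR-IOV virtual functions, host/guest data exchange is brought up only after the IB ring test, i.e. once the rings are known to work. The guard follows the same pattern used elsewhere in this patch:

	/* amdgpu_sriov_vf() is true only when running as an SR-IOV VF guest,
	 * so bare-metal and PF paths skip the exchange setup entirely. */
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);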
@@ -2300,6 +2368,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);
 	amdgpu_ib_pool_fini(adev);
+	amdgpu_fw_reserve_vram_fini(adev);
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
@@ -2552,6 +2621,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 	int i;
 	bool asic_hang = false;
 
+	if (amdgpu_sriov_vf(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
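Note: a VF cannot drive per-IP soft resets itself (the host owns the reset flow), so reporting a hang unconditionally here presumably steers recovery straight to the full, host-mediated reset. Sketch of the consumer, simplified and assumed rather than quoted from the recovery code:

	/* In the recovery path, a true result triggers a reset attempt;
	 * for a VF that now always happens, skipping per-IP hang checks. */
	if (amdgpu_check_soft_reset(adev)) {
		/* ... proceed with the (host-assisted) GPU reset ... */
	}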