about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2018-10-11 00:53:40 -0400
committerDave Airlie <airlied@redhat.com>2018-10-11 00:53:45 -0400
commitca4b869240d5810ebac6b1570ad7beffcfbac2f5 (patch)
treed7e36e551b058316ab35e28f1bb992ce06b2ce0c /drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
parent46972c03ab667dc298cad0c9db517fb9b1521b5f (diff)
parentdf2fc43d09d3ee5ede82cab9299df5e78aa427b5 (diff)
Merge branch 'drm-next-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-next
Add a new list.h helper for doing bulk updates. Used by ttm.
- Fixes for display underflow on VI APUs at 4K with UVD running
- Endian fixes for powerplay on vega
- DC fixes for interlaced video
- Vega20 powerplay fixes
- RV/RV2/PCO powerplay fixes
- Fix for spurious ACPI events on HG laptops
- Fix a memory leak in DC on driver unload
- Fixes for manual fan control mode switching
- Suspend/resume robustness fixes
- Fix display handling on RV2
- VCN fixes for DPG on PCO
- Misc code cleanups and warning fixes
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181011014739.3117-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c183
1 file changed, 165 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bd79d0a31942..1e4dd09a5072 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1525,6 +1525,92 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
1525 return 0; 1525 return 0;
1526} 1526}
1527 1527
1528static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
1529{
1530 int i, r;
1531
1532 for (i = 0; i < adev->num_ip_blocks; i++) {
1533 if (!adev->ip_blocks[i].status.sw)
1534 continue;
1535 if (adev->ip_blocks[i].status.hw)
1536 continue;
1537 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
1538 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
1539 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1540 if (r) {
1541 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1542 adev->ip_blocks[i].version->funcs->name, r);
1543 return r;
1544 }
1545 adev->ip_blocks[i].status.hw = true;
1546 }
1547 }
1548
1549 return 0;
1550}
1551
1552static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
1553{
1554 int i, r;
1555
1556 for (i = 0; i < adev->num_ip_blocks; i++) {
1557 if (!adev->ip_blocks[i].status.sw)
1558 continue;
1559 if (adev->ip_blocks[i].status.hw)
1560 continue;
1561 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1562 if (r) {
1563 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1564 adev->ip_blocks[i].version->funcs->name, r);
1565 return r;
1566 }
1567 adev->ip_blocks[i].status.hw = true;
1568 }
1569
1570 return 0;
1571}
1572
1573static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
1574{
1575 int r = 0;
1576 int i;
1577
1578 if (adev->asic_type >= CHIP_VEGA10) {
1579 for (i = 0; i < adev->num_ip_blocks; i++) {
1580 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
1581 if (adev->in_gpu_reset || adev->in_suspend) {
1582 if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
1583 break; /* sriov gpu reset, psp need to do hw_init before IH because of hw limit */
1584 r = adev->ip_blocks[i].version->funcs->resume(adev);
1585 if (r) {
1586 DRM_ERROR("resume of IP block <%s> failed %d\n",
1587 adev->ip_blocks[i].version->funcs->name, r);
1588 return r;
1589 }
1590 } else {
1591 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
1592 if (r) {
1593 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
1594 adev->ip_blocks[i].version->funcs->name, r);
1595 return r;
1596 }
1597 }
1598 adev->ip_blocks[i].status.hw = true;
1599 }
1600 }
1601 }
1602
1603 if (adev->powerplay.pp_funcs->load_firmware) {
1604 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1605 if (r) {
1606 pr_err("firmware loading failed\n");
1607 return r;
1608 }
1609 }
1610
1611 return 0;
1612}
1613
1528/** 1614/**
1529 * amdgpu_device_ip_init - run init for hardware IPs 1615 * amdgpu_device_ip_init - run init for hardware IPs
1530 * 1616 *
@@ -1581,19 +1667,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
1581 } 1667 }
1582 } 1668 }
1583 1669
1584 for (i = 0; i < adev->num_ip_blocks; i++) { 1670 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
1585 if (!adev->ip_blocks[i].status.sw) 1671 if (r)
1586 continue; 1672 return r;
1587 if (adev->ip_blocks[i].status.hw) 1673
1588 continue; 1674 r = amdgpu_device_ip_hw_init_phase1(adev);
1589 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); 1675 if (r)
1590 if (r) { 1676 return r;
1591 DRM_ERROR("hw_init of IP block <%s> failed %d\n", 1677
1592 adev->ip_blocks[i].version->funcs->name, r); 1678 r = amdgpu_device_fw_loading(adev);
1593 return r; 1679 if (r)
1594 } 1680 return r;
1595 adev->ip_blocks[i].status.hw = true; 1681
1596 } 1682 r = amdgpu_device_ip_hw_init_phase2(adev);
1683 if (r)
1684 return r;
1597 1685
1598 amdgpu_xgmi_add_device(adev); 1686 amdgpu_xgmi_add_device(adev);
1599 amdgpu_amdkfd_device_init(adev); 1687 amdgpu_amdkfd_device_init(adev);
@@ -1656,7 +1744,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
1656 1744
1657 for (j = 0; j < adev->num_ip_blocks; j++) { 1745 for (j = 0; j < adev->num_ip_blocks; j++) {
1658 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 1746 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
1659 if (!adev->ip_blocks[i].status.valid) 1747 if (!adev->ip_blocks[i].status.late_initialized)
1660 continue; 1748 continue;
1661 /* skip CG for VCE/UVD, it's handled specially */ 1749 /* skip CG for VCE/UVD, it's handled specially */
1662 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1750 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1686,7 +1774,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
1686 1774
1687 for (j = 0; j < adev->num_ip_blocks; j++) { 1775 for (j = 0; j < adev->num_ip_blocks; j++) {
1688 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1; 1776 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
1689 if (!adev->ip_blocks[i].status.valid) 1777 if (!adev->ip_blocks[i].status.late_initialized)
1690 continue; 1778 continue;
1691 /* skip CG for VCE/UVD, it's handled specially */ 1779 /* skip CG for VCE/UVD, it's handled specially */
1692 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && 1780 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1723,7 +1811,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
1723 int i = 0, r; 1811 int i = 0, r;
1724 1812
1725 for (i = 0; i < adev->num_ip_blocks; i++) { 1813 for (i = 0; i < adev->num_ip_blocks; i++) {
1726 if (!adev->ip_blocks[i].status.valid) 1814 if (!adev->ip_blocks[i].status.hw)
1727 continue; 1815 continue;
1728 if (adev->ip_blocks[i].version->funcs->late_init) { 1816 if (adev->ip_blocks[i].version->funcs->late_init) {
1729 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); 1817 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
@@ -1732,8 +1820,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
1732 adev->ip_blocks[i].version->funcs->name, r); 1820 adev->ip_blocks[i].version->funcs->name, r);
1733 return r; 1821 return r;
1734 } 1822 }
1735 adev->ip_blocks[i].status.late_initialized = true;
1736 } 1823 }
1824 adev->ip_blocks[i].status.late_initialized = true;
1737 } 1825 }
1738 1826
1739 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE); 1827 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
@@ -1803,6 +1891,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1803 continue; 1891 continue;
1804 1892
1805 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { 1893 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
1894 amdgpu_ucode_free_bo(adev);
1806 amdgpu_free_static_csa(adev); 1895 amdgpu_free_static_csa(adev);
1807 amdgpu_device_wb_fini(adev); 1896 amdgpu_device_wb_fini(adev);
1808 amdgpu_device_vram_scratch_fini(adev); 1897 amdgpu_device_vram_scratch_fini(adev);
@@ -1833,6 +1922,43 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
1833 return 0; 1922 return 0;
1834} 1923}
1835 1924
1925static int amdgpu_device_enable_mgpu_fan_boost(void)
1926{
1927 struct amdgpu_gpu_instance *gpu_ins;
1928 struct amdgpu_device *adev;
1929 int i, ret = 0;
1930
1931 mutex_lock(&mgpu_info.mutex);
1932
1933 /*
1934 * MGPU fan boost feature should be enabled
1935 * only when there are two or more dGPUs in
1936 * the system
1937 */
1938 if (mgpu_info.num_dgpu < 2)
1939 goto out;
1940
1941 for (i = 0; i < mgpu_info.num_dgpu; i++) {
1942 gpu_ins = &(mgpu_info.gpu_ins[i]);
1943 adev = gpu_ins->adev;
1944 if (!(adev->flags & AMD_IS_APU) &&
1945 !gpu_ins->mgpu_fan_enabled &&
1946 adev->powerplay.pp_funcs &&
1947 adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
1948 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
1949 if (ret)
1950 break;
1951
1952 gpu_ins->mgpu_fan_enabled = 1;
1953 }
1954 }
1955
1956out:
1957 mutex_unlock(&mgpu_info.mutex);
1958
1959 return ret;
1960}
1961
1836/** 1962/**
1837 * amdgpu_device_ip_late_init_func_handler - work handler for ib test 1963 * amdgpu_device_ip_late_init_func_handler - work handler for ib test
1838 * 1964 *
@@ -1847,6 +1973,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
1847 r = amdgpu_ib_ring_tests(adev); 1973 r = amdgpu_ib_ring_tests(adev);
1848 if (r) 1974 if (r)
1849 DRM_ERROR("ib ring test failed (%d).\n", r); 1975 DRM_ERROR("ib ring test failed (%d).\n", r);
1976
1977 r = amdgpu_device_enable_mgpu_fan_boost();
1978 if (r)
1979 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
1850} 1980}
1851 1981
1852static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work) 1982static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2082,7 +2212,8 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
2082 continue; 2212 continue;
2083 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON || 2213 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2084 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC || 2214 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2085 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) 2215 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
2216 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2086 continue; 2217 continue;
2087 r = adev->ip_blocks[i].version->funcs->resume(adev); 2218 r = adev->ip_blocks[i].version->funcs->resume(adev);
2088 if (r) { 2219 if (r) {
@@ -2114,6 +2245,11 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
2114 r = amdgpu_device_ip_resume_phase1(adev); 2245 r = amdgpu_device_ip_resume_phase1(adev);
2115 if (r) 2246 if (r)
2116 return r; 2247 return r;
2248
2249 r = amdgpu_device_fw_loading(adev);
2250 if (r)
2251 return r;
2252
2117 r = amdgpu_device_ip_resume_phase2(adev); 2253 r = amdgpu_device_ip_resume_phase2(adev);
2118 2254
2119 return r; 2255 return r;
@@ -2608,6 +2744,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
2608 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 2744 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
2609 return 0; 2745 return 0;
2610 2746
2747 adev->in_suspend = true;
2611 drm_kms_helper_poll_disable(dev); 2748 drm_kms_helper_poll_disable(dev);
2612 2749
2613 if (fbcon) 2750 if (fbcon)
@@ -2793,6 +2930,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2793#ifdef CONFIG_PM 2930#ifdef CONFIG_PM
2794 dev->dev->power.disable_depth--; 2931 dev->dev->power.disable_depth--;
2795#endif 2932#endif
2933 adev->in_suspend = false;
2934
2796 return 0; 2935 return 0;
2797} 2936}
2798 2937
@@ -3061,6 +3200,10 @@ retry:
3061 if (r) 3200 if (r)
3062 goto out; 3201 goto out;
3063 3202
3203 r = amdgpu_device_fw_loading(adev);
3204 if (r)
3205 return r;
3206
3064 r = amdgpu_device_ip_resume_phase2(adev); 3207 r = amdgpu_device_ip_resume_phase2(adev);
3065 if (r) 3208 if (r)
3066 goto out; 3209 goto out;
@@ -3117,6 +3260,10 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
3117 /* we need recover gart prior to run SMC/CP/SDMA resume */ 3260 /* we need recover gart prior to run SMC/CP/SDMA resume */
3118 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); 3261 amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);
3119 3262
3263 r = amdgpu_device_fw_loading(adev);
3264 if (r)
3265 return r;
3266
3120 /* now we are okay to resume SMC/CP/SDMA */ 3267 /* now we are okay to resume SMC/CP/SDMA */
3121 r = amdgpu_device_ip_reinit_late_sriov(adev); 3268 r = amdgpu_device_ip_reinit_late_sriov(adev);
3122 if (r) 3269 if (r)