Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/si.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si.c | 120
1 file changed, 104 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 543101d5a5ed..a675ec6d2811 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -31,7 +31,8 @@
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "atom.h"
-#include "amdgpu_powerplay.h"
+#include "amd_pcie.h"
+#include "si_dpm.h"
 #include "sid.h"
 #include "si_ih.h"
 #include "gfx_v6_0.h"
@@ -1230,6 +1231,92 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
 	adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 }
 
+static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+	}
+}
+
+static void si_invalidate_hdp(struct amdgpu_device *adev,
+			      struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_DEBUG0, 1);
+		RREG32(mmHDP_DEBUG0);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+	}
+}
+
+static int si_get_pcie_lanes(struct amdgpu_device *adev)
+{
+	u32 link_width_cntl;
+
+	if (adev->flags & AMD_IS_APU)
+		return 0;
+
+	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
+	case LC_LINK_WIDTH_X1:
+		return 1;
+	case LC_LINK_WIDTH_X2:
+		return 2;
+	case LC_LINK_WIDTH_X4:
+		return 4;
+	case LC_LINK_WIDTH_X8:
+		return 8;
+	case LC_LINK_WIDTH_X0:
+	case LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
+static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
+{
+	u32 link_width_cntl, mask;
+
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	switch (lanes) {
+	case 0:
+		mask = LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = LC_LINK_WIDTH_X8;
+		break;
+	case 16:
+		mask = LC_LINK_WIDTH_X16;
+		break;
+	default:
+		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
+		return;
+	}
+
+	link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~LC_LINK_WIDTH_MASK;
+	link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
+	link_width_cntl |= (LC_RECONFIG_NOW |
+			    LC_RECONFIG_ARC_MISSING_ESCAPE);
+
+	WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+}
+
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
 	.read_disabled_bios = &si_read_disabled_bios,
@@ -1240,7 +1327,11 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.get_xclk = &si_get_xclk,
 	.set_uvd_clocks = &si_set_uvd_clocks,
 	.set_vce_clocks = NULL,
+	.get_pcie_lanes = &si_get_pcie_lanes,
+	.set_pcie_lanes = &si_set_pcie_lanes,
 	.get_config_memsize = &si_get_config_memsize,
+	.flush_hdp = &si_flush_hdp,
+	.invalidate_hdp = &si_invalidate_hdp,
 };
 
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -1461,8 +1552,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
 	struct pci_dev *root = adev->pdev->bus->self;
 	int bridge_pos, gpu_pos;
-	u32 speed_cntl, mask, current_data_rate;
-	int ret, i;
+	u32 speed_cntl, current_data_rate;
+	int i;
 	u16 tmp16;
 
 	if (pci_is_root_bus(adev->pdev->bus))
@@ -1474,23 +1565,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return;
 
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret != 0)
-		return;
-
-	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 		return;
 
 	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
 	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
 		LC_CURRENT_DATA_RATE_SHIFT;
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate == 2) {
 			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
 			return;
 		}
 		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
-	} else if (mask & DRM_PCIE_SPEED_50) {
+	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
 		if (current_data_rate == 1) {
 			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
 			return;
@@ -1506,7 +1594,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (!gpu_pos)
 		return;
 
-	if (mask & DRM_PCIE_SPEED_80) {
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
 		if (current_data_rate != 2) {
 			u16 bridge_cfg, gpu_cfg;
 			u16 bridge_cfg2, gpu_cfg2;
@@ -1589,9 +1677,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 
 	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
 	tmp16 &= ~0xf;
-	if (mask & DRM_PCIE_SPEED_80)
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
 		tmp16 |= 3;
-	else if (mask & DRM_PCIE_SPEED_50)
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
 		tmp16 |= 2;
 	else
 		tmp16 |= 1;
@@ -1962,7 +2050,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 	amdgpu_device_ip_block_add(adev, &si_common_ip_block);
 	amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
 	amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
-	amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+	amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 	if (adev->enable_virtual_display)
 		amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 	else
@@ -1976,7 +2064,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 	amdgpu_device_ip_block_add(adev, &si_common_ip_block);
 	amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
 	amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
-	amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+	amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 	if (adev->enable_virtual_display)
 		amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 	else
@@ -1990,7 +2078,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 	amdgpu_device_ip_block_add(adev, &si_common_ip_block);
 	amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block);
 	amdgpu_device_ip_block_add(adev, &si_ih_ip_block);
-	amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
+	amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 	if (adev->enable_virtual_display)
 		amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 	amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block);