author	Likun Gao <Likun.Gao@amd.com>	2018-11-08 07:19:54 -0500
committer	Alex Deucher <alexander.deucher@amd.com>	2018-11-09 16:29:12 -0500
commit	106c7d6148e5aadd394e6701f7e498df49b869d1 (patch)
tree	4ad91ee0536a5949138549cbac51b7f45dc6208b /drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
parent	88dfc9a3dd47027c9ffc831635e5cf4e8ed3b781 (diff)
drm/amdgpu: abstract the function of enter/exit safe mode for RLC
Abstract the function of amdgpu_gfx_rlc_enter/exit_safe_mode and some part of
rlc_init to improve the reusability of RLC.
Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c	201
1 file changed, 58 insertions(+), 143 deletions(-)
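
The refactor below is easier to read with the shape of the shared RLC callback table in mind. The sketch here is reconstructed from the iceland_rlc_funcs initializer at the bottom of this diff; the real struct amdgpu_rlc_funcs lives in amdgpu_rlc.h and may declare further members or a different order.

/* struct amdgpu_rlc_funcs as implied by the ops table in this diff
 * (sketch only; reconstructed from the diff, not copied from amdgpu_rlc.h) */
struct amdgpu_device;

struct amdgpu_rlc_funcs {
	bool (*is_rlc_enabled)(struct amdgpu_device *adev);
	void (*set_safe_mode)(struct amdgpu_device *adev);
	void (*unset_safe_mode)(struct amdgpu_device *adev);
	int (*init)(struct amdgpu_device *adev);
	u32 (*get_csb_size)(struct amdgpu_device *adev);
	void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
	int (*get_cp_table_num)(struct amdgpu_device *adev);
	int (*resume)(struct amdgpu_device *adev);
	void (*stop)(struct amdgpu_device *adev);
	void (*reset)(struct amdgpu_device *adev);
};

The common amdgpu_gfx_rlc_* entry points dispatch through this table, so this per-ASIC file only has to supply the hardware-specific pieces.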
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 81a308bac230..cb066a8dccd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1283,75 +1283,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
 		buffer[count++] = cpu_to_le32(0);
 }
 
-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
 {
-	const __le32 *fw_data;
-	volatile u32 *dst_ptr;
-	int me, i, max_me = 4;
-	u32 bo_offset = 0;
-	u32 table_offset, table_size;
-
 	if (adev->asic_type == CHIP_CARRIZO)
-		max_me = 5;
-
-	/* write the cp table buffer */
-	dst_ptr = adev->gfx.rlc.cp_table_ptr;
-	for (me = 0; me < max_me; me++) {
-		if (me == 0) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.ce_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 1) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.pfp_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 2) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.me_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 3) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		} else if (me == 4) {
-			const struct gfx_firmware_header_v1_0 *hdr =
-				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-			fw_data = (const __le32 *)
-				(adev->gfx.mec2_fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			table_offset = le32_to_cpu(hdr->jt_offset);
-			table_size = le32_to_cpu(hdr->jt_size);
-		}
-
-		for (i = 0; i < table_size; i ++) {
-			dst_ptr[bo_offset + i] =
-				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-		}
-
-		bo_offset += table_size;
-	}
+		return 5;
+	else
+		return 4;
 }
 
 static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 {
-	volatile u32 *dst_ptr;
-	u32 dws;
 	const struct cs_section_def *cs_data;
 	int r;
 
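The five near-identical per-engine branches deleted above differ only in which firmware image they read, so the copy loop itself moves into common RLC code and only the engine count stays per ASIC via get_cp_table_num(). A minimal sketch of that shared loop, assuming an illustrative helper name and firmware array (the authoritative version is in the companion amdgpu_rlc.c patch, which this file-limited diff does not show):

/* illustrative reconstruction of the shared CP jump-table setup; the
 * helper name and the fws[] array are assumptions, the copy logic is
 * taken from the cz_init_cp_jump_table() lines removed above */
static void rlc_copy_cp_jump_tables(struct amdgpu_device *adev)
{
	/* one firmware image per micro engine; entry 4 (MEC2) is only
	 * reached when get_cp_table_num() returns 5, i.e. on Carrizo */
	const struct firmware *fws[] = {
		adev->gfx.ce_fw, adev->gfx.pfp_fw, adev->gfx.me_fw,
		adev->gfx.mec_fw, adev->gfx.mec2_fw,
	};
	volatile u32 *dst_ptr = adev->gfx.rlc.cp_table_ptr;
	int max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
	u32 bo_offset = 0;
	int me, i;

	for (me = 0; me < max_me; me++) {
		const struct gfx_firmware_header_v1_0 *hdr =
			(const struct gfx_firmware_header_v1_0 *)fws[me]->data;
		const __le32 *fw_data = (const __le32 *)(fws[me]->data +
			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		u32 table_offset = le32_to_cpu(hdr->jt_offset);
		u32 table_size = le32_to_cpu(hdr->jt_size);

		for (i = 0; i < table_size; i++)
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));

		bo_offset += table_size;
	}
}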
@@ -1360,44 +1301,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 	cs_data = adev->gfx.rlc.cs_data;
 
 	if (cs_data) {
-		/* clear state block */
-		adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
-		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-					      AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.clear_state_obj,
-					      &adev->gfx.rlc.clear_state_gpu_addr,
-					      (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-			amdgpu_gfx_rlc_fini(adev);
+		/* init clear state block */
+		r = amdgpu_gfx_rlc_init_csb(adev);
+		if (r)
 			return r;
-		}
-
-		/* set up the cs buffer */
-		dst_ptr = adev->gfx.rlc.cs_ptr;
-		gfx_v8_0_get_csb_buffer(adev, dst_ptr);
-		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
 	}
 
 	if ((adev->asic_type == CHIP_CARRIZO) ||
 	    (adev->asic_type == CHIP_STONEY)) {
 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-					      &adev->gfx.rlc.cp_table_obj,
-					      &adev->gfx.rlc.cp_table_gpu_addr,
-					      (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+		r = amdgpu_gfx_rlc_init_cpt(adev);
+		if (r)
 			return r;
-		}
-
-		cz_init_cp_jump_table(adev);
-
-		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
 	}
 
 	return 0;
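Both amdgpu_bo_create_reserved() sequences deleted from rlc_init() move behind shared helpers that call back through the new hooks. What amdgpu_gfx_rlc_init_csb() plausibly does, reconstructed from the removed lines (again, the authoritative implementation is in the companion amdgpu_rlc.c change):

/* sketch of the common clear-state-buffer init; reconstructed from the
 * code this hunk deletes, with the ASIC-specific calls replaced by the
 * new get_csb_size()/get_csb_buffer() hooks */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
	volatile u32 *dst_ptr;
	u32 dws;
	int r;

	/* size and allocate the clear state block */
	adev->gfx.rlc.clear_state_size = dws =
		adev->gfx.rlc.funcs->get_csb_size(adev);
	r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->gfx.rlc.clear_state_obj,
				      &adev->gfx.rlc.clear_state_gpu_addr,
				      (void **)&adev->gfx.rlc.cs_ptr);
	if (r) {
		dev_warn(adev->dev, "(%d) create RLC csb bo failed\n", r);
		amdgpu_gfx_rlc_fini(adev);
		return r;
	}

	/* fill it through the per-ASIC hook, then unmap and unreserve */
	dst_ptr = adev->gfx.rlc.cs_ptr;
	adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
	amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
	amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

	return 0;
}

amdgpu_gfx_rlc_init_cpt() presumably follows the same pattern for the CP jump table, creating the BO sized by cp_table_size and filling it with the copy loop sketched earlier.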
@@ -4945,7 +4860,7 @@ static int gfx_v8_0_hw_fini(void *handle)
 		pr_debug("For SRIOV client, shouldn't do anything.\n");
 		return 0;
 	}
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 	if (!gfx_v8_0_wait_for_idle(adev))
 		gfx_v8_0_cp_enable(adev, false);
 	else
@@ -4954,7 +4869,7 @@ static int gfx_v8_0_hw_fini(void *handle)
 		adev->gfx.rlc.funcs->stop(adev);
 	else
 		pr_err("rlc is busy, skip halt rlc\n");
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return 0;
 }
 
@@ -5417,7 +5332,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 			      AMD_PG_SUPPORT_RLC_SMU_HS |
 			      AMD_PG_SUPPORT_CP |
 			      AMD_PG_SUPPORT_GFX_DMG))
-		adev->gfx.rlc.funcs->enter_safe_mode(adev);
+		amdgpu_gfx_rlc_enter_safe_mode(adev);
 	switch (adev->asic_type) {
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
@@ -5471,7 +5386,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
 			      AMD_PG_SUPPORT_RLC_SMU_HS |
 			      AMD_PG_SUPPORT_CP |
 			      AMD_PG_SUPPORT_GFX_DMG))
-		adev->gfx.rlc.funcs->exit_safe_mode(adev);
+		amdgpu_gfx_rlc_exit_safe_mode(adev);
 	return 0;
 }
 
@@ -5565,57 +5480,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
 #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
 #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
 
-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
 {
-	u32 data;
-	unsigned i;
+	uint32_t rlc_setting;
 
-	data = RREG32(mmRLC_CNTL);
-	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
+	rlc_setting = RREG32(mmRLC_CNTL);
+	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+		return false;
 
-	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-		data |= RLC_SAFE_MODE__CMD_MASK;
-		data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-		data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
-		WREG32(mmRLC_SAFE_MODE, data);
+	return true;
+}
 
-		for (i = 0; i < adev->usec_timeout; i++) {
-			if ((RREG32(mmRLC_GPM_STAT) &
-			     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-			      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
-			    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
-			     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
-				break;
-			udelay(1);
-		}
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+	uint32_t data;
+	unsigned i;
+	data = RREG32(mmRLC_CNTL);
+	data |= RLC_SAFE_MODE__CMD_MASK;
+	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+	WREG32(mmRLC_SAFE_MODE, data);
 
-		for (i = 0; i < adev->usec_timeout; i++) {
-			if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
-				break;
-			udelay(1);
-		}
-		adev->gfx.rlc.in_safe_mode = true;
+	/* wait for RLC_SAFE_MODE */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if ((RREG32(mmRLC_GPM_STAT) &
+		     (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+		      RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+		    (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+		     RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+			break;
+		udelay(1);
+	}
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+			break;
+		udelay(1);
 	}
 }
 
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
 {
-	u32 data = 0;
+	uint32_t data;
 	unsigned i;
 
 	data = RREG32(mmRLC_CNTL);
-	if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-		return;
-
-	if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-		if (adev->gfx.rlc.in_safe_mode) {
-			data |= RLC_SAFE_MODE__CMD_MASK;
-			data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
-			WREG32(mmRLC_SAFE_MODE, data);
-			adev->gfx.rlc.in_safe_mode = false;
-		}
-	}
+	data |= RLC_SAFE_MODE__CMD_MASK;
+	data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+	WREG32(mmRLC_SAFE_MODE, data);
 
 	for (i = 0; i < adev->usec_timeout; i++) {
 		if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
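Splitting enter/exit into is_rlc_enabled(), set_safe_mode() and unset_safe_mode() lets the cg_flags gate and the in_safe_mode bookkeeping, previously duplicated in every ASIC file, live once in common code. The wrappers that the call sites in this diff now use presumably look like this (sketch; the exact flag mask and naming belong to the companion amdgpu_rlc.c patch):

/* assumed shape of the common safe-mode wrappers; the hook dispatch and
 * the in_safe_mode flag handling mirror the per-ASIC code removed above */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
	if (adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
		adev->gfx.rlc.funcs->set_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = true;
	}
}

void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
{
	if (!adev->gfx.rlc.in_safe_mode)
		return;

	/* if RLC is not enabled, do nothing */
	if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
		return;

	if (adev->cg_flags &
	    (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
		adev->gfx.rlc.funcs->unset_safe_mode(adev);
		adev->gfx.rlc.in_safe_mode = false;
	}
}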
@@ -5625,9 +5536,13 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
-	.enter_safe_mode = iceland_enter_rlc_safe_mode,
-	.exit_safe_mode = iceland_exit_rlc_safe_mode,
+	.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+	.set_safe_mode = gfx_v8_0_set_safe_mode,
+	.unset_safe_mode = gfx_v8_0_unset_safe_mode,
 	.init = gfx_v8_0_rlc_init,
+	.get_csb_size = gfx_v8_0_get_csb_size,
+	.get_csb_buffer = gfx_v8_0_get_csb_buffer,
+	.get_cp_table_num = gfx_v8_0_cp_jump_table_num,
 	.resume = gfx_v8_0_rlc_resume,
 	.stop = gfx_v8_0_rlc_stop,
 	.reset = gfx_v8_0_rlc_reset,
@@ -5639,7 +5554,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 {
 	uint32_t temp, data;
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	/* It is disabled by HW by default */
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5735,7 +5650,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
 		gfx_v8_0_wait_for_rlc_serdes(adev);
 	}
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 
 static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5745,7 +5660,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
 	temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
 
-	adev->gfx.rlc.funcs->enter_safe_mode(adev);
+	amdgpu_gfx_rlc_enter_safe_mode(adev);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
 		temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5828,7 +5743,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
 
 	gfx_v8_0_wait_for_rlc_serdes(adev);
 
-	adev->gfx.rlc.funcs->exit_safe_mode(adev);
+	amdgpu_gfx_rlc_exit_safe_mode(adev);
 }
 static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 					    bool enable)