author     Eric Huang <JinHuiEric.Huang@amd.com>        2016-04-14 17:26:07 -0400
committer  Alex Deucher <alexander.deucher@amd.com>     2016-05-04 20:27:28 -0400
commit     2b6cd9779707f1f7974205320a058ef80aa2cdd4 (patch)
tree       4573ca58b639f2070bb58737135f2893cce7275e
parent     538333f0dcc00e24ca4bd63905fa75aa41b4c665 (diff)
drm/amd/amdgpu: add power gating initialization support for GFX8.0
Signed-off-by: Eric Huang <JinHuiEric.Huang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu.h   |  14
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 353
2 files changed, 364 insertions, 3 deletions
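
Note on the RLC firmware parsing below: the patch switches gfx_v8_0_init_microcode() from the v1.0 GFX header to the v2.0 RLC header and copies its layout fields into struct amdgpu_rlc. As a reading aid, the outline below lists only the fields this patch actually dereferences, reconstructed from those accesses; the authoritative definition lives in amdgpu_ucode.h and may carry additional fields, so treat this as an illustrative sketch rather than the real declaration.

/*
 * Illustrative outline (not the kernel's declaration) of the RLC v2.0
 * firmware header as consumed by this patch. Field names mirror the
 * rlc_hdr-> accesses in gfx_v8_0_init_microcode(); packing, ordering and
 * any further fields are not implied.
 */
struct rlc_firmware_header_v2_0_outline {
	struct common_firmware_header header;	/* ucode_version read from here */
	uint32_t ucode_feature_version;
	uint32_t save_and_restore_offset;
	uint32_t clear_state_descriptor_offset;
	uint32_t avail_scratch_ram_locations;
	uint32_t reg_restore_list_size;
	uint32_t reg_list_format_start;
	uint32_t reg_list_format_separate_start;
	uint32_t starting_offsets_start;
	uint32_t reg_list_format_size_bytes;		/* size of the format table */
	uint32_t reg_list_size_bytes;			/* size of the save/restore table */
	uint32_t reg_list_format_array_offset_bytes;	/* offset of format table in the image */
	uint32_t reg_list_array_offset_bytes;		/* offset of restore table in the image */
};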
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5399f3a2453f..6cc174f9d583 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1076,6 +1076,20 @@ struct amdgpu_rlc {
 	/* safe mode for updating CG/PG state */
 	bool in_safe_mode;
 	const struct amdgpu_rlc_funcs *funcs;
+
+	/* for firmware data */
+	u32 save_and_restore_offset;
+	u32 clear_state_descriptor_offset;
+	u32 avail_scratch_ram_locations;
+	u32 reg_restore_list_size;
+	u32 reg_list_format_start;
+	u32 reg_list_format_separate_start;
+	u32 starting_offsets_start;
+	u32 reg_list_format_size_bytes;
+	u32 reg_list_size_bytes;
+
+	u32 *register_list_format;
+	u32 *register_restore;
 };
 
 struct amdgpu_mec {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index ce1054d0f391..89d7b1576a66 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -86,6 +86,8 @@ enum {
 	BPM_REG_FGCG_MAX
 };
 
+#define RLC_FormatDirectRegListLength 14
+
 MODULE_FIRMWARE("amdgpu/carrizo_ce.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_pfp.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_me.bin");
@@ -633,6 +635,7 @@ static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
 static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
+static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
 
 static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
@@ -838,6 +841,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 	struct amdgpu_firmware_info *info = NULL;
 	const struct common_firmware_header *header = NULL;
 	const struct gfx_firmware_header_v1_0 *cp_hdr;
+	const struct rlc_firmware_header_v2_0 *rlc_hdr;
+	unsigned int *tmp = NULL, i;
 
 	DRM_DEBUG("\n");
 
@@ -905,9 +910,49 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 	if (err)
 		goto out;
 	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
-	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
-	adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
-	adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
+	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+
+	adev->gfx.rlc.save_and_restore_offset =
+			le32_to_cpu(rlc_hdr->save_and_restore_offset);
+	adev->gfx.rlc.clear_state_descriptor_offset =
+			le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+	adev->gfx.rlc.avail_scratch_ram_locations =
+			le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+	adev->gfx.rlc.reg_restore_list_size =
+			le32_to_cpu(rlc_hdr->reg_restore_list_size);
+	adev->gfx.rlc.reg_list_format_start =
+			le32_to_cpu(rlc_hdr->reg_list_format_start);
+	adev->gfx.rlc.reg_list_format_separate_start =
+			le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+	adev->gfx.rlc.starting_offsets_start =
+			le32_to_cpu(rlc_hdr->starting_offsets_start);
+	adev->gfx.rlc.reg_list_format_size_bytes =
+			le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+	adev->gfx.rlc.reg_list_size_bytes =
+			le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+
+	adev->gfx.rlc.register_list_format =
+			kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+				adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+
+	if (!adev->gfx.rlc.register_list_format) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	tmp = (unsigned int *)((uint64_t)rlc_hdr +
+			le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+	for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+		adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+	adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+	tmp = (unsigned int *)((uint64_t)rlc_hdr +
+			le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
 
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
 	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -1008,6 +1053,148 @@ out:
 	return err;
 }
 
+static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
+				    volatile u32 *buffer)
+{
+	u32 count = 0, i;
+	const struct cs_section_def *sect = NULL;
+	const struct cs_extent_def *ext = NULL;
+
+	if (adev->gfx.rlc.cs_data == NULL)
+		return;
+	if (buffer == NULL)
+		return;
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+	buffer[count++] = cpu_to_le32(0x80000000);
+	buffer[count++] = cpu_to_le32(0x80000000);
+
+	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+		for (ext = sect->section; ext->extent != NULL; ++ext) {
+			if (sect->id == SECT_CONTEXT) {
+				buffer[count++] =
+					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+				buffer[count++] = cpu_to_le32(ext->reg_index -
+						PACKET3_SET_CONTEXT_REG_START);
+				for (i = 0; i < ext->reg_count; i++)
+					buffer[count++] = cpu_to_le32(ext->extent[i]);
+			} else {
+				return;
+			}
+		}
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG -
+			PACKET3_SET_CONTEXT_REG_START);
+	switch (adev->asic_type) {
+	case CHIP_TONGA:
+		buffer[count++] = cpu_to_le32(0x16000012);
+		buffer[count++] = cpu_to_le32(0x0000002A);
+		break;
+	case CHIP_FIJI:
+		buffer[count++] = cpu_to_le32(0x3a00161a);
+		buffer[count++] = cpu_to_le32(0x0000002e);
+		break;
+	case CHIP_TOPAZ:
+	case CHIP_CARRIZO:
+		buffer[count++] = cpu_to_le32(0x00000002);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	case CHIP_STONEY:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	default:
+		buffer[count++] = cpu_to_le32(0x00000000);
+		buffer[count++] = cpu_to_le32(0x00000000);
+		break;
+	}
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+	buffer[count++] = cpu_to_le32(0);
+}
+
+static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
+{
+	int r;
+
+	/* clear state block */
+	if (adev->gfx.rlc.clear_state_obj) {
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (unlikely(r != 0))
+			dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r);
+		amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+		amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
+		adev->gfx.rlc.clear_state_obj = NULL;
+	}
+}
+
+static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
+{
+	volatile u32 *dst_ptr;
+	u32 dws;
+	const struct cs_section_def *cs_data;
+	int r;
+
+	adev->gfx.rlc.cs_data = vi_cs_data;
+
+	cs_data = adev->gfx.rlc.cs_data;
+
+	if (cs_data) {
+		/* clear state block */
+		adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
+
+		if (adev->gfx.rlc.clear_state_obj == NULL) {
+			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
+					     AMDGPU_GEM_DOMAIN_VRAM,
+					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+					     NULL, NULL,
+					     &adev->gfx.rlc.clear_state_obj);
+			if (r) {
+				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
+				gfx_v8_0_rlc_fini(adev);
+				return r;
+			}
+		}
+		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+		if (unlikely(r != 0)) {
+			gfx_v8_0_rlc_fini(adev);
+			return r;
+		}
+		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
+				  &adev->gfx.rlc.clear_state_gpu_addr);
+		if (r) {
+			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+			gfx_v8_0_rlc_fini(adev);
+			return r;
+		}
+
+		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
+		if (r) {
+			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
+			gfx_v8_0_rlc_fini(adev);
+			return r;
+		}
+		/* set up the cs buffer */
+		dst_ptr = adev->gfx.rlc.cs_ptr;
+		gfx_v8_0_get_csb_buffer(adev, dst_ptr);
+		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+	}
+
+	return 0;
+}
+
 static void gfx_v8_0_mec_fini(struct amdgpu_device *adev)
 {
 	int r;
@@ -1681,6 +1868,12 @@ static int gfx_v8_0_sw_init(void *handle)
 		return r;
 	}
 
+	r = gfx_v8_0_rlc_init(adev);
+	if (r) {
+		DRM_ERROR("Failed to init rlc BOs!\n");
+		return r;
+	}
+
 	r = gfx_v8_0_mec_init(adev);
 	if (r) {
 		DRM_ERROR("Failed to init MEC BOs!\n");
@@ -1780,6 +1973,10 @@ static int gfx_v8_0_sw_fini(void *handle)
 
 	gfx_v8_0_mec_fini(adev);
 
+	gfx_v8_0_rlc_fini(adev);
+
+	kfree(adev->gfx.rlc.register_list_format);
+
 	return 0;
 }
 
@@ -3322,6 +3519,154 @@ static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
 	WREG32(mmCP_INT_CNTL_RING0, tmp);
 }
 
+static void gfx_v8_0_init_csb(struct amdgpu_device *adev)
+{
+	/* csib */
+	WREG32(mmRLC_CSIB_ADDR_HI,
+			adev->gfx.rlc.clear_state_gpu_addr >> 32);
+	WREG32(mmRLC_CSIB_ADDR_LO,
+			adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+	WREG32(mmRLC_CSIB_LENGTH,
+			adev->gfx.rlc.clear_state_size);
+}
+
+static void gfx_v8_0_parse_ind_reg_list(int *register_list_format,
+				int ind_offset,
+				int list_size,
+				int *unique_indices,
+				int *indices_count,
+				int max_indices,
+				int *ind_start_offsets,
+				int *offset_count,
+				int max_offset)
+{
+	int indices;
+	bool new_entry = true;
+
+	for (; ind_offset < list_size; ind_offset++) {
+
+		if (new_entry) {
+			new_entry = false;
+			ind_start_offsets[*offset_count] = ind_offset;
+			*offset_count = *offset_count + 1;
+			BUG_ON(*offset_count >= max_offset);
+		}
+
+		if (register_list_format[ind_offset] == 0xFFFFFFFF) {
+			new_entry = true;
+			continue;
+		}
+
+		ind_offset += 2;
+
+		/* look for the matching indice */
+		for (indices = 0;
+			indices < *indices_count;
+			indices++) {
+			if (unique_indices[indices] ==
+				register_list_format[ind_offset])
+				break;
+		}
+
+		if (indices >= *indices_count) {
+			unique_indices[*indices_count] =
+				register_list_format[ind_offset];
+			indices = *indices_count;
+			*indices_count = *indices_count + 1;
+			BUG_ON(*indices_count >= max_indices);
+		}
+
+		register_list_format[ind_offset] = indices;
+	}
+}
+
+static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
+{
+	int i, temp, data;
+	int unique_indices[] = {0, 0, 0, 0, 0, 0, 0, 0};
+	int indices_count = 0;
+	int indirect_start_offsets[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	int offset_count = 0;
+
+	int list_size;
+	unsigned int *register_list_format =
+		kmalloc(adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
+	if (register_list_format == NULL)
+		return -ENOMEM;
+	memcpy(register_list_format, adev->gfx.rlc.register_list_format,
+			adev->gfx.rlc.reg_list_format_size_bytes);
+
+	gfx_v8_0_parse_ind_reg_list(register_list_format,
+				RLC_FormatDirectRegListLength,
+				adev->gfx.rlc.reg_list_format_size_bytes >> 2,
+				unique_indices,
+				&indices_count,
+				sizeof(unique_indices) / sizeof(int),
+				indirect_start_offsets,
+				&offset_count,
+				sizeof(indirect_start_offsets)/sizeof(int));
+
+	/* save and restore list */
+	temp = RREG32(mmRLC_SRM_CNTL);
+	temp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
+	WREG32(mmRLC_SRM_CNTL, temp);
+
+	WREG32(mmRLC_SRM_ARAM_ADDR, 0);
+	for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
+		WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
+
+	/* indirect list */
+	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
+	for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
+		WREG32(mmRLC_GPM_SCRATCH_DATA, register_list_format[i]);
+
+	list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
+	list_size = list_size >> 1;
+	WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
+	WREG32(mmRLC_GPM_SCRATCH_DATA, list_size);
+
+	/* starting offsets starts */
+	WREG32(mmRLC_GPM_SCRATCH_ADDR,
+		adev->gfx.rlc.starting_offsets_start);
+	for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
+		WREG32(mmRLC_GPM_SCRATCH_DATA,
+				indirect_start_offsets[i]);
+
+	/* unique indices */
+	temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
+	data = mmRLC_SRM_INDEX_CNTL_DATA_0;
+	for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
+		amdgpu_mm_wreg(adev, temp + i, unique_indices[i] & 0x3FFFF, false);
+		amdgpu_mm_wreg(adev, data + i, unique_indices[i] >> 20, false);
+	}
+	kfree(register_list_format);
+
+	return 0;
+}
+
+static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
+{
+	uint32_t data;
+
+	data = RREG32(mmRLC_SRM_CNTL);
+	data |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
+	WREG32(mmRLC_SRM_CNTL, data);
+}
+
+static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
+{
+	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+			      AMD_PG_SUPPORT_GFX_SMG |
+			      AMD_PG_SUPPORT_GFX_DMG |
+			      AMD_PG_SUPPORT_CP |
+			      AMD_PG_SUPPORT_GDS |
+			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
+		gfx_v8_0_init_csb(adev);
+		gfx_v8_0_init_save_restore_list(adev);
+		gfx_v8_0_enable_save_restore_machine(adev);
+	}
+}
+
 void gfx_v8_0_rlc_stop(struct amdgpu_device *adev)
 {
 	u32 tmp = RREG32(mmRLC_CNTL);
@@ -3401,6 +3746,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 
 	gfx_v8_0_rlc_reset(adev);
 
+	gfx_v8_0_init_pg(adev);
+
 	if (!adev->pp_enabled) {
 		if (!adev->firmware.smu_load) {
 			/* legacy rlc firmware loading */
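
A closing note on the data flow above: gfx_v8_0_init_microcode() makes a single allocation that backs both RLC tables (register_restore points just past register_list_format), gfx_v8_0_parse_ind_reg_list() then skips the first RLC_FormatDirectRegListLength dwords of the format table and treats what follows as three-dword entries whose third dword carries an indexed-register value that is collapsed into the fixed-size unique_indices[] array and replaced with its slot number, with 0xFFFFFFFF marking the start of the next indirect block, and gfx_v8_0_sw_fini() later frees both tables with one kfree(). The standalone sketch below only mirrors that allocation split; the sizes are invented for illustration and nothing in it is driver code.

/*
 * Userspace illustration (not kernel code) of the single buffer backing
 * both RLC tables in this patch: the format table comes first and the
 * save/restore table starts immediately after it.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	uint32_t reg_list_format_size_bytes = 0x60;	/* made-up example size */
	uint32_t reg_list_size_bytes = 0x180;		/* made-up example size */
	uint32_t *register_list_format;
	uint32_t *register_restore;

	/* one allocation, mirroring the kmalloc() in gfx_v8_0_init_microcode() */
	register_list_format = malloc(reg_list_format_size_bytes +
				      reg_list_size_bytes);
	if (!register_list_format)
		return 1;

	/* the restore table begins right after the format table (dword units) */
	register_restore = register_list_format +
			   (reg_list_format_size_bytes >> 2);

	printf("format table: %u dwords, restore table at +%u dwords\n",
	       (unsigned)(reg_list_format_size_bytes >> 2),
	       (unsigned)(register_restore - register_list_format));

	/* one free() releases both, like the kfree() in gfx_v8_0_sw_fini() */
	free(register_list_format);
	return 0;
}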