diff options
author | Likun Gao <Likun.Gao@amd.com> | 2018-11-08 07:19:54 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2018-11-09 16:29:12 -0500 |
commit | 106c7d6148e5aadd394e6701f7e498df49b869d1 (patch) | |
tree | 4ad91ee0536a5949138549cbac51b7f45dc6208b /drivers/gpu/drm/amd/amdgpu | |
parent | 88dfc9a3dd47027c9ffc831635e5cf4e8ed3b781 (diff) |
drm/amdgpu: abstract the function of enter/exit safe mode for RLC
Abstract the function of amdgpu_gfx_rlc_enter/exit_safe_mode and some part of
rlc_init to improve the reusability of RLC.
Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | 229 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 33 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 6 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 24 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 148 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 201 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 183 | ||||
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 6 |
8 files changed, 384 insertions, 446 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c index c5459ab6a31f..c8793e6cc3c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /* | 1 | /* |
3 | * Copyright 2014 Advanced Micro Devices, Inc. | 2 | * Copyright 2014 Advanced Micro Devices, Inc. |
4 | * Copyright 2008 Red Hat Inc. | 3 | * Copyright 2008 Red Hat Inc. |
@@ -23,12 +22,238 @@ | |||
23 | * OTHER DEALINGS IN THE SOFTWARE. | 22 | * OTHER DEALINGS IN THE SOFTWARE. |
24 | * | 23 | * |
25 | */ | 24 | */ |
26 | 25 | #include <linux/firmware.h> | |
27 | #include "amdgpu.h" | 26 | #include "amdgpu.h" |
28 | #include "amdgpu_gfx.h" | 27 | #include "amdgpu_gfx.h" |
29 | #include "amdgpu_rlc.h" | 28 | #include "amdgpu_rlc.h" |
30 | 29 | ||
31 | /** | 30 | /** |
31 | * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode | ||
32 | * | ||
33 | * @adev: amdgpu_device pointer | ||
34 | * | ||
35 | * Put the RLC into safe mode if RLC is enabled and not already in safe mode. | ||
36 | */ | ||
37 | void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev) | ||
38 | { | ||
39 | if (adev->gfx.rlc.in_safe_mode) | ||
40 | return; | ||
41 | |||
42 | /* if RLC is not enabled, do nothing */ | ||
43 | if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev)) | ||
44 | return; | ||
45 | |||
46 | if (adev->cg_flags & | ||
47 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | | ||
48 | AMD_CG_SUPPORT_GFX_3D_CGCG)) { | ||
49 | adev->gfx.rlc.funcs->set_safe_mode(adev); | ||
50 | adev->gfx.rlc.in_safe_mode = true; | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /** | ||
55 | * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode | ||
56 | * | ||
57 | * @adev: amdgpu_device pointer | ||
58 | * | ||
59 | * Take the RLC out of safe mode if RLC is enabled and has entered safe mode. | ||
60 | */ | ||
61 | void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev) | ||
62 | { | ||
63 | if (!(adev->gfx.rlc.in_safe_mode)) | ||
64 | return; | ||
65 | |||
66 | /* if RLC is not enabled, do nothing */ | ||
67 | if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev)) | ||
68 | return; | ||
69 | |||
70 | if (adev->cg_flags & | ||
71 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | | ||
72 | AMD_CG_SUPPORT_GFX_3D_CGCG)) { | ||
73 | adev->gfx.rlc.funcs->unset_safe_mode(adev); | ||
74 | adev->gfx.rlc.in_safe_mode = false; | ||
75 | } | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * amdgpu_gfx_rlc_init_sr - Init save restore block | ||
80 | * | ||
81 | * @adev: amdgpu_device pointer | ||
82 | * @dws: the size of save restore block | ||
83 | * | ||
84 | * Allocate and setup value to save restore block of rlc. | ||
85 | * Returns 0 on success or a negative error code if allocation failed. | ||
86 | */ | ||
87 | int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) | ||
88 | { | ||
89 | const u32 *src_ptr; | ||
90 | volatile u32 *dst_ptr; | ||
91 | u32 i; | ||
92 | int r; | ||
93 | |||
94 | /* allocate save restore block */ | ||
95 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | ||
96 | AMDGPU_GEM_DOMAIN_VRAM, | ||
97 | &adev->gfx.rlc.save_restore_obj, | ||
98 | &adev->gfx.rlc.save_restore_gpu_addr, | ||
99 | (void **)&adev->gfx.rlc.sr_ptr); | ||
100 | if (r) { | ||
101 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); | ||
102 | amdgpu_gfx_rlc_fini(adev); | ||
103 | return r; | ||
104 | } | ||
105 | |||
106 | /* write the sr buffer */ | ||
107 | src_ptr = adev->gfx.rlc.reg_list; | ||
108 | dst_ptr = adev->gfx.rlc.sr_ptr; | ||
109 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) | ||
110 | dst_ptr[i] = cpu_to_le32(src_ptr[i]); | ||
111 | amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); | ||
112 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * amdgpu_gfx_rlc_init_csb - Init clear state block | ||
119 | * | ||
120 | * @adev: amdgpu_device pointer | ||
121 | * | ||
122 | * Allocate and setup value to clear state block of rlc. | ||
123 | * Returns 0 on success or a negative error code if allocation failed. | ||
124 | */ | ||
125 | int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev) | ||
126 | { | ||
127 | volatile u32 *dst_ptr; | ||
128 | u32 dws; | ||
129 | int r; | ||
130 | |||
131 | /* allocate clear state block */ | ||
132 | adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev); | ||
133 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | ||
134 | AMDGPU_GEM_DOMAIN_VRAM, | ||
135 | &adev->gfx.rlc.clear_state_obj, | ||
136 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
137 | (void **)&adev->gfx.rlc.cs_ptr); | ||
138 | if (r) { | ||
139 | dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r); | ||
140 | amdgpu_gfx_rlc_fini(adev); | ||
141 | return r; | ||
142 | } | ||
143 | |||
144 | /* set up the cs buffer */ | ||
145 | dst_ptr = adev->gfx.rlc.cs_ptr; | ||
146 | adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr); | ||
147 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | ||
148 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); | ||
149 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /** | ||
155 | * amdgpu_gfx_rlc_init_cpt - Init cp table | ||
156 | * | ||
157 | * @adev: amdgpu_device pointer | ||
158 | * | ||
159 | * Allocate and setup value to cp table of rlc. | ||
160 | * Returns 0 on success or a negative error code if allocation failed. | ||
161 | */ | ||
162 | int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev) | ||
163 | { | ||
164 | int r; | ||
165 | |||
166 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, | ||
167 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | ||
168 | &adev->gfx.rlc.cp_table_obj, | ||
169 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
170 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
171 | if (r) { | ||
172 | dev_err(adev->dev, "(%d) failed to create cp table bo\n", r); | ||
173 | amdgpu_gfx_rlc_fini(adev); | ||
174 | return r; | ||
175 | } | ||
176 | |||
177 | /* set up the cp table */ | ||
178 | amdgpu_gfx_rlc_setup_cp_table(adev); | ||
179 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | ||
180 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * amdgpu_gfx_rlc_setup_cp_table - set up the buffer of the cp table | ||
187 | * | ||
188 | * @adev: amdgpu_device pointer | ||
189 | * | ||
190 | * Write cp firmware data into cp table. | ||
191 | */ | ||
192 | void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev) | ||
193 | { | ||
194 | const __le32 *fw_data; | ||
195 | volatile u32 *dst_ptr; | ||
196 | int me, i, max_me; | ||
197 | u32 bo_offset = 0; | ||
198 | u32 table_offset, table_size; | ||
199 | |||
200 | max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev); | ||
201 | |||
202 | /* write the cp table buffer */ | ||
203 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | ||
204 | for (me = 0; me < max_me; me++) { | ||
205 | if (me == 0) { | ||
206 | const struct gfx_firmware_header_v1_0 *hdr = | ||
207 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | ||
208 | fw_data = (const __le32 *) | ||
209 | (adev->gfx.ce_fw->data + | ||
210 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
211 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
212 | table_size = le32_to_cpu(hdr->jt_size); | ||
213 | } else if (me == 1) { | ||
214 | const struct gfx_firmware_header_v1_0 *hdr = | ||
215 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | ||
216 | fw_data = (const __le32 *) | ||
217 | (adev->gfx.pfp_fw->data + | ||
218 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
219 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
220 | table_size = le32_to_cpu(hdr->jt_size); | ||
221 | } else if (me == 2) { | ||
222 | const struct gfx_firmware_header_v1_0 *hdr = | ||
223 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | ||
224 | fw_data = (const __le32 *) | ||
225 | (adev->gfx.me_fw->data + | ||
226 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
227 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
228 | table_size = le32_to_cpu(hdr->jt_size); | ||
229 | } else if (me == 3) { | ||
230 | const struct gfx_firmware_header_v1_0 *hdr = | ||
231 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
232 | fw_data = (const __le32 *) | ||
233 | (adev->gfx.mec_fw->data + | ||
234 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
235 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
236 | table_size = le32_to_cpu(hdr->jt_size); | ||
237 | } else if (me == 4) { | ||
238 | const struct gfx_firmware_header_v1_0 *hdr = | ||
239 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
240 | fw_data = (const __le32 *) | ||
241 | (adev->gfx.mec2_fw->data + | ||
242 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
243 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
244 | table_size = le32_to_cpu(hdr->jt_size); | ||
245 | } | ||
246 | |||
247 | for (i = 0; i < table_size; i ++) { | ||
248 | dst_ptr[bo_offset + i] = | ||
249 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | ||
250 | } | ||
251 | |||
252 | bo_offset += table_size; | ||
253 | } | ||
254 | } | ||
255 | |||
256 | /** | ||
32 | * amdgpu_gfx_rlc_fini - Free BO which used for RLC | 257 | * amdgpu_gfx_rlc_fini - Free BO which used for RLC |
33 | * | 258 | * |
34 | * @adev: amdgpu_device pointer | 259 | * @adev: amdgpu_device pointer |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index b3b092022fc4..49a8ab52113b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | |||
@@ -1,4 +1,3 @@ | |||
1 | |||
2 | /* | 1 | /* |
3 | * Copyright 2014 Advanced Micro Devices, Inc. | 2 | * Copyright 2014 Advanced Micro Devices, Inc. |
4 | * | 3 | * |
@@ -28,9 +27,13 @@ | |||
28 | #include "clearstate_defs.h" | 27 | #include "clearstate_defs.h" |
29 | 28 | ||
30 | struct amdgpu_rlc_funcs { | 29 | struct amdgpu_rlc_funcs { |
31 | void (*enter_safe_mode)(struct amdgpu_device *adev); | 30 | bool (*is_rlc_enabled)(struct amdgpu_device *adev); |
32 | void (*exit_safe_mode)(struct amdgpu_device *adev); | 31 | void (*set_safe_mode)(struct amdgpu_device *adev); |
32 | void (*unset_safe_mode)(struct amdgpu_device *adev); | ||
33 | int (*init)(struct amdgpu_device *adev); | 33 | int (*init)(struct amdgpu_device *adev); |
34 | u32 (*get_csb_size)(struct amdgpu_device *adev); | ||
35 | void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer); | ||
36 | int (*get_cp_table_num)(struct amdgpu_device *adev); | ||
34 | int (*resume)(struct amdgpu_device *adev); | 37 | int (*resume)(struct amdgpu_device *adev); |
35 | void (*stop)(struct amdgpu_device *adev); | 38 | void (*stop)(struct amdgpu_device *adev); |
36 | void (*reset)(struct amdgpu_device *adev); | 39 | void (*reset)(struct amdgpu_device *adev); |
@@ -39,21 +42,21 @@ struct amdgpu_rlc_funcs { | |||
39 | 42 | ||
40 | struct amdgpu_rlc { | 43 | struct amdgpu_rlc { |
41 | /* for power gating */ | 44 | /* for power gating */ |
42 | struct amdgpu_bo *save_restore_obj; | 45 | struct amdgpu_bo *save_restore_obj; |
43 | uint64_t save_restore_gpu_addr; | 46 | uint64_t save_restore_gpu_addr; |
44 | volatile uint32_t *sr_ptr; | 47 | volatile uint32_t *sr_ptr; |
45 | const u32 *reg_list; | 48 | const u32 *reg_list; |
46 | u32 reg_list_size; | 49 | u32 reg_list_size; |
47 | /* for clear state */ | 50 | /* for clear state */ |
48 | struct amdgpu_bo *clear_state_obj; | 51 | struct amdgpu_bo *clear_state_obj; |
49 | uint64_t clear_state_gpu_addr; | 52 | uint64_t clear_state_gpu_addr; |
50 | volatile uint32_t *cs_ptr; | 53 | volatile uint32_t *cs_ptr; |
51 | const struct cs_section_def *cs_data; | 54 | const struct cs_section_def *cs_data; |
52 | u32 clear_state_size; | 55 | u32 clear_state_size; |
53 | /* for cp tables */ | 56 | /* for cp tables */ |
54 | struct amdgpu_bo *cp_table_obj; | 57 | struct amdgpu_bo *cp_table_obj; |
55 | uint64_t cp_table_gpu_addr; | 58 | uint64_t cp_table_gpu_addr; |
56 | volatile uint32_t *cp_table_ptr; | 59 | volatile uint32_t *cp_table_ptr; |
57 | u32 cp_table_size; | 60 | u32 cp_table_size; |
58 | 61 | ||
59 | /* safe mode for updating CG/PG state */ | 62 | /* safe mode for updating CG/PG state */ |
@@ -84,6 +87,12 @@ struct amdgpu_rlc { | |||
84 | bool is_rlc_v2_1; | 87 | bool is_rlc_v2_1; |
85 | }; | 88 | }; |
86 | 89 | ||
90 | void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev); | ||
91 | void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev); | ||
92 | int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws); | ||
93 | int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev); | ||
94 | int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev); | ||
95 | void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev); | ||
87 | void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev); | 96 | void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev); |
88 | 97 | ||
89 | #endif | 98 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 79220a91abe3..86e14c754dd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable) | |||
743 | 743 | ||
744 | if (pi->caps_sq_ramping || pi->caps_db_ramping || | 744 | if (pi->caps_sq_ramping || pi->caps_db_ramping || |
745 | pi->caps_td_ramping || pi->caps_tcp_ramping) { | 745 | pi->caps_td_ramping || pi->caps_tcp_ramping) { |
746 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 746 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
747 | 747 | ||
748 | if (enable) { | 748 | if (enable) { |
749 | ret = ci_program_pt_config_registers(adev, didt_config_ci); | 749 | ret = ci_program_pt_config_registers(adev, didt_config_ci); |
750 | if (ret) { | 750 | if (ret) { |
751 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 751 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
752 | return ret; | 752 | return ret; |
753 | } | 753 | } |
754 | } | 754 | } |
755 | 755 | ||
756 | ci_do_enable_didt(adev, enable); | 756 | ci_do_enable_didt(adev, enable); |
757 | 757 | ||
758 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 758 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
759 | } | 759 | } |
760 | 760 | ||
761 | return 0; | 761 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 192d98490188..1dc3013ea1d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | |||
@@ -2355,7 +2355,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) | |||
2355 | { | 2355 | { |
2356 | const u32 *src_ptr; | 2356 | const u32 *src_ptr; |
2357 | volatile u32 *dst_ptr; | 2357 | volatile u32 *dst_ptr; |
2358 | u32 dws, i; | 2358 | u32 dws; |
2359 | u64 reg_list_mc_addr; | 2359 | u64 reg_list_mc_addr; |
2360 | const struct cs_section_def *cs_data; | 2360 | const struct cs_section_def *cs_data; |
2361 | int r; | 2361 | int r; |
@@ -2370,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) | |||
2370 | cs_data = adev->gfx.rlc.cs_data; | 2370 | cs_data = adev->gfx.rlc.cs_data; |
2371 | 2371 | ||
2372 | if (src_ptr) { | 2372 | if (src_ptr) { |
2373 | /* save restore block */ | 2373 | /* init save restore block */ |
2374 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | 2374 | r = amdgpu_gfx_rlc_init_sr(adev, dws); |
2375 | AMDGPU_GEM_DOMAIN_VRAM, | 2375 | if (r) |
2376 | &adev->gfx.rlc.save_restore_obj, | ||
2377 | &adev->gfx.rlc.save_restore_gpu_addr, | ||
2378 | (void **)&adev->gfx.rlc.sr_ptr); | ||
2379 | if (r) { | ||
2380 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", | ||
2381 | r); | ||
2382 | amdgpu_gfx_rlc_fini(adev); | ||
2383 | return r; | 2376 | return r; |
2384 | } | ||
2385 | |||
2386 | /* write the sr buffer */ | ||
2387 | dst_ptr = adev->gfx.rlc.sr_ptr; | ||
2388 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) | ||
2389 | dst_ptr[i] = cpu_to_le32(src_ptr[i]); | ||
2390 | |||
2391 | amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); | ||
2392 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | ||
2393 | } | 2377 | } |
2394 | 2378 | ||
2395 | if (cs_data) { | 2379 | if (cs_data) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 8097534aa6c9..f467b9bd090d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] = | |||
882 | 882 | ||
883 | static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); | 883 | static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); |
884 | static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); | 884 | static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); |
885 | static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev); | ||
886 | static void gfx_v7_0_init_pg(struct amdgpu_device *adev); | 885 | static void gfx_v7_0_init_pg(struct amdgpu_device *adev); |
887 | static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); | 886 | static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); |
888 | 887 | ||
@@ -3255,8 +3254,7 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, | |||
3255 | static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | 3254 | static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) |
3256 | { | 3255 | { |
3257 | const u32 *src_ptr; | 3256 | const u32 *src_ptr; |
3258 | volatile u32 *dst_ptr; | 3257 | u32 dws; |
3259 | u32 dws, i; | ||
3260 | const struct cs_section_def *cs_data; | 3258 | const struct cs_section_def *cs_data; |
3261 | int r; | 3259 | int r; |
3262 | 3260 | ||
@@ -3283,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3283 | cs_data = adev->gfx.rlc.cs_data; | 3281 | cs_data = adev->gfx.rlc.cs_data; |
3284 | 3282 | ||
3285 | if (src_ptr) { | 3283 | if (src_ptr) { |
3286 | /* save restore block */ | 3284 | /* init save restore block */ |
3287 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | 3285 | r = amdgpu_gfx_rlc_init_sr(adev, dws); |
3288 | AMDGPU_GEM_DOMAIN_VRAM, | 3286 | if (r) |
3289 | &adev->gfx.rlc.save_restore_obj, | ||
3290 | &adev->gfx.rlc.save_restore_gpu_addr, | ||
3291 | (void **)&adev->gfx.rlc.sr_ptr); | ||
3292 | if (r) { | ||
3293 | dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); | ||
3294 | amdgpu_gfx_rlc_fini(adev); | ||
3295 | return r; | 3287 | return r; |
3296 | } | ||
3297 | |||
3298 | /* write the sr buffer */ | ||
3299 | dst_ptr = adev->gfx.rlc.sr_ptr; | ||
3300 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) | ||
3301 | dst_ptr[i] = cpu_to_le32(src_ptr[i]); | ||
3302 | amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); | ||
3303 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | ||
3304 | } | 3288 | } |
3305 | 3289 | ||
3306 | if (cs_data) { | 3290 | if (cs_data) { |
3307 | /* clear state block */ | 3291 | /* init clear state block */ |
3308 | adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); | 3292 | r = amdgpu_gfx_rlc_init_csb(adev); |
3309 | 3293 | if (r) | |
3310 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | ||
3311 | AMDGPU_GEM_DOMAIN_VRAM, | ||
3312 | &adev->gfx.rlc.clear_state_obj, | ||
3313 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
3314 | (void **)&adev->gfx.rlc.cs_ptr); | ||
3315 | if (r) { | ||
3316 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | ||
3317 | amdgpu_gfx_rlc_fini(adev); | ||
3318 | return r; | 3294 | return r; |
3319 | } | ||
3320 | |||
3321 | /* set up the cs buffer */ | ||
3322 | dst_ptr = adev->gfx.rlc.cs_ptr; | ||
3323 | gfx_v7_0_get_csb_buffer(adev, dst_ptr); | ||
3324 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | ||
3325 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
3326 | } | 3295 | } |
3327 | 3296 | ||
3328 | if (adev->gfx.rlc.cp_table_size) { | 3297 | if (adev->gfx.rlc.cp_table_size) { |
3329 | 3298 | r = amdgpu_gfx_rlc_init_cpt(adev); | |
3330 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, | 3299 | if (r) |
3331 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | ||
3332 | &adev->gfx.rlc.cp_table_obj, | ||
3333 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
3334 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
3335 | if (r) { | ||
3336 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); | ||
3337 | amdgpu_gfx_rlc_fini(adev); | ||
3338 | return r; | 3300 | return r; |
3339 | } | ||
3340 | |||
3341 | gfx_v7_0_init_cp_pg_table(adev); | ||
3342 | |||
3343 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | ||
3344 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
3345 | |||
3346 | } | 3301 | } |
3347 | 3302 | ||
3348 | return 0; | 3303 | return 0; |
@@ -3423,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) | |||
3423 | return orig; | 3378 | return orig; |
3424 | } | 3379 | } |
3425 | 3380 | ||
3426 | static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) | 3381 | static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev) |
3382 | { | ||
3383 | return true; | ||
3384 | } | ||
3385 | |||
3386 | static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev) | ||
3427 | { | 3387 | { |
3428 | u32 tmp, i, mask; | 3388 | u32 tmp, i, mask; |
3429 | 3389 | ||
@@ -3445,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) | |||
3445 | } | 3405 | } |
3446 | } | 3406 | } |
3447 | 3407 | ||
3448 | static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) | 3408 | static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev) |
3449 | { | 3409 | { |
3450 | u32 tmp; | 3410 | u32 tmp; |
3451 | 3411 | ||
@@ -3761,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) | |||
3761 | WREG32(mmRLC_PG_CNTL, data); | 3721 | WREG32(mmRLC_PG_CNTL, data); |
3762 | } | 3722 | } |
3763 | 3723 | ||
3764 | static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev) | 3724 | static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev) |
3765 | { | 3725 | { |
3766 | const __le32 *fw_data; | ||
3767 | volatile u32 *dst_ptr; | ||
3768 | int me, i, max_me = 4; | ||
3769 | u32 bo_offset = 0; | ||
3770 | u32 table_offset, table_size; | ||
3771 | |||
3772 | if (adev->asic_type == CHIP_KAVERI) | 3726 | if (adev->asic_type == CHIP_KAVERI) |
3773 | max_me = 5; | 3727 | return 5; |
3774 | 3728 | else | |
3775 | if (adev->gfx.rlc.cp_table_ptr == NULL) | 3729 | return 4; |
3776 | return; | ||
3777 | |||
3778 | /* write the cp table buffer */ | ||
3779 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | ||
3780 | for (me = 0; me < max_me; me++) { | ||
3781 | if (me == 0) { | ||
3782 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3783 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | ||
3784 | fw_data = (const __le32 *) | ||
3785 | (adev->gfx.ce_fw->data + | ||
3786 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3787 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3788 | table_size = le32_to_cpu(hdr->jt_size); | ||
3789 | } else if (me == 1) { | ||
3790 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3791 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | ||
3792 | fw_data = (const __le32 *) | ||
3793 | (adev->gfx.pfp_fw->data + | ||
3794 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3795 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3796 | table_size = le32_to_cpu(hdr->jt_size); | ||
3797 | } else if (me == 2) { | ||
3798 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3799 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | ||
3800 | fw_data = (const __le32 *) | ||
3801 | (adev->gfx.me_fw->data + | ||
3802 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3803 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3804 | table_size = le32_to_cpu(hdr->jt_size); | ||
3805 | } else if (me == 3) { | ||
3806 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3807 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
3808 | fw_data = (const __le32 *) | ||
3809 | (adev->gfx.mec_fw->data + | ||
3810 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3811 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3812 | table_size = le32_to_cpu(hdr->jt_size); | ||
3813 | } else { | ||
3814 | const struct gfx_firmware_header_v1_0 *hdr = | ||
3815 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
3816 | fw_data = (const __le32 *) | ||
3817 | (adev->gfx.mec2_fw->data + | ||
3818 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
3819 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
3820 | table_size = le32_to_cpu(hdr->jt_size); | ||
3821 | } | ||
3822 | |||
3823 | for (i = 0; i < table_size; i ++) { | ||
3824 | dst_ptr[bo_offset + i] = | ||
3825 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | ||
3826 | } | ||
3827 | |||
3828 | bo_offset += table_size; | ||
3829 | } | ||
3830 | } | 3730 | } |
3831 | 3731 | ||
3832 | static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, | 3732 | static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, |
@@ -4265,9 +4165,13 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { | |||
4265 | }; | 4165 | }; |
4266 | 4166 | ||
4267 | static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { | 4167 | static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { |
4268 | .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, | 4168 | .is_rlc_enabled = gfx_v7_0_is_rlc_enabled, |
4269 | .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode, | 4169 | .set_safe_mode = gfx_v7_0_set_safe_mode, |
4170 | .unset_safe_mode = gfx_v7_0_unset_safe_mode, | ||
4270 | .init = gfx_v7_0_rlc_init, | 4171 | .init = gfx_v7_0_rlc_init, |
4172 | .get_csb_size = gfx_v7_0_get_csb_size, | ||
4173 | .get_csb_buffer = gfx_v7_0_get_csb_buffer, | ||
4174 | .get_cp_table_num = gfx_v7_0_cp_pg_table_num, | ||
4271 | .resume = gfx_v7_0_rlc_resume, | 4175 | .resume = gfx_v7_0_rlc_resume, |
4272 | .stop = gfx_v7_0_rlc_stop, | 4176 | .stop = gfx_v7_0_rlc_stop, |
4273 | .reset = gfx_v7_0_rlc_reset, | 4177 | .reset = gfx_v7_0_rlc_reset, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 81a308bac230..cb066a8dccd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -1283,75 +1283,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, | |||
1283 | buffer[count++] = cpu_to_le32(0); | 1283 | buffer[count++] = cpu_to_le32(0); |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | static void cz_init_cp_jump_table(struct amdgpu_device *adev) | 1286 | static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev) |
1287 | { | 1287 | { |
1288 | const __le32 *fw_data; | ||
1289 | volatile u32 *dst_ptr; | ||
1290 | int me, i, max_me = 4; | ||
1291 | u32 bo_offset = 0; | ||
1292 | u32 table_offset, table_size; | ||
1293 | |||
1294 | if (adev->asic_type == CHIP_CARRIZO) | 1288 | if (adev->asic_type == CHIP_CARRIZO) |
1295 | max_me = 5; | 1289 | return 5; |
1296 | 1290 | else | |
1297 | /* write the cp table buffer */ | 1291 | return 4; |
1298 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | ||
1299 | for (me = 0; me < max_me; me++) { | ||
1300 | if (me == 0) { | ||
1301 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1302 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | ||
1303 | fw_data = (const __le32 *) | ||
1304 | (adev->gfx.ce_fw->data + | ||
1305 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1306 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1307 | table_size = le32_to_cpu(hdr->jt_size); | ||
1308 | } else if (me == 1) { | ||
1309 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1310 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | ||
1311 | fw_data = (const __le32 *) | ||
1312 | (adev->gfx.pfp_fw->data + | ||
1313 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1314 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1315 | table_size = le32_to_cpu(hdr->jt_size); | ||
1316 | } else if (me == 2) { | ||
1317 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1318 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | ||
1319 | fw_data = (const __le32 *) | ||
1320 | (adev->gfx.me_fw->data + | ||
1321 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1322 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1323 | table_size = le32_to_cpu(hdr->jt_size); | ||
1324 | } else if (me == 3) { | ||
1325 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1326 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
1327 | fw_data = (const __le32 *) | ||
1328 | (adev->gfx.mec_fw->data + | ||
1329 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1330 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1331 | table_size = le32_to_cpu(hdr->jt_size); | ||
1332 | } else if (me == 4) { | ||
1333 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1334 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
1335 | fw_data = (const __le32 *) | ||
1336 | (adev->gfx.mec2_fw->data + | ||
1337 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1338 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1339 | table_size = le32_to_cpu(hdr->jt_size); | ||
1340 | } | ||
1341 | |||
1342 | for (i = 0; i < table_size; i ++) { | ||
1343 | dst_ptr[bo_offset + i] = | ||
1344 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | ||
1345 | } | ||
1346 | |||
1347 | bo_offset += table_size; | ||
1348 | } | ||
1349 | } | 1292 | } |
1350 | 1293 | ||
1351 | static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | 1294 | static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) |
1352 | { | 1295 | { |
1353 | volatile u32 *dst_ptr; | ||
1354 | u32 dws; | ||
1355 | const struct cs_section_def *cs_data; | 1296 | const struct cs_section_def *cs_data; |
1356 | int r; | 1297 | int r; |
1357 | 1298 | ||
@@ -1360,44 +1301,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1360 | cs_data = adev->gfx.rlc.cs_data; | 1301 | cs_data = adev->gfx.rlc.cs_data; |
1361 | 1302 | ||
1362 | if (cs_data) { | 1303 | if (cs_data) { |
1363 | /* clear state block */ | 1304 | /* init clear state block */ |
1364 | adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); | 1305 | r = amdgpu_gfx_rlc_init_csb(adev); |
1365 | 1306 | if (r) | |
1366 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | ||
1367 | AMDGPU_GEM_DOMAIN_VRAM, | ||
1368 | &adev->gfx.rlc.clear_state_obj, | ||
1369 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
1370 | (void **)&adev->gfx.rlc.cs_ptr); | ||
1371 | if (r) { | ||
1372 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | ||
1373 | amdgpu_gfx_rlc_fini(adev); | ||
1374 | return r; | 1307 | return r; |
1375 | } | ||
1376 | |||
1377 | /* set up the cs buffer */ | ||
1378 | dst_ptr = adev->gfx.rlc.cs_ptr; | ||
1379 | gfx_v8_0_get_csb_buffer(adev, dst_ptr); | ||
1380 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | ||
1381 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
1382 | } | 1308 | } |
1383 | 1309 | ||
1384 | if ((adev->asic_type == CHIP_CARRIZO) || | 1310 | if ((adev->asic_type == CHIP_CARRIZO) || |
1385 | (adev->asic_type == CHIP_STONEY)) { | 1311 | (adev->asic_type == CHIP_STONEY)) { |
1386 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ | 1312 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ |
1387 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, | 1313 | r = amdgpu_gfx_rlc_init_cpt(adev); |
1388 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | 1314 | if (r) |
1389 | &adev->gfx.rlc.cp_table_obj, | ||
1390 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
1391 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
1392 | if (r) { | ||
1393 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); | ||
1394 | return r; | 1315 | return r; |
1395 | } | ||
1396 | |||
1397 | cz_init_cp_jump_table(adev); | ||
1398 | |||
1399 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | ||
1400 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
1401 | } | 1316 | } |
1402 | 1317 | ||
1403 | return 0; | 1318 | return 0; |
@@ -4945,7 +4860,7 @@ static int gfx_v8_0_hw_fini(void *handle) | |||
4945 | pr_debug("For SRIOV client, shouldn't do anything.\n"); | 4860 | pr_debug("For SRIOV client, shouldn't do anything.\n"); |
4946 | return 0; | 4861 | return 0; |
4947 | } | 4862 | } |
4948 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 4863 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
4949 | if (!gfx_v8_0_wait_for_idle(adev)) | 4864 | if (!gfx_v8_0_wait_for_idle(adev)) |
4950 | gfx_v8_0_cp_enable(adev, false); | 4865 | gfx_v8_0_cp_enable(adev, false); |
4951 | else | 4866 | else |
@@ -4954,7 +4869,7 @@ static int gfx_v8_0_hw_fini(void *handle) | |||
4954 | adev->gfx.rlc.funcs->stop(adev); | 4869 | adev->gfx.rlc.funcs->stop(adev); |
4955 | else | 4870 | else |
4956 | pr_err("rlc is busy, skip halt rlc\n"); | 4871 | pr_err("rlc is busy, skip halt rlc\n"); |
4957 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 4872 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
4958 | return 0; | 4873 | return 0; |
4959 | } | 4874 | } |
4960 | 4875 | ||
@@ -5417,7 +5332,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
5417 | AMD_PG_SUPPORT_RLC_SMU_HS | | 5332 | AMD_PG_SUPPORT_RLC_SMU_HS | |
5418 | AMD_PG_SUPPORT_CP | | 5333 | AMD_PG_SUPPORT_CP | |
5419 | AMD_PG_SUPPORT_GFX_DMG)) | 5334 | AMD_PG_SUPPORT_GFX_DMG)) |
5420 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 5335 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
5421 | switch (adev->asic_type) { | 5336 | switch (adev->asic_type) { |
5422 | case CHIP_CARRIZO: | 5337 | case CHIP_CARRIZO: |
5423 | case CHIP_STONEY: | 5338 | case CHIP_STONEY: |
@@ -5471,7 +5386,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, | |||
5471 | AMD_PG_SUPPORT_RLC_SMU_HS | | 5386 | AMD_PG_SUPPORT_RLC_SMU_HS | |
5472 | AMD_PG_SUPPORT_CP | | 5387 | AMD_PG_SUPPORT_CP | |
5473 | AMD_PG_SUPPORT_GFX_DMG)) | 5388 | AMD_PG_SUPPORT_GFX_DMG)) |
5474 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 5389 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
5475 | return 0; | 5390 | return 0; |
5476 | } | 5391 | } |
5477 | 5392 | ||
@@ -5565,57 +5480,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, | |||
5565 | #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 | 5480 | #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 |
5566 | #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e | 5481 | #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e |
5567 | 5482 | ||
5568 | static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) | 5483 | static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev) |
5569 | { | 5484 | { |
5570 | u32 data; | 5485 | uint32_t rlc_setting; |
5571 | unsigned i; | ||
5572 | 5486 | ||
5573 | data = RREG32(mmRLC_CNTL); | 5487 | rlc_setting = RREG32(mmRLC_CNTL); |
5574 | if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK)) | 5488 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) |
5575 | return; | 5489 | return false; |
5576 | 5490 | ||
5577 | if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { | 5491 | return true; |
5578 | data |= RLC_SAFE_MODE__CMD_MASK; | 5492 | } |
5579 | data &= ~RLC_SAFE_MODE__MESSAGE_MASK; | ||
5580 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); | ||
5581 | WREG32(mmRLC_SAFE_MODE, data); | ||
5582 | 5493 | ||
5583 | for (i = 0; i < adev->usec_timeout; i++) { | 5494 | static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev) |
5584 | if ((RREG32(mmRLC_GPM_STAT) & | 5495 | { |
5585 | (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | | 5496 | uint32_t data; |
5586 | RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == | 5497 | unsigned i; |
5587 | (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | | 5498 | data = RREG32(mmRLC_CNTL); |
5588 | RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) | 5499 | data |= RLC_SAFE_MODE__CMD_MASK; |
5589 | break; | 5500 | data &= ~RLC_SAFE_MODE__MESSAGE_MASK; |
5590 | udelay(1); | 5501 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); |
5591 | } | 5502 | WREG32(mmRLC_SAFE_MODE, data); |
5592 | 5503 | ||
5593 | for (i = 0; i < adev->usec_timeout; i++) { | 5504 | /* wait for RLC_SAFE_MODE */ |
5594 | if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) | 5505 | for (i = 0; i < adev->usec_timeout; i++) { |
5595 | break; | 5506 | if ((RREG32(mmRLC_GPM_STAT) & |
5596 | udelay(1); | 5507 | (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | |
5597 | } | 5508 | RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == |
5598 | adev->gfx.rlc.in_safe_mode = true; | 5509 | (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | |
5510 | RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) | ||
5511 | break; | ||
5512 | udelay(1); | ||
5513 | } | ||
5514 | for (i = 0; i < adev->usec_timeout; i++) { | ||
5515 | if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) | ||
5516 | break; | ||
5517 | udelay(1); | ||
5599 | } | 5518 | } |
5600 | } | 5519 | } |
5601 | 5520 | ||
5602 | static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) | 5521 | static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev) |
5603 | { | 5522 | { |
5604 | u32 data = 0; | 5523 | uint32_t data; |
5605 | unsigned i; | 5524 | unsigned i; |
5606 | 5525 | ||
5607 | data = RREG32(mmRLC_CNTL); | 5526 | data = RREG32(mmRLC_CNTL); |
5608 | if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK)) | 5527 | data |= RLC_SAFE_MODE__CMD_MASK; |
5609 | return; | 5528 | data &= ~RLC_SAFE_MODE__MESSAGE_MASK; |
5610 | 5529 | WREG32(mmRLC_SAFE_MODE, data); | |
5611 | if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { | ||
5612 | if (adev->gfx.rlc.in_safe_mode) { | ||
5613 | data |= RLC_SAFE_MODE__CMD_MASK; | ||
5614 | data &= ~RLC_SAFE_MODE__MESSAGE_MASK; | ||
5615 | WREG32(mmRLC_SAFE_MODE, data); | ||
5616 | adev->gfx.rlc.in_safe_mode = false; | ||
5617 | } | ||
5618 | } | ||
5619 | 5530 | ||
5620 | for (i = 0; i < adev->usec_timeout; i++) { | 5531 | for (i = 0; i < adev->usec_timeout; i++) { |
5621 | if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) | 5532 | if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) |
@@ -5625,9 +5536,13 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) | |||
5625 | } | 5536 | } |
5626 | 5537 | ||
5627 | static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { | 5538 | static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { |
5628 | .enter_safe_mode = iceland_enter_rlc_safe_mode, | 5539 | .is_rlc_enabled = gfx_v8_0_is_rlc_enabled, |
5629 | .exit_safe_mode = iceland_exit_rlc_safe_mode, | 5540 | .set_safe_mode = gfx_v8_0_set_safe_mode, |
5541 | .unset_safe_mode = gfx_v8_0_unset_safe_mode, | ||
5630 | .init = gfx_v8_0_rlc_init, | 5542 | .init = gfx_v8_0_rlc_init, |
5543 | .get_csb_size = gfx_v8_0_get_csb_size, | ||
5544 | .get_csb_buffer = gfx_v8_0_get_csb_buffer, | ||
5545 | .get_cp_table_num = gfx_v8_0_cp_jump_table_num, | ||
5631 | .resume = gfx_v8_0_rlc_resume, | 5546 | .resume = gfx_v8_0_rlc_resume, |
5632 | .stop = gfx_v8_0_rlc_stop, | 5547 | .stop = gfx_v8_0_rlc_stop, |
5633 | .reset = gfx_v8_0_rlc_reset, | 5548 | .reset = gfx_v8_0_rlc_reset, |
@@ -5639,7 +5554,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev | |||
5639 | { | 5554 | { |
5640 | uint32_t temp, data; | 5555 | uint32_t temp, data; |
5641 | 5556 | ||
5642 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 5557 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
5643 | 5558 | ||
5644 | /* It is disabled by HW by default */ | 5559 | /* It is disabled by HW by default */ |
5645 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { | 5560 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { |
@@ -5735,7 +5650,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev | |||
5735 | gfx_v8_0_wait_for_rlc_serdes(adev); | 5650 | gfx_v8_0_wait_for_rlc_serdes(adev); |
5736 | } | 5651 | } |
5737 | 5652 | ||
5738 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 5653 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
5739 | } | 5654 | } |
5740 | 5655 | ||
5741 | static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, | 5656 | static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, |
@@ -5745,7 +5660,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev | |||
5745 | 5660 | ||
5746 | temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL); | 5661 | temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL); |
5747 | 5662 | ||
5748 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 5663 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
5749 | 5664 | ||
5750 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { | 5665 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { |
5751 | temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); | 5666 | temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); |
@@ -5828,7 +5743,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev | |||
5828 | 5743 | ||
5829 | gfx_v8_0_wait_for_rlc_serdes(adev); | 5744 | gfx_v8_0_wait_for_rlc_serdes(adev); |
5830 | 5745 | ||
5831 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 5746 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
5832 | } | 5747 | } |
5833 | static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, | 5748 | static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, |
5834 | bool enable) | 5749 | bool enable) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 84831839070c..d6783ba2c9d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | |||
@@ -1050,72 +1050,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable) | |||
1050 | WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0); | 1050 | WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0); |
1051 | } | 1051 | } |
1052 | 1052 | ||
1053 | static void rv_init_cp_jump_table(struct amdgpu_device *adev) | 1053 | static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev) |
1054 | { | 1054 | { |
1055 | const __le32 *fw_data; | 1055 | return 5; |
1056 | volatile u32 *dst_ptr; | ||
1057 | int me, i, max_me = 5; | ||
1058 | u32 bo_offset = 0; | ||
1059 | u32 table_offset, table_size; | ||
1060 | |||
1061 | /* write the cp table buffer */ | ||
1062 | dst_ptr = adev->gfx.rlc.cp_table_ptr; | ||
1063 | for (me = 0; me < max_me; me++) { | ||
1064 | if (me == 0) { | ||
1065 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1066 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; | ||
1067 | fw_data = (const __le32 *) | ||
1068 | (adev->gfx.ce_fw->data + | ||
1069 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1070 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1071 | table_size = le32_to_cpu(hdr->jt_size); | ||
1072 | } else if (me == 1) { | ||
1073 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1074 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; | ||
1075 | fw_data = (const __le32 *) | ||
1076 | (adev->gfx.pfp_fw->data + | ||
1077 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1078 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1079 | table_size = le32_to_cpu(hdr->jt_size); | ||
1080 | } else if (me == 2) { | ||
1081 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1082 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; | ||
1083 | fw_data = (const __le32 *) | ||
1084 | (adev->gfx.me_fw->data + | ||
1085 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1086 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1087 | table_size = le32_to_cpu(hdr->jt_size); | ||
1088 | } else if (me == 3) { | ||
1089 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1090 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
1091 | fw_data = (const __le32 *) | ||
1092 | (adev->gfx.mec_fw->data + | ||
1093 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1094 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1095 | table_size = le32_to_cpu(hdr->jt_size); | ||
1096 | } else if (me == 4) { | ||
1097 | const struct gfx_firmware_header_v1_0 *hdr = | ||
1098 | (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
1099 | fw_data = (const __le32 *) | ||
1100 | (adev->gfx.mec2_fw->data + | ||
1101 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
1102 | table_offset = le32_to_cpu(hdr->jt_offset); | ||
1103 | table_size = le32_to_cpu(hdr->jt_size); | ||
1104 | } | ||
1105 | |||
1106 | for (i = 0; i < table_size; i ++) { | ||
1107 | dst_ptr[bo_offset + i] = | ||
1108 | cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); | ||
1109 | } | ||
1110 | |||
1111 | bo_offset += table_size; | ||
1112 | } | ||
1113 | } | 1056 | } |
1114 | 1057 | ||
1115 | static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) | 1058 | static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) |
1116 | { | 1059 | { |
1117 | volatile u32 *dst_ptr; | ||
1118 | u32 dws; | ||
1119 | const struct cs_section_def *cs_data; | 1060 | const struct cs_section_def *cs_data; |
1120 | int r; | 1061 | int r; |
1121 | 1062 | ||
@@ -1124,45 +1065,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) | |||
1124 | cs_data = adev->gfx.rlc.cs_data; | 1065 | cs_data = adev->gfx.rlc.cs_data; |
1125 | 1066 | ||
1126 | if (cs_data) { | 1067 | if (cs_data) { |
1127 | /* clear state block */ | 1068 | /* init clear state block */ |
1128 | adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); | 1069 | r = amdgpu_gfx_rlc_init_csb(adev); |
1129 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, | 1070 | if (r) |
1130 | AMDGPU_GEM_DOMAIN_VRAM, | ||
1131 | &adev->gfx.rlc.clear_state_obj, | ||
1132 | &adev->gfx.rlc.clear_state_gpu_addr, | ||
1133 | (void **)&adev->gfx.rlc.cs_ptr); | ||
1134 | if (r) { | ||
1135 | dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", | ||
1136 | r); | ||
1137 | amdgpu_gfx_rlc_fini(adev); | ||
1138 | return r; | 1071 | return r; |
1139 | } | ||
1140 | /* set up the cs buffer */ | ||
1141 | dst_ptr = adev->gfx.rlc.cs_ptr; | ||
1142 | gfx_v9_0_get_csb_buffer(adev, dst_ptr); | ||
1143 | amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); | ||
1144 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); | ||
1145 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
1146 | } | 1072 | } |
1147 | 1073 | ||
1148 | if (adev->asic_type == CHIP_RAVEN) { | 1074 | if (adev->asic_type == CHIP_RAVEN) { |
1149 | /* TODO: double check the cp_table_size for RV */ | 1075 | /* TODO: double check the cp_table_size for RV */ |
1150 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ | 1076 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ |
1151 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, | 1077 | r = amdgpu_gfx_rlc_init_cpt(adev); |
1152 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | 1078 | if (r) |
1153 | &adev->gfx.rlc.cp_table_obj, | ||
1154 | &adev->gfx.rlc.cp_table_gpu_addr, | ||
1155 | (void **)&adev->gfx.rlc.cp_table_ptr); | ||
1156 | if (r) { | ||
1157 | dev_err(adev->dev, | ||
1158 | "(%d) failed to create cp table bo\n", r); | ||
1159 | amdgpu_gfx_rlc_fini(adev); | ||
1160 | return r; | 1079 | return r; |
1161 | } | ||
1162 | |||
1163 | rv_init_cp_jump_table(adev); | ||
1164 | amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); | ||
1165 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
1166 | } | 1080 | } |
1167 | 1081 | ||
1168 | switch (adev->asic_type) { | 1082 | switch (adev->asic_type) { |
@@ -3585,64 +3499,47 @@ static int gfx_v9_0_late_init(void *handle) | |||
3585 | return 0; | 3499 | return 0; |
3586 | } | 3500 | } |
3587 | 3501 | ||
3588 | static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev) | 3502 | static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev) |
3589 | { | 3503 | { |
3590 | uint32_t rlc_setting, data; | 3504 | uint32_t rlc_setting; |
3591 | unsigned i; | ||
3592 | |||
3593 | if (adev->gfx.rlc.in_safe_mode) | ||
3594 | return; | ||
3595 | 3505 | ||
3596 | /* if RLC is not enabled, do nothing */ | 3506 | /* if RLC is not enabled, do nothing */ |
3597 | rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); | 3507 | rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); |
3598 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) | 3508 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) |
3599 | return; | 3509 | return false; |
3600 | |||
3601 | if (adev->cg_flags & | ||
3602 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG | | ||
3603 | AMD_CG_SUPPORT_GFX_3D_CGCG)) { | ||
3604 | data = RLC_SAFE_MODE__CMD_MASK; | ||
3605 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); | ||
3606 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); | ||
3607 | 3510 | ||
3608 | /* wait for RLC_SAFE_MODE */ | 3511 | return true; |
3609 | for (i = 0; i < adev->usec_timeout; i++) { | ||
3610 | if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) | ||
3611 | break; | ||
3612 | udelay(1); | ||
3613 | } | ||
3614 | adev->gfx.rlc.in_safe_mode = true; | ||
3615 | } | ||
3616 | } | 3512 | } |
3617 | 3513 | ||
3618 | static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev) | 3514 | static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev) |
3619 | { | 3515 | { |
3620 | uint32_t rlc_setting, data; | 3516 | uint32_t data; |
3621 | 3517 | unsigned i; | |
3622 | if (!adev->gfx.rlc.in_safe_mode) | ||
3623 | return; | ||
3624 | 3518 | ||
3625 | /* if RLC is not enabled, do nothing */ | 3519 | data = RLC_SAFE_MODE__CMD_MASK; |
3626 | rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL); | 3520 | data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT); |
3627 | if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK)) | 3521 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); |
3628 | return; | ||
3629 | 3522 | ||
3630 | if (adev->cg_flags & | 3523 | /* wait for RLC_SAFE_MODE */ |
3631 | (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) { | 3524 | for (i = 0; i < adev->usec_timeout; i++) { |
3632 | /* | 3525 | if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD)) |
3633 | * Try to exit safe mode only if it is already in safe | 3526 | break; |
3634 | * mode. | 3527 | udelay(1); |
3635 | */ | ||
3636 | data = RLC_SAFE_MODE__CMD_MASK; | ||
3637 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); | ||
3638 | adev->gfx.rlc.in_safe_mode = false; | ||
3639 | } | 3528 | } |
3640 | } | 3529 | } |
3641 | 3530 | ||
3531 | static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev) | ||
3532 | { | ||
3533 | uint32_t data; | ||
3534 | |||
3535 | data = RLC_SAFE_MODE__CMD_MASK; | ||
3536 | WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data); | ||
3537 | } | ||
3538 | |||
3642 | static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, | 3539 | static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, |
3643 | bool enable) | 3540 | bool enable) |
3644 | { | 3541 | { |
3645 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 3542 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
3646 | 3543 | ||
3647 | if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { | 3544 | if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { |
3648 | gfx_v9_0_enable_gfx_cg_power_gating(adev, true); | 3545 | gfx_v9_0_enable_gfx_cg_power_gating(adev, true); |
@@ -3653,7 +3550,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, | |||
3653 | gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); | 3550 | gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); |
3654 | } | 3551 | } |
3655 | 3552 | ||
3656 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 3553 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
3657 | } | 3554 | } |
3658 | 3555 | ||
3659 | static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, | 3556 | static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, |
@@ -3751,7 +3648,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, | |||
3751 | { | 3648 | { |
3752 | uint32_t data, def; | 3649 | uint32_t data, def; |
3753 | 3650 | ||
3754 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 3651 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
3755 | 3652 | ||
3756 | /* Enable 3D CGCG/CGLS */ | 3653 | /* Enable 3D CGCG/CGLS */ |
3757 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { | 3654 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) { |
@@ -3791,7 +3688,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev, | |||
3791 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); | 3688 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data); |
3792 | } | 3689 | } |
3793 | 3690 | ||
3794 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 3691 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
3795 | } | 3692 | } |
3796 | 3693 | ||
3797 | static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, | 3694 | static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev, |
@@ -3799,7 +3696,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev | |||
3799 | { | 3696 | { |
3800 | uint32_t def, data; | 3697 | uint32_t def, data; |
3801 | 3698 | ||
3802 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 3699 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
3803 | 3700 | ||
3804 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { | 3701 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { |
3805 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); | 3702 | def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); |
@@ -3839,7 +3736,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev | |||
3839 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); | 3736 | WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data); |
3840 | } | 3737 | } |
3841 | 3738 | ||
3842 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 3739 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
3843 | } | 3740 | } |
3844 | 3741 | ||
3845 | static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, | 3742 | static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, |
@@ -3868,9 +3765,13 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev, | |||
3868 | } | 3765 | } |
3869 | 3766 | ||
3870 | static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { | 3767 | static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { |
3871 | .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode, | 3768 | .is_rlc_enabled = gfx_v9_0_is_rlc_enabled, |
3872 | .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode, | 3769 | .set_safe_mode = gfx_v9_0_set_safe_mode, |
3770 | .unset_safe_mode = gfx_v9_0_unset_safe_mode, | ||
3873 | .init = gfx_v9_0_rlc_init, | 3771 | .init = gfx_v9_0_rlc_init, |
3772 | .get_csb_size = gfx_v9_0_get_csb_size, | ||
3773 | .get_csb_buffer = gfx_v9_0_get_csb_buffer, | ||
3774 | .get_cp_table_num = gfx_v9_0_cp_jump_table_num, | ||
3874 | .resume = gfx_v9_0_rlc_resume, | 3775 | .resume = gfx_v9_0_rlc_resume, |
3875 | .stop = gfx_v9_0_rlc_stop, | 3776 | .stop = gfx_v9_0_rlc_stop, |
3876 | .reset = gfx_v9_0_rlc_reset, | 3777 | .reset = gfx_v9_0_rlc_reset, |
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index d0e478f43443..0c9a2c03504e 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c | |||
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable) | |||
508 | pi->caps_db_ramping || | 508 | pi->caps_db_ramping || |
509 | pi->caps_td_ramping || | 509 | pi->caps_td_ramping || |
510 | pi->caps_tcp_ramping) { | 510 | pi->caps_tcp_ramping) { |
511 | adev->gfx.rlc.funcs->enter_safe_mode(adev); | 511 | amdgpu_gfx_rlc_enter_safe_mode(adev); |
512 | 512 | ||
513 | if (enable) { | 513 | if (enable) { |
514 | ret = kv_program_pt_config_registers(adev, didt_config_kv); | 514 | ret = kv_program_pt_config_registers(adev, didt_config_kv); |
515 | if (ret) { | 515 | if (ret) { |
516 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 516 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
517 | return ret; | 517 | return ret; |
518 | } | 518 | } |
519 | } | 519 | } |
520 | 520 | ||
521 | kv_do_enable_didt(adev, enable); | 521 | kv_do_enable_didt(adev, enable); |
522 | 522 | ||
523 | adev->gfx.rlc.funcs->exit_safe_mode(adev); | 523 | amdgpu_gfx_rlc_exit_safe_mode(adev); |
524 | } | 524 | } |
525 | 525 | ||
526 | return 0; | 526 | return 0; |