49 files changed, 33097 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index aec28866945f..9a573e87cdd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -18,29 +18,57 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
 
+# add asic specific block
 amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
 	ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
 
+amdgpu-y += \
+	vi.o
+
+# add GMC block
+amdgpu-y += \
+	gmc_v8_0.o
+
 # add IH block
 amdgpu-y += \
 	amdgpu_irq.o \
-	amdgpu_ih.o
+	amdgpu_ih.o \
+	iceland_ih.o \
+	tonga_ih.o \
+	cz_ih.o
 
 # add SMC block
 amdgpu-y += \
-	amdgpu_dpm.o
+	amdgpu_dpm.o \
+	cz_smc.o cz_dpm.o \
+	tonga_smc.o tonga_dpm.o \
+	iceland_smc.o iceland_dpm.o
+
+# add DCE block
+amdgpu-y += \
+	dce_v10_0.o \
+	dce_v11_0.o
 
 # add GFX block
 amdgpu-y += \
-	amdgpu_gfx.o
+	amdgpu_gfx.o \
+	gfx_v8_0.o
+
+# add async DMA block
+amdgpu-y += \
+	sdma_v2_4.o \
+	sdma_v3_0.o
 
 # add UVD block
 amdgpu-y += \
-	amdgpu_uvd.o
+	amdgpu_uvd.o \
+	uvd_v5_0.o \
+	uvd_v6_0.o
 
 # add VCE block
 amdgpu-y += \
-	amdgpu_vce.o
+	amdgpu_vce.o \
+	vce_v3_0.o
 
 amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
 amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 548e0843d95a..61cf5ad78857 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -41,6 +41,7 @@
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "cik.h"
 #endif
+#include "vi.h"
 #include "bif/bif_4_1_d.h"
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
@@ -1154,9 +1155,21 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 
 static int amdgpu_early_init(struct amdgpu_device *adev)
 {
-	int i, r = -EINVAL;
+	int i, r;
 
 	switch (adev->asic_type) {
+	case CHIP_TOPAZ:
+	case CHIP_TONGA:
+	case CHIP_CARRIZO:
+		if (adev->asic_type == CHIP_CARRIZO)
+			adev->family = AMDGPU_FAMILY_CZ;
+		else
+			adev->family = AMDGPU_FAMILY_VI;
+
+		r = vi_set_ip_blocks(adev);
+		if (r)
+			return r;
+		break;
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
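Review note: vi_set_ip_blocks() (added by this patch in vi.c) fills in the per-ASIC list of IP-block handlers that amdgpu_early_init() then walks. The sketch below models only that dispatch idea as a standalone C program; the struct and function names (ip_block, early_init) are hypothetical and are not the driver's actual table layout.

	/* Minimal user-space model of per-IP-block dispatch (hypothetical names). */
	#include <stdio.h>

	struct ip_block {
		const char *name;
		int (*early_init)(void);
	};

	static int gmc_early_init(void) { return 0; }
	static int ih_early_init(void)  { return 0; }

	static const struct ip_block vi_blocks[] = {
		{ "gmc_v8_0", gmc_early_init },
		{ "tonga_ih", ih_early_init },
	};

	int main(void)
	{
		/* walk every registered block and abort on the first error,
		 * mirroring the switch + vi_set_ip_blocks() flow above */
		for (unsigned int i = 0; i < sizeof(vi_blocks) / sizeof(vi_blocks[0]); i++) {
			int r = vi_blocks[i].early_init();
			if (r) {
				fprintf(stderr, "%s: early init failed (%d)\n",
					vi_blocks[i].name, r);
				return r;
			}
			printf("%s: early init ok\n", vi_blocks[i].name);
		}
		return 0;
	}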
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_vi.h b/drivers/gpu/drm/amd/amdgpu/clearstate_vi.h
new file mode 100644
index 000000000000..1aab9bef9349
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_vi.h
@@ -0,0 +1,944 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int vi_SECT_CONTEXT_def_1[] =
+{
+	0x00000000, // DB_RENDER_CONTROL
+	0x00000000, // DB_COUNT_CONTROL
+	0x00000000, // DB_DEPTH_VIEW
+	0x00000000, // DB_RENDER_OVERRIDE
+	0x00000000, // DB_RENDER_OVERRIDE2
+	0x00000000, // DB_HTILE_DATA_BASE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // DB_DEPTH_BOUNDS_MIN
+	0x00000000, // DB_DEPTH_BOUNDS_MAX
+	0x00000000, // DB_STENCIL_CLEAR
+	0x00000000, // DB_DEPTH_CLEAR
+	0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+	0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+	0, // HOLE
+	0x00000000, // DB_DEPTH_INFO
+	0x00000000, // DB_Z_INFO
+	0x00000000, // DB_STENCIL_INFO
+	0x00000000, // DB_Z_READ_BASE
+	0x00000000, // DB_STENCIL_READ_BASE
+	0x00000000, // DB_Z_WRITE_BASE
+	0x00000000, // DB_STENCIL_WRITE_BASE
+	0x00000000, // DB_DEPTH_SIZE
+	0x00000000, // DB_DEPTH_SLICE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // TA_BC_BASE_ADDR
+	0x00000000, // TA_BC_BASE_ADDR_HI
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // COHER_DEST_BASE_HI_0
+	0x00000000, // COHER_DEST_BASE_HI_1
+	0x00000000, // COHER_DEST_BASE_HI_2
+	0x00000000, // COHER_DEST_BASE_HI_3
+	0x00000000, // COHER_DEST_BASE_2
+	0x00000000, // COHER_DEST_BASE_3
+	0x00000000, // PA_SC_WINDOW_OFFSET
+	0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+	0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+	0x0000ffff, // PA_SC_CLIPRECT_RULE
+	0x00000000, // PA_SC_CLIPRECT_0_TL
+	0x40004000, // PA_SC_CLIPRECT_0_BR
+	0x00000000, // PA_SC_CLIPRECT_1_TL
+	0x40004000, // PA_SC_CLIPRECT_1_BR
+	0x00000000, // PA_SC_CLIPRECT_2_TL
+	0x40004000, // PA_SC_CLIPRECT_2_BR
+	0x00000000, // PA_SC_CLIPRECT_3_TL
+	0x40004000, // PA_SC_CLIPRECT_3_BR
+	0xaa99aaaa, // PA_SC_EDGERULE
+	0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+	0xffffffff, // CB_TARGET_MASK
+	0xffffffff, // CB_SHADER_MASK
+	0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+	0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+	0x00000000, // COHER_DEST_BASE_0
+	0x00000000, // COHER_DEST_BASE_1
+	0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+	0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+	0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+	0x00000000, // PA_SC_VPORT_ZMIN_0
+	0x3f800000, // PA_SC_VPORT_ZMAX_0
+	0x00000000, // PA_SC_VPORT_ZMIN_1
+	0x3f800000, // PA_SC_VPORT_ZMAX_1
+	0x00000000, // PA_SC_VPORT_ZMIN_2
+	0x3f800000, // PA_SC_VPORT_ZMAX_2
+	0x00000000, // PA_SC_VPORT_ZMIN_3
+	0x3f800000, // PA_SC_VPORT_ZMAX_3
+	0x00000000, // PA_SC_VPORT_ZMIN_4
+	0x3f800000, // PA_SC_VPORT_ZMAX_4
+	0x00000000, // PA_SC_VPORT_ZMIN_5
+	0x3f800000, // PA_SC_VPORT_ZMAX_5
+	0x00000000, // PA_SC_VPORT_ZMIN_6
+	0x3f800000, // PA_SC_VPORT_ZMAX_6
+	0x00000000, // PA_SC_VPORT_ZMIN_7
+	0x3f800000, // PA_SC_VPORT_ZMAX_7
+	0x00000000, // PA_SC_VPORT_ZMIN_8
+	0x3f800000, // PA_SC_VPORT_ZMAX_8
+	0x00000000, // PA_SC_VPORT_ZMIN_9
+	0x3f800000, // PA_SC_VPORT_ZMAX_9
+	0x00000000, // PA_SC_VPORT_ZMIN_10
+	0x3f800000, // PA_SC_VPORT_ZMAX_10
+	0x00000000, // PA_SC_VPORT_ZMIN_11
+	0x3f800000, // PA_SC_VPORT_ZMAX_11
+	0x00000000, // PA_SC_VPORT_ZMIN_12
+	0x3f800000, // PA_SC_VPORT_ZMAX_12
+	0x00000000, // PA_SC_VPORT_ZMIN_13
+	0x3f800000, // PA_SC_VPORT_ZMAX_13
+	0x00000000, // PA_SC_VPORT_ZMIN_14
+	0x3f800000, // PA_SC_VPORT_ZMAX_14
+	0x00000000, // PA_SC_VPORT_ZMIN_15
+	0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int vi_SECT_CONTEXT_def_2[] =
+{
+	0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+	0, // HOLE
+	0x00000000, // CP_PERFMON_CNTX_CNTL
+	0x00000000, // CP_RINGID
+	0x00000000, // CP_VMID
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0xffffffff, // VGT_MAX_VTX_INDX
+	0x00000000, // VGT_MIN_VTX_INDX
+	0x00000000, // VGT_INDX_OFFSET
+	0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+	0, // HOLE
+	0x00000000, // CB_BLEND_RED
+	0x00000000, // CB_BLEND_GREEN
+	0x00000000, // CB_BLEND_BLUE
+	0x00000000, // CB_BLEND_ALPHA
+	0x00000000, // CB_DCC_CONTROL
+	0, // HOLE
+	0x00000000, // DB_STENCIL_CONTROL
+	0x00000000, // DB_STENCILREFMASK
+	0x00000000, // DB_STENCILREFMASK_BF
+	0, // HOLE
+	0x00000000, // PA_CL_VPORT_XSCALE
+	0x00000000, // PA_CL_VPORT_XOFFSET
+	0x00000000, // PA_CL_VPORT_YSCALE
+	0x00000000, // PA_CL_VPORT_YOFFSET
+	0x00000000, // PA_CL_VPORT_ZSCALE
+	0x00000000, // PA_CL_VPORT_ZOFFSET
+	0x00000000, // PA_CL_VPORT_XSCALE_1
+	0x00000000, // PA_CL_VPORT_XOFFSET_1
+	0x00000000, // PA_CL_VPORT_YSCALE_1
+	0x00000000, // PA_CL_VPORT_YOFFSET_1
+	0x00000000, // PA_CL_VPORT_ZSCALE_1
+	0x00000000, // PA_CL_VPORT_ZOFFSET_1
+	0x00000000, // PA_CL_VPORT_XSCALE_2
+	0x00000000, // PA_CL_VPORT_XOFFSET_2
+	0x00000000, // PA_CL_VPORT_YSCALE_2
+	0x00000000, // PA_CL_VPORT_YOFFSET_2
+	0x00000000, // PA_CL_VPORT_ZSCALE_2
+	0x00000000, // PA_CL_VPORT_ZOFFSET_2
+	0x00000000, // PA_CL_VPORT_XSCALE_3
+	0x00000000, // PA_CL_VPORT_XOFFSET_3
+	0x00000000, // PA_CL_VPORT_YSCALE_3
+	0x00000000, // PA_CL_VPORT_YOFFSET_3
+	0x00000000, // PA_CL_VPORT_ZSCALE_3
+	0x00000000, // PA_CL_VPORT_ZOFFSET_3
+	0x00000000, // PA_CL_VPORT_XSCALE_4
+	0x00000000, // PA_CL_VPORT_XOFFSET_4
+	0x00000000, // PA_CL_VPORT_YSCALE_4
+	0x00000000, // PA_CL_VPORT_YOFFSET_4
+	0x00000000, // PA_CL_VPORT_ZSCALE_4
+	0x00000000, // PA_CL_VPORT_ZOFFSET_4
+	0x00000000, // PA_CL_VPORT_XSCALE_5
+	0x00000000, // PA_CL_VPORT_XOFFSET_5
+	0x00000000, // PA_CL_VPORT_YSCALE_5
+	0x00000000, // PA_CL_VPORT_YOFFSET_5
+	0x00000000, // PA_CL_VPORT_ZSCALE_5
+	0x00000000, // PA_CL_VPORT_ZOFFSET_5
+	0x00000000, // PA_CL_VPORT_XSCALE_6
+	0x00000000, // PA_CL_VPORT_XOFFSET_6
+	0x00000000, // PA_CL_VPORT_YSCALE_6
+	0x00000000, // PA_CL_VPORT_YOFFSET_6
+	0x00000000, // PA_CL_VPORT_ZSCALE_6
+	0x00000000, // PA_CL_VPORT_ZOFFSET_6
+	0x00000000, // PA_CL_VPORT_XSCALE_7
+	0x00000000, // PA_CL_VPORT_XOFFSET_7
+	0x00000000, // PA_CL_VPORT_YSCALE_7
+	0x00000000, // PA_CL_VPORT_YOFFSET_7
+	0x00000000, // PA_CL_VPORT_ZSCALE_7
+	0x00000000, // PA_CL_VPORT_ZOFFSET_7
+	0x00000000, // PA_CL_VPORT_XSCALE_8
+	0x00000000, // PA_CL_VPORT_XOFFSET_8
+	0x00000000, // PA_CL_VPORT_YSCALE_8
+	0x00000000, // PA_CL_VPORT_YOFFSET_8
+	0x00000000, // PA_CL_VPORT_ZSCALE_8
+	0x00000000, // PA_CL_VPORT_ZOFFSET_8
+	0x00000000, // PA_CL_VPORT_XSCALE_9
+	0x00000000, // PA_CL_VPORT_XOFFSET_9
+	0x00000000, // PA_CL_VPORT_YSCALE_9
+	0x00000000, // PA_CL_VPORT_YOFFSET_9
+	0x00000000, // PA_CL_VPORT_ZSCALE_9
+	0x00000000, // PA_CL_VPORT_ZOFFSET_9
+	0x00000000, // PA_CL_VPORT_XSCALE_10
+	0x00000000, // PA_CL_VPORT_XOFFSET_10
+	0x00000000, // PA_CL_VPORT_YSCALE_10
+	0x00000000, // PA_CL_VPORT_YOFFSET_10
+	0x00000000, // PA_CL_VPORT_ZSCALE_10
+	0x00000000, // PA_CL_VPORT_ZOFFSET_10
+	0x00000000, // PA_CL_VPORT_XSCALE_11
+	0x00000000, // PA_CL_VPORT_XOFFSET_11
+	0x00000000, // PA_CL_VPORT_YSCALE_11
+	0x00000000, // PA_CL_VPORT_YOFFSET_11
+	0x00000000, // PA_CL_VPORT_ZSCALE_11
+	0x00000000, // PA_CL_VPORT_ZOFFSET_11
+	0x00000000, // PA_CL_VPORT_XSCALE_12
+	0x00000000, // PA_CL_VPORT_XOFFSET_12
+	0x00000000, // PA_CL_VPORT_YSCALE_12
+	0x00000000, // PA_CL_VPORT_YOFFSET_12
+	0x00000000, // PA_CL_VPORT_ZSCALE_12
+	0x00000000, // PA_CL_VPORT_ZOFFSET_12
+	0x00000000, // PA_CL_VPORT_XSCALE_13
+	0x00000000, // PA_CL_VPORT_XOFFSET_13
+	0x00000000, // PA_CL_VPORT_YSCALE_13
+	0x00000000, // PA_CL_VPORT_YOFFSET_13
+	0x00000000, // PA_CL_VPORT_ZSCALE_13
+	0x00000000, // PA_CL_VPORT_ZOFFSET_13
+	0x00000000, // PA_CL_VPORT_XSCALE_14
+	0x00000000, // PA_CL_VPORT_XOFFSET_14
+	0x00000000, // PA_CL_VPORT_YSCALE_14
+	0x00000000, // PA_CL_VPORT_YOFFSET_14
+	0x00000000, // PA_CL_VPORT_ZSCALE_14
+	0x00000000, // PA_CL_VPORT_ZOFFSET_14
+	0x00000000, // PA_CL_VPORT_XSCALE_15
+	0x00000000, // PA_CL_VPORT_XOFFSET_15
+	0x00000000, // PA_CL_VPORT_YSCALE_15
+	0x00000000, // PA_CL_VPORT_YOFFSET_15
+	0x00000000, // PA_CL_VPORT_ZSCALE_15
+	0x00000000, // PA_CL_VPORT_ZOFFSET_15
+	0x00000000, // PA_CL_UCP_0_X
+	0x00000000, // PA_CL_UCP_0_Y
+	0x00000000, // PA_CL_UCP_0_Z
+	0x00000000, // PA_CL_UCP_0_W
+	0x00000000, // PA_CL_UCP_1_X
+	0x00000000, // PA_CL_UCP_1_Y
+	0x00000000, // PA_CL_UCP_1_Z
+	0x00000000, // PA_CL_UCP_1_W
+	0x00000000, // PA_CL_UCP_2_X
+	0x00000000, // PA_CL_UCP_2_Y
+	0x00000000, // PA_CL_UCP_2_Z
+	0x00000000, // PA_CL_UCP_2_W
+	0x00000000, // PA_CL_UCP_3_X
+	0x00000000, // PA_CL_UCP_3_Y
+	0x00000000, // PA_CL_UCP_3_Z
+	0x00000000, // PA_CL_UCP_3_W
+	0x00000000, // PA_CL_UCP_4_X
+	0x00000000, // PA_CL_UCP_4_Y
+	0x00000000, // PA_CL_UCP_4_Z
+	0x00000000, // PA_CL_UCP_4_W
+	0x00000000, // PA_CL_UCP_5_X
+	0x00000000, // PA_CL_UCP_5_Y
+	0x00000000, // PA_CL_UCP_5_Z
+	0x00000000, // PA_CL_UCP_5_W
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // SPI_PS_INPUT_CNTL_0
+	0x00000000, // SPI_PS_INPUT_CNTL_1
+	0x00000000, // SPI_PS_INPUT_CNTL_2
+	0x00000000, // SPI_PS_INPUT_CNTL_3
+	0x00000000, // SPI_PS_INPUT_CNTL_4
+	0x00000000, // SPI_PS_INPUT_CNTL_5
+	0x00000000, // SPI_PS_INPUT_CNTL_6
+	0x00000000, // SPI_PS_INPUT_CNTL_7
+	0x00000000, // SPI_PS_INPUT_CNTL_8
+	0x00000000, // SPI_PS_INPUT_CNTL_9
+	0x00000000, // SPI_PS_INPUT_CNTL_10
+	0x00000000, // SPI_PS_INPUT_CNTL_11
+	0x00000000, // SPI_PS_INPUT_CNTL_12
+	0x00000000, // SPI_PS_INPUT_CNTL_13
+	0x00000000, // SPI_PS_INPUT_CNTL_14
+	0x00000000, // SPI_PS_INPUT_CNTL_15
+	0x00000000, // SPI_PS_INPUT_CNTL_16
+	0x00000000, // SPI_PS_INPUT_CNTL_17
+	0x00000000, // SPI_PS_INPUT_CNTL_18
+	0x00000000, // SPI_PS_INPUT_CNTL_19
+	0x00000000, // SPI_PS_INPUT_CNTL_20
+	0x00000000, // SPI_PS_INPUT_CNTL_21
+	0x00000000, // SPI_PS_INPUT_CNTL_22
+	0x00000000, // SPI_PS_INPUT_CNTL_23
+	0x00000000, // SPI_PS_INPUT_CNTL_24
+	0x00000000, // SPI_PS_INPUT_CNTL_25
+	0x00000000, // SPI_PS_INPUT_CNTL_26
+	0x00000000, // SPI_PS_INPUT_CNTL_27
+	0x00000000, // SPI_PS_INPUT_CNTL_28
+	0x00000000, // SPI_PS_INPUT_CNTL_29
+	0x00000000, // SPI_PS_INPUT_CNTL_30
+	0x00000000, // SPI_PS_INPUT_CNTL_31
+	0x00000000, // SPI_VS_OUT_CONFIG
+	0, // HOLE
+	0x00000000, // SPI_PS_INPUT_ENA
+	0x00000000, // SPI_PS_INPUT_ADDR
+	0x00000000, // SPI_INTERP_CONTROL_0
+	0x00000002, // SPI_PS_IN_CONTROL
+	0, // HOLE
+	0x00000000, // SPI_BARYC_CNTL
+	0, // HOLE
+	0x00000000, // SPI_TMPRING_SIZE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // SPI_SHADER_POS_FORMAT
+	0x00000000, // SPI_SHADER_Z_FORMAT
+	0x00000000, // SPI_SHADER_COL_FORMAT
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // CB_BLEND0_CONTROL
+	0x00000000, // CB_BLEND1_CONTROL
+	0x00000000, // CB_BLEND2_CONTROL
+	0x00000000, // CB_BLEND3_CONTROL
+	0x00000000, // CB_BLEND4_CONTROL
+	0x00000000, // CB_BLEND5_CONTROL
+	0x00000000, // CB_BLEND6_CONTROL
+	0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int vi_SECT_CONTEXT_def_3[] =
+{
+	0x00000000, // PA_CL_POINT_X_RAD
+	0x00000000, // PA_CL_POINT_Y_RAD
+	0x00000000, // PA_CL_POINT_SIZE
+	0x00000000, // PA_CL_POINT_CULL_RAD
+	0x00000000, // VGT_DMA_BASE_HI
+	0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int vi_SECT_CONTEXT_def_4[] =
+{
+	0x00000000, // DB_DEPTH_CONTROL
+	0x00000000, // DB_EQAA
+	0x00000000, // CB_COLOR_CONTROL
+	0x00000000, // DB_SHADER_CONTROL
+	0x00090000, // PA_CL_CLIP_CNTL
+	0x00000004, // PA_SU_SC_MODE_CNTL
+	0x00000000, // PA_CL_VTE_CNTL
+	0x00000000, // PA_CL_VS_OUT_CNTL
+	0x00000000, // PA_CL_NANINF_CNTL
+	0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+	0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+	0x00000000, // PA_SU_PRIM_FILTER_CNTL
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // PA_SU_POINT_SIZE
+	0x00000000, // PA_SU_POINT_MINMAX
+	0x00000000, // PA_SU_LINE_CNTL
+	0x00000000, // PA_SC_LINE_STIPPLE
+	0x00000000, // VGT_OUTPUT_PATH_CNTL
+	0x00000000, // VGT_HOS_CNTL
+	0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+	0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+	0x00000000, // VGT_HOS_REUSE_DEPTH
+	0x00000000, // VGT_GROUP_PRIM_TYPE
+	0x00000000, // VGT_GROUP_FIRST_DECR
+	0x00000000, // VGT_GROUP_DECR
+	0x00000000, // VGT_GROUP_VECT_0_CNTL
+	0x00000000, // VGT_GROUP_VECT_1_CNTL
+	0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+	0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+	0x00000000, // VGT_GS_MODE
+	0x00000000, // VGT_GS_ONCHIP_CNTL
+	0x00000000, // PA_SC_MODE_CNTL_0
+	0x00000000, // PA_SC_MODE_CNTL_1
+	0x00000000, // VGT_ENHANCE
+	0x00000100, // VGT_GS_PER_ES
+	0x00000080, // VGT_ES_PER_GS
+	0x00000002, // VGT_GS_PER_VS
+	0x00000000, // VGT_GSVS_RING_OFFSET_1
+	0x00000000, // VGT_GSVS_RING_OFFSET_2
+	0x00000000, // VGT_GSVS_RING_OFFSET_3
+	0x00000000, // VGT_GS_OUT_PRIM_TYPE
+	0x00000000, // IA_ENHANCE
+};
+static const unsigned int vi_SECT_CONTEXT_def_5[] =
+{
+	0x00000000, // WD_ENHANCE
+	0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int vi_SECT_CONTEXT_def_6[] =
+{
+	0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int vi_SECT_CONTEXT_def_7[] =
+{
+	0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // VGT_INSTANCE_STEP_RATE_0
+	0x00000000, // VGT_INSTANCE_STEP_RATE_1
+	0x000000ff, // IA_MULTI_VGT_PARAM
+	0x00000000, // VGT_ESGS_RING_ITEMSIZE
+	0x00000000, // VGT_GSVS_RING_ITEMSIZE
+	0x00000000, // VGT_REUSE_OFF
+	0x00000000, // VGT_VTX_CNT_EN
+	0x00000000, // DB_HTILE_SURFACE
+	0x00000000, // DB_SRESULTS_COMPARE_STATE0
+	0x00000000, // DB_SRESULTS_COMPARE_STATE1
+	0x00000000, // DB_PRELOAD_CONTROL
+	0, // HOLE
+	0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+	0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+	0, // HOLE
+	0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+	0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+	0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+	0, // HOLE
+	0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+	0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+	0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+	0, // HOLE
+	0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+	0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+	0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+	0, // HOLE
+	0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+	0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+	0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+	0, // HOLE
+	0x00000000, // VGT_GS_MAX_VERT_OUT
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // VGT_TESS_DISTRIBUTION
+	0x00000000, // VGT_SHADER_STAGES_EN
+	0x00000000, // VGT_LS_HS_CONFIG
+	0x00000000, // VGT_GS_VERT_ITEMSIZE
+	0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+	0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+	0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+	0x00000000, // VGT_TF_PARAM
+	0x00000000, // DB_ALPHA_TO_MASK
+	0x00000000, // VGT_DISPATCH_DRAW_INDEX
+	0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+	0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+	0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+	0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+	0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+	0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+	0x00000000, // VGT_GS_INSTANCE_CNT
+	0x00000000, // VGT_STRMOUT_CONFIG
+	0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x00000000, // PA_SC_CENTROID_PRIORITY_0
+	0x00000000, // PA_SC_CENTROID_PRIORITY_1
+	0x00001000, // PA_SC_LINE_CNTL
+	0x00000000, // PA_SC_AA_CONFIG
+	0x00000005, // PA_SU_VTX_CNTL
+	0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+	0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+	0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+	0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+	0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+	0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+	0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0, // HOLE
+	0x0000001e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+	0x00000020, // VGT_OUT_DEALLOC_CNTL
+	0x00000000, // CB_COLOR0_BASE
+	0x00000000, // CB_COLOR0_PITCH
+	0x00000000, // CB_COLOR0_SLICE
+	0x00000000, // CB_COLOR0_VIEW
+	0x00000000, // CB_COLOR0_INFO
+	0x00000000, // CB_COLOR0_ATTRIB
+	0x00000000, // CB_COLOR0_DCC_CONTROL
+	0x00000000, // CB_COLOR0_CMASK
+	0x00000000, // CB_COLOR0_CMASK_SLICE
+	0x00000000, // CB_COLOR0_FMASK
+	0x00000000, // CB_COLOR0_FMASK_SLICE
+	0x00000000, // CB_COLOR0_CLEAR_WORD0
+	0x00000000, // CB_COLOR0_CLEAR_WORD1
+	0x00000000, // CB_COLOR0_DCC_BASE
+	0, // HOLE
+	0x00000000, // CB_COLOR1_BASE
+	0x00000000, // CB_COLOR1_PITCH
+	0x00000000, // CB_COLOR1_SLICE
+	0x00000000, // CB_COLOR1_VIEW
+	0x00000000, // CB_COLOR1_INFO
+	0x00000000, // CB_COLOR1_ATTRIB
+	0x00000000, // CB_COLOR1_DCC_CONTROL
+	0x00000000, // CB_COLOR1_CMASK
+	0x00000000, // CB_COLOR1_CMASK_SLICE
+	0x00000000, // CB_COLOR1_FMASK
+	0x00000000, // CB_COLOR1_FMASK_SLICE
+	0x00000000, // CB_COLOR1_CLEAR_WORD0
+	0x00000000, // CB_COLOR1_CLEAR_WORD1
+	0x00000000, // CB_COLOR1_DCC_BASE
+	0, // HOLE
+	0x00000000, // CB_COLOR2_BASE
+	0x00000000, // CB_COLOR2_PITCH
+	0x00000000, // CB_COLOR2_SLICE
+	0x00000000, // CB_COLOR2_VIEW
+	0x00000000, // CB_COLOR2_INFO
+	0x00000000, // CB_COLOR2_ATTRIB
+	0x00000000, // CB_COLOR2_DCC_CONTROL
+	0x00000000, // CB_COLOR2_CMASK
+	0x00000000, // CB_COLOR2_CMASK_SLICE
+	0x00000000, // CB_COLOR2_FMASK
+	0x00000000, // CB_COLOR2_FMASK_SLICE
+	0x00000000, // CB_COLOR2_CLEAR_WORD0
+	0x00000000, // CB_COLOR2_CLEAR_WORD1
+	0x00000000, // CB_COLOR2_DCC_BASE
+	0, // HOLE
+	0x00000000, // CB_COLOR3_BASE
+	0x00000000, // CB_COLOR3_PITCH
+	0x00000000, // CB_COLOR3_SLICE
+	0x00000000, // CB_COLOR3_VIEW
+	0x00000000, // CB_COLOR3_INFO
+	0x00000000, // CB_COLOR3_ATTRIB
+	0x00000000, // CB_COLOR3_DCC_CONTROL
+	0x00000000, // CB_COLOR3_CMASK
+	0x00000000, // CB_COLOR3_CMASK_SLICE
+	0x00000000, // CB_COLOR3_FMASK
+	0x00000000, // CB_COLOR3_FMASK_SLICE
+	0x00000000, // CB_COLOR3_CLEAR_WORD0
+	0x00000000, // CB_COLOR3_CLEAR_WORD1
+	0x00000000, // CB_COLOR3_DCC_BASE
+	0, // HOLE
+	0x00000000, // CB_COLOR4_BASE
+	0x00000000, // CB_COLOR4_PITCH
+	0x00000000, // CB_COLOR4_SLICE
+	0x00000000, // CB_COLOR4_VIEW
+	0x00000000, // CB_COLOR4_INFO
+	0x00000000, // CB_COLOR4_ATTRIB
+	0x00000000, // CB_COLOR4_DCC_CONTROL
+	0x00000000, // CB_COLOR4_CMASK
+	0x00000000, // CB_COLOR4_CMASK_SLICE
+	0x00000000, // CB_COLOR4_FMASK
+	0x00000000, // CB_COLOR4_FMASK_SLICE
+	0x00000000, // CB_COLOR4_CLEAR_WORD0
+	0x00000000, // CB_COLOR4_CLEAR_WORD1
+	0x00000000, // CB_COLOR4_DCC_BASE
+	0, // HOLE
+	0x00000000, // CB_COLOR5_BASE
+	0x00000000, // CB_COLOR5_PITCH
+	0x00000000, // CB_COLOR5_SLICE
+	0x00000000, // CB_COLOR5_VIEW
+	0x00000000, // CB_COLOR5_INFO
+	0x00000000, // CB_COLOR5_ATTRIB
+	0x00000000, // CB_COLOR5_DCC_CONTROL
+	0x00000000, // CB_COLOR5_CMASK
+	0x00000000, // CB_COLOR5_CMASK_SLICE
+	0x00000000, // CB_COLOR5_FMASK
+	0x00000000, // CB_COLOR5_FMASK_SLICE
+	0x00000000, // CB_COLOR5_CLEAR_WORD0
+	0x00000000, // CB_COLOR5_CLEAR_WORD1
+	0x00000000, // CB_COLOR5_DCC_BASE
+	0, // HOLE
+	0x00000000, // CB_COLOR6_BASE
+	0x00000000, // CB_COLOR6_PITCH
+	0x00000000, // CB_COLOR6_SLICE
+	0x00000000, // CB_COLOR6_VIEW
+	0x00000000, // CB_COLOR6_INFO
+	0x00000000, // CB_COLOR6_ATTRIB
+	0x00000000, // CB_COLOR6_DCC_CONTROL
+	0x00000000, // CB_COLOR6_CMASK
+	0x00000000, // CB_COLOR6_CMASK_SLICE
+	0x00000000, // CB_COLOR6_FMASK
+	0x00000000, // CB_COLOR6_FMASK_SLICE
+	0x00000000, // CB_COLOR6_CLEAR_WORD0
+	0x00000000, // CB_COLOR6_CLEAR_WORD1
+	0x00000000, // CB_COLOR6_DCC_BASE
+	0, // HOLE
+	0x00000000, // CB_COLOR7_BASE
+	0x00000000, // CB_COLOR7_PITCH
+	0x00000000, // CB_COLOR7_SLICE
+	0x00000000, // CB_COLOR7_VIEW
+	0x00000000, // CB_COLOR7_INFO
+	0x00000000, // CB_COLOR7_ATTRIB
+	0x00000000, // CB_COLOR7_DCC_CONTROL
+	0x00000000, // CB_COLOR7_CMASK
+	0x00000000, // CB_COLOR7_CMASK_SLICE
+	0x00000000, // CB_COLOR7_FMASK
+	0x00000000, // CB_COLOR7_FMASK_SLICE
+	0x00000000, // CB_COLOR7_CLEAR_WORD0
+	0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
+static const struct cs_extent_def vi_SECT_CONTEXT_defs[] =
+{
+	{vi_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+	{vi_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+	{vi_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+	{vi_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+	{vi_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+	{vi_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+	{vi_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+	{ 0, 0, 0 }
+};
+static const struct cs_section_def vi_cs_data[] = {
+	{ vi_SECT_CONTEXT_defs, SECT_CONTEXT },
+	{ 0, SECT_NONE }
+};
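Review note on the clear-state tables: each cs_extent_def entry pairs a register-value array with the register offset it starts at and a dword count (vi_SECT_CONTEXT_def_1, for example, covers 212 context registers starting at 0xa000, with the `0, // HOLE` entries padding unassigned offsets), and vi_cs_data groups the extents into sections that the GFX v8 code can walk to size and emit the clear-state buffer. Below is a minimal, self-contained sketch of that walk; the two-entry demo table is hypothetical, not the real data, and the accounting is illustrative rather than the driver's exact packet layout.

	/* Model of walking a cs_extent_def table (hypothetical demo data). */
	#include <stdio.h>

	struct cs_extent_def {
		const unsigned int *extent;   /* register values */
		unsigned int reg_index;       /* first register offset */
		unsigned int reg_count;       /* number of dwords in extent */
	};

	static const unsigned int demo_regs[] = { 0x00000000, 0x40004000 };
	static const struct cs_extent_def demo_sect[] = {
		{ demo_regs, 0xa000, 2 },
		{ NULL, 0, 0 },               /* terminator, as in vi_SECT_CONTEXT_defs */
	};

	int main(void)
	{
		unsigned int dwords = 0;

		/* accumulate the payload size extent by extent */
		for (const struct cs_extent_def *e = demo_sect; e->extent; e++) {
			printf("extent @0x%x: %u regs\n", e->reg_index, e->reg_count);
			dwords += e->reg_count;
		}
		printf("payload dwords: %u\n", dwords);
		return 0;
	}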
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c new file mode 100644 index 000000000000..b5c8485d8a58 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c | |||
@@ -0,0 +1,1712 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/firmware.h> | ||
25 | #include <linux/seq_file.h> | ||
26 | #include "drmP.h" | ||
27 | #include "amdgpu.h" | ||
28 | #include "amdgpu_pm.h" | ||
29 | #include "amdgpu_atombios.h" | ||
30 | #include "vid.h" | ||
31 | #include "vi_dpm.h" | ||
32 | #include "amdgpu_dpm.h" | ||
33 | #include "cz_dpm.h" | ||
34 | #include "cz_ppsmc.h" | ||
35 | #include "atom.h" | ||
36 | |||
37 | #include "smu/smu_8_0_d.h" | ||
38 | #include "smu/smu_8_0_sh_mask.h" | ||
39 | #include "gca/gfx_8_0_d.h" | ||
40 | #include "gca/gfx_8_0_sh_mask.h" | ||
41 | #include "gmc/gmc_8_1_d.h" | ||
42 | #include "bif/bif_5_1_d.h" | ||
43 | #include "gfx_v8_0.h" | ||
44 | |||
45 | static struct cz_ps *cz_get_ps(struct amdgpu_ps *rps) | ||
46 | { | ||
47 | struct cz_ps *ps = rps->ps_priv; | ||
48 | |||
49 | return ps; | ||
50 | } | ||
51 | |||
52 | static struct cz_power_info *cz_get_pi(struct amdgpu_device *adev) | ||
53 | { | ||
54 | struct cz_power_info *pi = adev->pm.dpm.priv; | ||
55 | |||
56 | return pi; | ||
57 | } | ||
58 | |||
59 | static uint16_t cz_convert_8bit_index_to_voltage(struct amdgpu_device *adev, | ||
60 | uint16_t voltage) | ||
61 | { | ||
62 | uint16_t tmp = 6200 - voltage * 25; | ||
63 | |||
64 | return tmp; | ||
65 | } | ||
66 | |||
67 | static void cz_construct_max_power_limits_table(struct amdgpu_device *adev, | ||
68 | struct amdgpu_clock_and_voltage_limits *table) | ||
69 | { | ||
70 | struct cz_power_info *pi = cz_get_pi(adev); | ||
71 | struct amdgpu_clock_voltage_dependency_table *dep_table = | ||
72 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
73 | |||
74 | if (dep_table->count > 0) { | ||
75 | table->sclk = dep_table->entries[dep_table->count - 1].clk; | ||
76 | table->vddc = cz_convert_8bit_index_to_voltage(adev, | ||
77 | dep_table->entries[dep_table->count - 1].v); | ||
78 | } | ||
79 | |||
80 | table->mclk = pi->sys_info.nbp_memory_clock[0]; | ||
81 | |||
82 | } | ||
83 | |||
84 | union igp_info { | ||
85 | struct _ATOM_INTEGRATED_SYSTEM_INFO info; | ||
86 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; | ||
87 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; | ||
88 | struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9; | ||
89 | }; | ||
90 | |||
91 | static int cz_parse_sys_info_table(struct amdgpu_device *adev) | ||
92 | { | ||
93 | struct cz_power_info *pi = cz_get_pi(adev); | ||
94 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
95 | int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); | ||
96 | union igp_info *igp_info; | ||
97 | u8 frev, crev; | ||
98 | u16 data_offset; | ||
99 | int i = 0; | ||
100 | |||
101 | if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
102 | &frev, &crev, &data_offset)) { | ||
103 | igp_info = (union igp_info *)(mode_info->atom_context->bios + | ||
104 | data_offset); | ||
105 | |||
106 | if (crev != 9) { | ||
107 | DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); | ||
108 | return -EINVAL; | ||
109 | } | ||
110 | pi->sys_info.bootup_sclk = | ||
111 | le32_to_cpu(igp_info->info_9.ulBootUpEngineClock); | ||
112 | pi->sys_info.bootup_uma_clk = | ||
113 | le32_to_cpu(igp_info->info_9.ulBootUpUMAClock); | ||
114 | pi->sys_info.dentist_vco_freq = | ||
115 | le32_to_cpu(igp_info->info_9.ulDentistVCOFreq); | ||
116 | pi->sys_info.bootup_nb_voltage_index = | ||
117 | le16_to_cpu(igp_info->info_9.usBootUpNBVoltage); | ||
118 | |||
119 | if (igp_info->info_9.ucHtcTmpLmt == 0) | ||
120 | pi->sys_info.htc_tmp_lmt = 203; | ||
121 | else | ||
122 | pi->sys_info.htc_tmp_lmt = igp_info->info_9.ucHtcTmpLmt; | ||
123 | |||
124 | if (igp_info->info_9.ucHtcHystLmt == 0) | ||
125 | pi->sys_info.htc_hyst_lmt = 5; | ||
126 | else | ||
127 | pi->sys_info.htc_hyst_lmt = igp_info->info_9.ucHtcHystLmt; | ||
128 | |||
129 | if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { | ||
130 | DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); | ||
131 | return -EINVAL; | ||
132 | } | ||
133 | |||
134 | if (le32_to_cpu(igp_info->info_9.ulSystemConfig) & (1 << 3) && | ||
135 | pi->enable_nb_ps_policy) | ||
136 | pi->sys_info.nb_dpm_enable = true; | ||
137 | else | ||
138 | pi->sys_info.nb_dpm_enable = false; | ||
139 | |||
140 | for (i = 0; i < CZ_NUM_NBPSTATES; i++) { | ||
141 | if (i < CZ_NUM_NBPMEMORY_CLOCK) | ||
142 | pi->sys_info.nbp_memory_clock[i] = | ||
143 | le32_to_cpu(igp_info->info_9.ulNbpStateMemclkFreq[i]); | ||
144 | pi->sys_info.nbp_n_clock[i] = | ||
145 | le32_to_cpu(igp_info->info_9.ulNbpStateNClkFreq[i]); | ||
146 | } | ||
147 | |||
148 | for (i = 0; i < CZ_MAX_DISPLAY_CLOCK_LEVEL; i++) | ||
149 | pi->sys_info.display_clock[i] = | ||
150 | le32_to_cpu(igp_info->info_9.sDispClkVoltageMapping[i].ulMaximumSupportedCLK); | ||
151 | |||
152 | for (i = 0; i < CZ_NUM_NBPSTATES; i++) | ||
153 | pi->sys_info.nbp_voltage_index[i] = | ||
154 | le32_to_cpu(igp_info->info_9.usNBPStateVoltage[i]); | ||
155 | |||
156 | if (le32_to_cpu(igp_info->info_9.ulGPUCapInfo) & | ||
157 | SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) | ||
158 | pi->caps_enable_dfs_bypass = true; | ||
159 | |||
160 | pi->sys_info.uma_channel_number = | ||
161 | igp_info->info_9.ucUMAChannelNumber; | ||
162 | |||
163 | cz_construct_max_power_limits_table(adev, | ||
164 | &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); | ||
165 | } | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | static void cz_patch_voltage_values(struct amdgpu_device *adev) | ||
171 | { | ||
172 | int i; | ||
173 | struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = | ||
174 | &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; | ||
175 | struct amdgpu_vce_clock_voltage_dependency_table *vce_table = | ||
176 | &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; | ||
177 | struct amdgpu_clock_voltage_dependency_table *acp_table = | ||
178 | &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; | ||
179 | |||
180 | if (uvd_table->count) { | ||
181 | for (i = 0; i < uvd_table->count; i++) | ||
182 | uvd_table->entries[i].v = | ||
183 | cz_convert_8bit_index_to_voltage(adev, | ||
184 | uvd_table->entries[i].v); | ||
185 | } | ||
186 | |||
187 | if (vce_table->count) { | ||
188 | for (i = 0; i < vce_table->count; i++) | ||
189 | vce_table->entries[i].v = | ||
190 | cz_convert_8bit_index_to_voltage(adev, | ||
191 | vce_table->entries[i].v); | ||
192 | } | ||
193 | |||
194 | if (acp_table->count) { | ||
195 | for (i = 0; i < acp_table->count; i++) | ||
196 | acp_table->entries[i].v = | ||
197 | cz_convert_8bit_index_to_voltage(adev, | ||
198 | acp_table->entries[i].v); | ||
199 | } | ||
200 | |||
201 | } | ||
202 | |||
203 | static void cz_construct_boot_state(struct amdgpu_device *adev) | ||
204 | { | ||
205 | struct cz_power_info *pi = cz_get_pi(adev); | ||
206 | |||
207 | pi->boot_pl.sclk = pi->sys_info.bootup_sclk; | ||
208 | pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; | ||
209 | pi->boot_pl.ds_divider_index = 0; | ||
210 | pi->boot_pl.ss_divider_index = 0; | ||
211 | pi->boot_pl.allow_gnb_slow = 1; | ||
212 | pi->boot_pl.force_nbp_state = 0; | ||
213 | pi->boot_pl.display_wm = 0; | ||
214 | pi->boot_pl.vce_wm = 0; | ||
215 | |||
216 | } | ||
217 | |||
218 | static void cz_patch_boot_state(struct amdgpu_device *adev, | ||
219 | struct cz_ps *ps) | ||
220 | { | ||
221 | struct cz_power_info *pi = cz_get_pi(adev); | ||
222 | |||
223 | ps->num_levels = 1; | ||
224 | ps->levels[0] = pi->boot_pl; | ||
225 | } | ||
226 | |||
227 | union pplib_clock_info { | ||
228 | struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; | ||
229 | struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; | ||
230 | struct _ATOM_PPLIB_CZ_CLOCK_INFO carrizo; | ||
231 | }; | ||
232 | |||
233 | static void cz_parse_pplib_clock_info(struct amdgpu_device *adev, | ||
234 | struct amdgpu_ps *rps, int index, | ||
235 | union pplib_clock_info *clock_info) | ||
236 | { | ||
237 | struct cz_power_info *pi = cz_get_pi(adev); | ||
238 | struct cz_ps *ps = cz_get_ps(rps); | ||
239 | struct cz_pl *pl = &ps->levels[index]; | ||
240 | struct amdgpu_clock_voltage_dependency_table *table = | ||
241 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
242 | |||
243 | pl->sclk = table->entries[clock_info->carrizo.index].clk; | ||
244 | pl->vddc_index = table->entries[clock_info->carrizo.index].v; | ||
245 | |||
246 | ps->num_levels = index + 1; | ||
247 | |||
248 | if (pi->caps_sclk_ds) { | ||
249 | pl->ds_divider_index = 5; | ||
250 | pl->ss_divider_index = 5; | ||
251 | } | ||
252 | |||
253 | } | ||
254 | |||
255 | static void cz_parse_pplib_non_clock_info(struct amdgpu_device *adev, | ||
256 | struct amdgpu_ps *rps, | ||
257 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, | ||
258 | u8 table_rev) | ||
259 | { | ||
260 | struct cz_ps *ps = cz_get_ps(rps); | ||
261 | |||
262 | rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); | ||
263 | rps->class = le16_to_cpu(non_clock_info->usClassification); | ||
264 | rps->class2 = le16_to_cpu(non_clock_info->usClassification2); | ||
265 | |||
266 | if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { | ||
267 | rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); | ||
268 | rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); | ||
269 | } else { | ||
270 | rps->vclk = 0; | ||
271 | rps->dclk = 0; | ||
272 | } | ||
273 | |||
274 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { | ||
275 | adev->pm.dpm.boot_ps = rps; | ||
276 | cz_patch_boot_state(adev, ps); | ||
277 | } | ||
278 | if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) | ||
279 | adev->pm.dpm.uvd_ps = rps; | ||
280 | |||
281 | } | ||
282 | |||
283 | union power_info { | ||
284 | struct _ATOM_PPLIB_POWERPLAYTABLE pplib; | ||
285 | struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; | ||
286 | struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; | ||
287 | struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; | ||
288 | struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; | ||
289 | }; | ||
290 | |||
291 | union pplib_power_state { | ||
292 | struct _ATOM_PPLIB_STATE v1; | ||
293 | struct _ATOM_PPLIB_STATE_V2 v2; | ||
294 | }; | ||
295 | |||
296 | static int cz_parse_power_table(struct amdgpu_device *adev) | ||
297 | { | ||
298 | struct amdgpu_mode_info *mode_info = &adev->mode_info; | ||
299 | struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; | ||
300 | union pplib_power_state *power_state; | ||
301 | int i, j, k, non_clock_array_index, clock_array_index; | ||
302 | union pplib_clock_info *clock_info; | ||
303 | struct _StateArray *state_array; | ||
304 | struct _ClockInfoArray *clock_info_array; | ||
305 | struct _NonClockInfoArray *non_clock_info_array; | ||
306 | union power_info *power_info; | ||
307 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); | ||
308 | u16 data_offset; | ||
309 | u8 frev, crev; | ||
310 | u8 *power_state_offset; | ||
311 | struct cz_ps *ps; | ||
312 | |||
313 | if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, | ||
314 | &frev, &crev, &data_offset)) | ||
315 | return -EINVAL; | ||
316 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); | ||
317 | |||
318 | state_array = (struct _StateArray *) | ||
319 | (mode_info->atom_context->bios + data_offset + | ||
320 | le16_to_cpu(power_info->pplib.usStateArrayOffset)); | ||
321 | clock_info_array = (struct _ClockInfoArray *) | ||
322 | (mode_info->atom_context->bios + data_offset + | ||
323 | le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); | ||
324 | non_clock_info_array = (struct _NonClockInfoArray *) | ||
325 | (mode_info->atom_context->bios + data_offset + | ||
326 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); | ||
327 | |||
328 | adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) * | ||
329 | state_array->ucNumEntries, GFP_KERNEL); | ||
330 | |||
331 | if (!adev->pm.dpm.ps) | ||
332 | return -ENOMEM; | ||
333 | |||
334 | power_state_offset = (u8 *)state_array->states; | ||
335 | adev->pm.dpm.platform_caps = | ||
336 | le32_to_cpu(power_info->pplib.ulPlatformCaps); | ||
337 | adev->pm.dpm.backbias_response_time = | ||
338 | le16_to_cpu(power_info->pplib.usBackbiasTime); | ||
339 | adev->pm.dpm.voltage_response_time = | ||
340 | le16_to_cpu(power_info->pplib.usVoltageTime); | ||
341 | |||
342 | for (i = 0; i < state_array->ucNumEntries; i++) { | ||
343 | power_state = (union pplib_power_state *)power_state_offset; | ||
344 | non_clock_array_index = power_state->v2.nonClockInfoIndex; | ||
345 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) | ||
346 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; | ||
347 | |||
348 | ps = kzalloc(sizeof(struct cz_ps), GFP_KERNEL); | ||
349 | if (ps == NULL) { | ||
350 | kfree(adev->pm.dpm.ps); | ||
351 | return -ENOMEM; | ||
352 | } | ||
353 | |||
354 | adev->pm.dpm.ps[i].ps_priv = ps; | ||
355 | k = 0; | ||
356 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { | ||
357 | clock_array_index = power_state->v2.clockInfoIndex[j]; | ||
358 | if (clock_array_index >= clock_info_array->ucNumEntries) | ||
359 | continue; | ||
360 | if (k >= CZ_MAX_HARDWARE_POWERLEVELS) | ||
361 | break; | ||
362 | clock_info = (union pplib_clock_info *) | ||
363 | &clock_info_array->clockInfo[clock_array_index * | ||
364 | clock_info_array->ucEntrySize]; | ||
365 | cz_parse_pplib_clock_info(adev, &adev->pm.dpm.ps[i], | ||
366 | k, clock_info); | ||
367 | k++; | ||
368 | } | ||
369 | cz_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], | ||
370 | non_clock_info, | ||
371 | non_clock_info_array->ucEntrySize); | ||
372 | power_state_offset += 2 + power_state->v2.ucNumDPMLevels; | ||
373 | } | ||
374 | adev->pm.dpm.num_ps = state_array->ucNumEntries; | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static int cz_process_firmware_header(struct amdgpu_device *adev) | ||
380 | { | ||
381 | struct cz_power_info *pi = cz_get_pi(adev); | ||
382 | u32 tmp; | ||
383 | int ret; | ||
384 | |||
385 | ret = cz_read_smc_sram_dword(adev, SMU8_FIRMWARE_HEADER_LOCATION + | ||
386 | offsetof(struct SMU8_Firmware_Header, | ||
387 | DpmTable), | ||
388 | &tmp, pi->sram_end); | ||
389 | |||
390 | if (ret == 0) | ||
391 | pi->dpm_table_start = tmp; | ||
392 | |||
393 | return ret; | ||
394 | } | ||
395 | |||
396 | static int cz_dpm_init(struct amdgpu_device *adev) | ||
397 | { | ||
398 | struct cz_power_info *pi; | ||
399 | int ret, i; | ||
400 | |||
401 | pi = kzalloc(sizeof(struct cz_power_info), GFP_KERNEL); | ||
402 | if (NULL == pi) | ||
403 | return -ENOMEM; | ||
404 | |||
405 | adev->pm.dpm.priv = pi; | ||
406 | |||
407 | ret = amdgpu_get_platform_caps(adev); | ||
408 | if (ret) | ||
409 | return ret; | ||
410 | |||
411 | ret = amdgpu_parse_extended_power_table(adev); | ||
412 | if (ret) | ||
413 | return ret; | ||
414 | |||
415 | pi->sram_end = SMC_RAM_END; | ||
416 | |||
417 | /* set up DPM defaults */ | ||
418 | for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) | ||
419 | pi->active_target[i] = CZ_AT_DFLT; | ||
420 | |||
421 | pi->mgcg_cgtt_local0 = 0x0; | ||
422 | pi->mgcg_cgtt_local1 = 0x0; | ||
423 | pi->clock_slow_down_step = 25000; | ||
424 | pi->skip_clock_slow_down = 1; | ||
425 | pi->enable_nb_ps_policy = 1; | ||
426 | pi->caps_power_containment = true; | ||
427 | pi->caps_cac = true; | ||
428 | pi->didt_enabled = false; | ||
429 | if (pi->didt_enabled) { | ||
430 | pi->caps_sq_ramping = true; | ||
431 | pi->caps_db_ramping = true; | ||
432 | pi->caps_td_ramping = true; | ||
433 | pi->caps_tcp_ramping = true; | ||
434 | } | ||
435 | pi->caps_sclk_ds = true; | ||
436 | pi->voting_clients = 0x00c00033; | ||
437 | pi->auto_thermal_throttling_enabled = true; | ||
438 | pi->bapm_enabled = false; | ||
439 | pi->disable_nb_ps3_in_battery = false; | ||
440 | pi->voltage_drop_threshold = 0; | ||
441 | pi->caps_sclk_throttle_low_notification = false; | ||
442 | pi->gfx_pg_threshold = 500; | ||
443 | pi->caps_fps = true; | ||
444 | /* uvd */ | ||
445 | pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; | ||
446 | pi->caps_uvd_dpm = true; | ||
447 | /* vce */ | ||
448 | pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; | ||
449 | pi->caps_vce_dpm = true; | ||
450 | /* acp */ | ||
451 | pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; | ||
452 | pi->caps_acp_dpm = true; | ||
453 | |||
454 | pi->caps_stable_power_state = false; | ||
455 | pi->nb_dpm_enabled_by_driver = true; | ||
456 | pi->nb_dpm_enabled = false; | ||
457 | pi->caps_voltage_island = false; | ||
458 | /* flag indicating the pptable needs to be uploaded */ | ||
459 | pi->need_pptable_upload = true; | ||
460 | |||
461 | ret = cz_parse_sys_info_table(adev); | ||
462 | if (ret) | ||
463 | return ret; | ||
464 | |||
465 | cz_patch_voltage_values(adev); | ||
466 | cz_construct_boot_state(adev); | ||
467 | |||
468 | ret = cz_parse_power_table(adev); | ||
469 | if (ret) | ||
470 | return ret; | ||
471 | |||
472 | ret = cz_process_firmware_header(adev); | ||
473 | if (ret) | ||
474 | return ret; | ||
475 | |||
476 | pi->dpm_enabled = true; | ||
477 | |||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | static void cz_dpm_fini(struct amdgpu_device *adev) | ||
482 | { | ||
483 | int i; | ||
484 | |||
485 | for (i = 0; i < adev->pm.dpm.num_ps; i++) | ||
486 | kfree(adev->pm.dpm.ps[i].ps_priv); | ||
487 | |||
488 | kfree(adev->pm.dpm.ps); | ||
489 | kfree(adev->pm.dpm.priv); | ||
490 | amdgpu_free_extended_power_table(adev); | ||
491 | } | ||
492 | |||
493 | static void | ||
494 | cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, | ||
495 | struct seq_file *m) | ||
496 | { | ||
497 | struct amdgpu_clock_voltage_dependency_table *table = | ||
498 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
499 | u32 current_index = | ||
500 | (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & | ||
501 | TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> | ||
502 | TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; | ||
503 | u32 sclk, tmp; | ||
504 | u16 vddc; | ||
505 | |||
506 | if (current_index >= NUM_SCLK_LEVELS) { | ||
507 | seq_printf(m, "invalid dpm profile %d\n", current_index); | ||
508 | } else { | ||
509 | sclk = table->entries[current_index].clk; | ||
510 | tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & | ||
511 | SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> | ||
512 | SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; | ||
513 | vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); | ||
514 | seq_printf(m, "power level %d sclk: %u vddc: %u\n", | ||
515 | current_index, sclk, vddc); | ||
516 | } | ||
517 | } | ||
518 | |||
519 | static void cz_dpm_print_power_state(struct amdgpu_device *adev, | ||
520 | struct amdgpu_ps *rps) | ||
521 | { | ||
522 | int i; | ||
523 | struct cz_ps *ps = cz_get_ps(rps); | ||
524 | |||
525 | amdgpu_dpm_print_class_info(rps->class, rps->class2); | ||
526 | amdgpu_dpm_print_cap_info(rps->caps); | ||
527 | |||
528 | DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); | ||
529 | for (i = 0; i < ps->num_levels; i++) { | ||
530 | struct cz_pl *pl = &ps->levels[i]; | ||
531 | |||
532 | DRM_INFO("\t\tpower level %d sclk: %u vddc: %u\n", | ||
533 | i, pl->sclk, | ||
534 | cz_convert_8bit_index_to_voltage(adev, pl->vddc_index)); | ||
535 | } | ||
536 | |||
537 | amdgpu_dpm_print_ps_status(adev, rps); | ||
538 | } | ||
539 | |||
540 | static void cz_dpm_set_funcs(struct amdgpu_device *adev); | ||
541 | |||
542 | static int cz_dpm_early_init(struct amdgpu_device *adev) | ||
543 | { | ||
544 | cz_dpm_set_funcs(adev); | ||
545 | |||
546 | return 0; | ||
547 | } | ||
548 | |||
549 | static int cz_dpm_sw_init(struct amdgpu_device *adev) | ||
550 | { | ||
551 | int ret = 0; | ||
552 | /* TODO: add thermal support */ | ||
553 | |||
554 | /* default to balanced state */ | ||
555 | adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; | ||
556 | adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; | ||
557 | adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO; | ||
558 | adev->pm.default_sclk = adev->clock.default_sclk; | ||
559 | adev->pm.default_mclk = adev->clock.default_mclk; | ||
560 | adev->pm.current_sclk = adev->clock.default_sclk; | ||
561 | adev->pm.current_mclk = adev->clock.default_mclk; | ||
562 | adev->pm.int_thermal_type = THERMAL_TYPE_NONE; | ||
563 | |||
564 | if (amdgpu_dpm == 0) | ||
565 | return 0; | ||
566 | |||
567 | mutex_lock(&adev->pm.mutex); | ||
568 | ret = cz_dpm_init(adev); | ||
569 | if (ret) | ||
570 | goto dpm_init_failed; | ||
571 | |||
572 | adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; | ||
573 | if (amdgpu_dpm == 1) | ||
574 | amdgpu_pm_print_power_states(adev); | ||
575 | |||
576 | ret = amdgpu_pm_sysfs_init(adev); | ||
577 | if (ret) | ||
578 | goto dpm_init_failed; | ||
579 | |||
580 | mutex_unlock(&adev->pm.mutex); | ||
581 | DRM_INFO("amdgpu: dpm initialized\n"); | ||
582 | |||
583 | return 0; | ||
584 | |||
585 | dpm_init_failed: | ||
586 | cz_dpm_fini(adev); | ||
587 | mutex_unlock(&adev->pm.mutex); | ||
588 | DRM_ERROR("amdgpu: dpm initialization failed\n"); | ||
589 | |||
590 | return ret; | ||
591 | } | ||
592 | |||
593 | static int cz_dpm_sw_fini(struct amdgpu_device *adev) | ||
594 | { | ||
595 | mutex_lock(&adev->pm.mutex); | ||
596 | amdgpu_pm_sysfs_fini(adev); | ||
597 | cz_dpm_fini(adev); | ||
598 | mutex_unlock(&adev->pm.mutex); | ||
599 | |||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | static void cz_reset_ap_mask(struct amdgpu_device *adev) | ||
604 | { | ||
605 | struct cz_power_info *pi = cz_get_pi(adev); | ||
606 | |||
607 | pi->active_process_mask = 0; | ||
608 | |||
609 | } | ||
610 | |||
611 | static int cz_dpm_download_pptable_from_smu(struct amdgpu_device *adev, | ||
612 | void **table) | ||
613 | { | ||
614 | int ret = 0; | ||
615 | |||
616 | ret = cz_smu_download_pptable(adev, table); | ||
617 | |||
618 | return ret; | ||
619 | } | ||
620 | |||
621 | static int cz_dpm_upload_pptable_to_smu(struct amdgpu_device *adev) | ||
622 | { | ||
623 | struct cz_power_info *pi = cz_get_pi(adev); | ||
624 | struct SMU8_Fusion_ClkTable *clock_table; | ||
625 | struct atom_clock_dividers dividers; | ||
626 | void *table = NULL; | ||
627 | uint8_t i = 0; | ||
628 | int ret = 0; | ||
629 | |||
630 | struct amdgpu_clock_voltage_dependency_table *vddc_table = | ||
631 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
632 | struct amdgpu_clock_voltage_dependency_table *vddgfx_table = | ||
633 | &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk; | ||
634 | struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = | ||
635 | &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; | ||
636 | struct amdgpu_vce_clock_voltage_dependency_table *vce_table = | ||
637 | &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; | ||
638 | struct amdgpu_clock_voltage_dependency_table *acp_table = | ||
639 | &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; | ||
640 | |||
641 | if (!pi->need_pptable_upload) | ||
642 | return 0; | ||
643 | |||
644 | ret = cz_dpm_download_pptable_from_smu(adev, &table); | ||
645 | if (ret) { | ||
646 | DRM_ERROR("amdgpu: Failed to get power play table from SMU!\n"); | ||
647 | return -EINVAL; | ||
648 | } | ||
649 | |||
650 | clock_table = (struct SMU8_Fusion_ClkTable *)table; | ||
651 | /* patch clock table */ | ||
652 | if (vddc_table->count > CZ_MAX_HARDWARE_POWERLEVELS || | ||
653 | vddgfx_table->count > CZ_MAX_HARDWARE_POWERLEVELS || | ||
654 | uvd_table->count > CZ_MAX_HARDWARE_POWERLEVELS || | ||
655 | vce_table->count > CZ_MAX_HARDWARE_POWERLEVELS || | ||
656 | acp_table->count > CZ_MAX_HARDWARE_POWERLEVELS) { | ||
657 | DRM_ERROR("amdgpu: Invalid Clock Voltage Dependency Table!\n"); | ||
658 | return -EINVAL; | ||
659 | } | ||
660 | |||
661 | for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) { | ||
662 | |||
663 | /* vddc sclk */ | ||
664 | clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid = | ||
665 | (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0; | ||
666 | clock_table->SclkBreakdownTable.ClkLevel[i].Frequency = | ||
667 | (i < vddc_table->count) ? vddc_table->entries[i].clk : 0; | ||
668 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, | ||
669 | clock_table->SclkBreakdownTable.ClkLevel[i].Frequency, | ||
670 | false, ÷rs); | ||
671 | if (ret) | ||
672 | return ret; | ||
673 | clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid = | ||
674 | (uint8_t)dividers.post_divider; | ||
675 | |||
676 | /* vddgfx sclk */ | ||
677 | clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid = | ||
678 | (i < vddgfx_table->count) ? (uint8_t)vddgfx_table->entries[i].v : 0; | ||
679 | |||
680 | /* acp breakdown */ | ||
681 | clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid = | ||
682 | (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0; | ||
683 | clock_table->AclkBreakdownTable.ClkLevel[i].Frequency = | ||
684 | (i < acp_table->count) ? acp_table->entries[i].clk : 0; | ||
685 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, | ||
686 | clock_table->AclkBreakdownTable.ClkLevel[i].Frequency, | ||
687 | false, ÷rs); | ||
688 | if (ret) | ||
689 | return ret; | ||
690 | clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid = | ||
691 | (uint8_t)dividers.post_divider; | ||
692 | |||
693 | /* uvd breakdown */ | ||
694 | clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid = | ||
695 | (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; | ||
696 | clock_table->VclkBreakdownTable.ClkLevel[i].Frequency = | ||
697 | (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0; | ||
698 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, | ||
699 | clock_table->VclkBreakdownTable.ClkLevel[i].Frequency, | ||
700 | false, ÷rs); | ||
701 | if (ret) | ||
702 | return ret; | ||
703 | clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid = | ||
704 | (uint8_t)dividers.post_divider; | ||
705 | |||
706 | clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid = | ||
707 | (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0; | ||
708 | clock_table->DclkBreakdownTable.ClkLevel[i].Frequency = | ||
709 | (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0; | ||
710 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, | ||
711 | clock_table->DclkBreakdownTable.ClkLevel[i].Frequency, | ||
712 | false, ÷rs); | ||
713 | if (ret) | ||
714 | return ret; | ||
715 | clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid = | ||
716 | (uint8_t)dividers.post_divider; | ||
717 | |||
718 | /* vce breakdown */ | ||
719 | clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid = | ||
720 | (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0; | ||
721 | clock_table->EclkBreakdownTable.ClkLevel[i].Frequency = | ||
722 | (i < vce_table->count) ? vce_table->entries[i].ecclk : 0; | ||
723 | ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, | ||
724 | clock_table->EclkBreakdownTable.ClkLevel[i].Frequency, | ||
725 | false, ÷rs); | ||
726 | if (ret) | ||
727 | return ret; | ||
728 | clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid = | ||
729 | (uint8_t)dividers.post_divider; | ||
730 | } | ||
731 | |||
732 | /* it's time to upload the patched table to the SMU */ | ||
733 | ret = cz_smu_upload_pptable(adev); | ||
734 | if (ret) { | ||
735 | DRM_ERROR("amdgpu: Failed to put power play table to SMU!\n"); | ||
736 | return ret; | ||
737 | } | ||
738 | |||
739 | return 0; | ||
740 | } | ||
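Each breakdown table above is filled with the same pad-with-zeros pattern; isolated, it looks like this (a hypothetical helper for illustration, not part of the patch):

	static void fill_levels(uint32_t *freq, uint8_t *vid, int max_levels,
				const uint32_t *clk, const uint8_t *v, int count)
	{
		int i;

		/* copy table entries while they last, zero the remaining slots */
		for (i = 0; i < max_levels; i++) {
			freq[i] = (i < count) ? clk[i] : 0;
			vid[i] = (i < count) ? v[i] : 0;
		}
	}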
741 | |||
742 | static void cz_init_sclk_limit(struct amdgpu_device *adev) | ||
743 | { | ||
744 | struct cz_power_info *pi = cz_get_pi(adev); | ||
745 | struct amdgpu_clock_voltage_dependency_table *table = | ||
746 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
747 | uint32_t clock = 0, level; | ||
748 | |||
749 | if (!table || !table->count) { | ||
750 | DRM_ERROR("Invalid Voltage Dependency table.\n"); | ||
751 | return; | ||
752 | } | ||
753 | |||
754 | pi->sclk_dpm.soft_min_clk = 0; | ||
755 | pi->sclk_dpm.hard_min_clk = 0; | ||
756 | cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); | ||
757 | level = cz_get_argument(adev); | ||
758 | if (level < table->count) | ||
759 | clock = table->entries[level].clk; | ||
760 | else { | ||
761 | DRM_ERROR("Invalid SLCK Voltage Dependency table entry.\n"); | ||
762 | clock = table->entries[table->count - 1].clk; | ||
763 | } | ||
764 | |||
765 | pi->sclk_dpm.soft_max_clk = clock; | ||
766 | pi->sclk_dpm.hard_max_clk = clock; | ||
767 | |||
768 | } | ||
769 | |||
770 | static void cz_init_uvd_limit(struct amdgpu_device *adev) | ||
771 | { | ||
772 | struct cz_power_info *pi = cz_get_pi(adev); | ||
773 | struct amdgpu_uvd_clock_voltage_dependency_table *table = | ||
774 | &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; | ||
775 | uint32_t clock = 0, level; | ||
776 | |||
777 | if (!table || !table->count) { | ||
778 | DRM_ERROR("Invalid Voltage Dependency table.\n"); | ||
779 | return; | ||
780 | } | ||
781 | |||
782 | pi->uvd_dpm.soft_min_clk = 0; | ||
783 | pi->uvd_dpm.hard_min_clk = 0; | ||
784 | cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxUvdLevel); | ||
785 | level = cz_get_argument(adev); | ||
786 | if (level < table->count) | ||
787 | clock = table->entries[level].vclk; | ||
788 | else { | ||
789 | DRM_ERROR("Invalid UVD Voltage Dependency table entry.\n"); | ||
790 | clock = table->entries[table->count - 1].vclk; | ||
791 | } | ||
792 | |||
793 | pi->uvd_dpm.soft_max_clk = clock; | ||
794 | pi->uvd_dpm.hard_max_clk = clock; | ||
795 | |||
796 | } | ||
797 | |||
798 | static void cz_init_vce_limit(struct amdgpu_device *adev) | ||
799 | { | ||
800 | struct cz_power_info *pi = cz_get_pi(adev); | ||
801 | struct amdgpu_vce_clock_voltage_dependency_table *table = | ||
802 | &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; | ||
803 | uint32_t clock = 0, level; | ||
804 | |||
805 | if (!table || !table->count) { | ||
806 | DRM_ERROR("Invalid Voltage Dependency table.\n"); | ||
807 | return; | ||
808 | } | ||
809 | |||
810 | pi->vce_dpm.soft_min_clk = 0; | ||
811 | pi->vce_dpm.hard_min_clk = 0; | ||
812 | cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxEclkLevel); | ||
813 | level = cz_get_argument(adev); | ||
814 | if (level < table->count) | ||
815 | clock = table->entries[level].evclk; | ||
816 | else { | ||
817 | /* a newer BIOS should fix this table entry */ | ||
818 | DRM_ERROR("Invalid VCE Voltage Dependency table entry.\n"); | ||
819 | clock = table->entries[table->count - 1].evclk; | ||
820 | } | ||
821 | |||
822 | pi->vce_dpm.soft_max_clk = clock; | ||
823 | pi->vce_dpm.hard_max_clk = clock; | ||
824 | |||
825 | } | ||
826 | |||
827 | static void cz_init_acp_limit(struct amdgpu_device *adev) | ||
828 | { | ||
829 | struct cz_power_info *pi = cz_get_pi(adev); | ||
830 | struct amdgpu_clock_voltage_dependency_table *table = | ||
831 | &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; | ||
832 | uint32_t clock = 0, level; | ||
833 | |||
834 | if (!table || !table->count) { | ||
835 | DRM_ERROR("Invalid Voltage Dependency table.\n"); | ||
836 | return; | ||
837 | } | ||
838 | |||
839 | pi->acp_dpm.soft_min_clk = 0; | ||
840 | pi->acp_dpm.hard_min_clk = 0; | ||
841 | cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxAclkLevel); | ||
842 | level = cz_get_argument(adev); | ||
843 | if (level < table->count) | ||
844 | clock = table->entries[level].clk; | ||
845 | else { | ||
846 | DRM_ERROR("Invalid ACP Voltage Dependency table entry.\n"); | ||
847 | clock = table->entries[table->count - 1].clk; | ||
848 | } | ||
849 | |||
850 | pi->acp_dpm.soft_max_clk = clock; | ||
851 | pi->acp_dpm.hard_max_clk = clock; | ||
852 | |||
853 | } | ||
854 | |||
855 | static void cz_init_pg_state(struct amdgpu_device *adev) | ||
856 | { | ||
857 | struct cz_power_info *pi = cz_get_pi(adev); | ||
858 | |||
859 | pi->uvd_power_gated = false; | ||
860 | pi->vce_power_gated = false; | ||
861 | pi->acp_power_gated = false; | ||
862 | |||
863 | } | ||
864 | |||
865 | static void cz_init_sclk_threshold(struct amdgpu_device *adev) | ||
866 | { | ||
867 | struct cz_power_info *pi = cz_get_pi(adev); | ||
868 | |||
869 | pi->low_sclk_interrupt_threshold = 0; | ||
870 | |||
871 | } | ||
872 | |||
873 | static void cz_dpm_setup_asic(struct amdgpu_device *adev) | ||
874 | { | ||
875 | cz_reset_ap_mask(adev); | ||
876 | cz_dpm_upload_pptable_to_smu(adev); | ||
877 | cz_init_sclk_limit(adev); | ||
878 | cz_init_uvd_limit(adev); | ||
879 | cz_init_vce_limit(adev); | ||
880 | cz_init_acp_limit(adev); | ||
881 | cz_init_pg_state(adev); | ||
882 | cz_init_sclk_threshold(adev); | ||
883 | |||
884 | } | ||
885 | |||
886 | static bool cz_check_smu_feature(struct amdgpu_device *adev, | ||
887 | uint32_t feature) | ||
888 | { | ||
889 | uint32_t smu_feature = 0; | ||
890 | int ret; | ||
891 | |||
892 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
893 | PPSMC_MSG_GetFeatureStatus, 0); | ||
894 | if (ret) { | ||
895 | DRM_ERROR("Failed to get SMU features from SMC.\n"); | ||
896 | return false; | ||
897 | } else { | ||
898 | smu_feature = cz_get_argument(adev); | ||
899 | if (feature & smu_feature) | ||
900 | return true; | ||
901 | } | ||
902 | |||
903 | return false; | ||
904 | } | ||
905 | |||
906 | static bool cz_check_for_dpm_enabled(struct amdgpu_device *adev) | ||
907 | { | ||
908 | if (cz_check_smu_feature(adev, | ||
909 | SMU_EnabledFeatureScoreboard_SclkDpmOn)) | ||
910 | return true; | ||
911 | |||
912 | return false; | ||
913 | } | ||
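Because the scoreboard returned by PPSMC_MSG_GetFeatureStatus is a plain bitmask, other feature checks reduce to one-liners against the masks declared in cz_dpm.h below (a usage sketch, not code from the patch):

	/* e.g. test whether UVD DPM has been enabled by the SMU */
	bool uvd_dpm_on = cz_check_smu_feature(adev,
				SMU_EnabledFeatureScoreboard_UvdDpmOn);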
914 | |||
915 | static void cz_program_voting_clients(struct amdgpu_device *adev) | ||
916 | { | ||
917 | WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, PPCZ_VOTINGRIGHTSCLIENTS_DFLT0); | ||
918 | } | ||
919 | |||
920 | static void cz_clear_voting_clients(struct amdgpu_device *adev) | ||
921 | { | ||
922 | WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); | ||
923 | } | ||
924 | |||
925 | static int cz_start_dpm(struct amdgpu_device *adev) | ||
926 | { | ||
927 | int ret = 0; | ||
928 | |||
929 | if (amdgpu_dpm) { | ||
930 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
931 | PPSMC_MSG_EnableAllSmuFeatures, SCLK_DPM_MASK); | ||
932 | if (ret) { | ||
933 | DRM_ERROR("SMU feature: SCLK_DPM enable failed\n"); | ||
934 | return -EINVAL; | ||
935 | } | ||
936 | } | ||
937 | |||
938 | return 0; | ||
939 | } | ||
940 | |||
941 | static int cz_stop_dpm(struct amdgpu_device *adev) | ||
942 | { | ||
943 | int ret = 0; | ||
944 | |||
945 | if (amdgpu_dpm && adev->pm.dpm_enabled) { | ||
946 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
947 | PPSMC_MSG_DisableAllSmuFeatures, SCLK_DPM_MASK); | ||
948 | if (ret) { | ||
949 | DRM_ERROR("SMU feature: SCLK_DPM disable failed\n"); | ||
950 | return -EINVAL; | ||
951 | } | ||
952 | } | ||
953 | |||
954 | return 0; | ||
955 | } | ||
956 | |||
957 | static uint32_t cz_get_sclk_level(struct amdgpu_device *adev, | ||
958 | uint32_t clock, uint16_t msg) | ||
959 | { | ||
960 | int i = 0; | ||
961 | struct amdgpu_clock_voltage_dependency_table *table = | ||
962 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
963 | |||
964 | switch (msg) { | ||
965 | case PPSMC_MSG_SetSclkSoftMin: | ||
966 | case PPSMC_MSG_SetSclkHardMin: | ||
967 | for (i = 0; i < table->count; i++) | ||
968 | if (clock <= table->entries[i].clk) | ||
969 | break; | ||
970 | if (i == table->count) | ||
971 | i = table->count - 1; | ||
972 | break; | ||
973 | case PPSMC_MSG_SetSclkSoftMax: | ||
974 | case PPSMC_MSG_SetSclkHardMax: | ||
975 | for (i = table->count - 1; i >= 0; i--) | ||
976 | if (clock >= table->entries[i].clk) | ||
977 | break; | ||
978 | if (i < 0) | ||
979 | i = 0; | ||
980 | break; | ||
981 | default: | ||
982 | break; | ||
983 | } | ||
984 | |||
985 | return i; | ||
986 | } | ||
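The lookup above maps a clock in 10 kHz units to a table level, rounding in the direction the message demands. A standalone sketch of the same rule (plain C, detached from the kernel types, for illustration only):

	#include <stdbool.h>
	#include <stdint.h>

	static int pick_level(const uint32_t *clks, int count,
			      uint32_t clock, bool is_min)
	{
		int i;

		if (is_min) {			/* lowest level at or above clock */
			for (i = 0; i < count; i++)
				if (clock <= clks[i])
					return i;
			return count - 1;	/* clamp to the top entry */
		}
		for (i = count - 1; i >= 0; i--)	/* highest level at or below clock */
			if (clock >= clks[i])
				return i;
		return 0;			/* clamp to the bottom entry */
	}

With a hypothetical table {30000, 60000, 80000}, a soft-min request for 60000 and a soft-max request for 70000 both land on level 1.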
987 | |||
988 | static int cz_program_bootup_state(struct amdgpu_device *adev) | ||
989 | { | ||
990 | struct cz_power_info *pi = cz_get_pi(adev); | ||
991 | uint32_t soft_min_clk = 0; | ||
992 | uint32_t soft_max_clk = 0; | ||
993 | int ret = 0; | ||
994 | |||
995 | pi->sclk_dpm.soft_min_clk = pi->sys_info.bootup_sclk; | ||
996 | pi->sclk_dpm.soft_max_clk = pi->sys_info.bootup_sclk; | ||
997 | |||
998 | soft_min_clk = cz_get_sclk_level(adev, | ||
999 | pi->sclk_dpm.soft_min_clk, | ||
1000 | PPSMC_MSG_SetSclkSoftMin); | ||
1001 | soft_max_clk = cz_get_sclk_level(adev, | ||
1002 | pi->sclk_dpm.soft_max_clk, | ||
1003 | PPSMC_MSG_SetSclkSoftMax); | ||
1004 | |||
1005 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
1006 | PPSMC_MSG_SetSclkSoftMin, soft_min_clk); | ||
1007 | if (ret) | ||
1008 | return -EINVAL; | ||
1009 | |||
1010 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
1011 | PPSMC_MSG_SetSclkSoftMax, soft_max_clk); | ||
1012 | if (ret) | ||
1013 | return -EINVAL; | ||
1014 | |||
1015 | return 0; | ||
1016 | } | ||
1017 | |||
1018 | /* TODO */ | ||
1019 | static int cz_disable_cgpg(struct amdgpu_device *adev) | ||
1020 | { | ||
1021 | return 0; | ||
1022 | } | ||
1023 | |||
1024 | /* TODO */ | ||
1025 | static int cz_enable_cgpg(struct amdgpu_device *adev) | ||
1026 | { | ||
1027 | return 0; | ||
1028 | } | ||
1029 | |||
1030 | /* TODO */ | ||
1031 | static int cz_program_pt_config_registers(struct amdgpu_device *adev) | ||
1032 | { | ||
1033 | return 0; | ||
1034 | } | ||
1035 | |||
1036 | static void cz_do_enable_didt(struct amdgpu_device *adev, bool enable) | ||
1037 | { | ||
1038 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1039 | uint32_t reg = 0; | ||
1040 | |||
1041 | if (pi->caps_sq_ramping) { | ||
1042 | reg = RREG32_DIDT(ixDIDT_SQ_CTRL0); | ||
1043 | if (enable) | ||
1044 | reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 1); | ||
1045 | else | ||
1046 | reg = REG_SET_FIELD(reg, DIDT_SQ_CTRL0, DIDT_CTRL_EN, 0); | ||
1047 | WREG32_DIDT(ixDIDT_SQ_CTRL0, reg); | ||
1048 | } | ||
1049 | if (pi->caps_db_ramping) { | ||
1050 | reg = RREG32_DIDT(ixDIDT_DB_CTRL0); | ||
1051 | if (enable) | ||
1052 | reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 1); | ||
1053 | else | ||
1054 | reg = REG_SET_FIELD(reg, DIDT_DB_CTRL0, DIDT_CTRL_EN, 0); | ||
1055 | WREG32_DIDT(ixDIDT_DB_CTRL0, reg); | ||
1056 | } | ||
1057 | if (pi->caps_td_ramping) { | ||
1058 | reg = RREG32_DIDT(ixDIDT_TD_CTRL0); | ||
1059 | if (enable) | ||
1060 | reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 1); | ||
1061 | else | ||
1062 | reg = REG_SET_FIELD(reg, DIDT_TD_CTRL0, DIDT_CTRL_EN, 0); | ||
1063 | WREG32_DIDT(ixDIDT_TD_CTRL0, reg); | ||
1064 | } | ||
1065 | if (pi->caps_tcp_ramping) { | ||
1066 | reg = RREG32_DIDT(ixDIDT_TCP_CTRL0); | ||
1067 | if (enable) | ||
1068 | reg = REG_SET_FIELD(reg, DIDT_TCP_CTRL0, DIDT_CTRL_EN, 1); | ||
1069 | else | ||
1070 | reg = REG_SET_FIELD(reg, DIDT_TCP_CTRL0, DIDT_CTRL_EN, 0); | ||
1071 | WREG32_DIDT(ixDIDT_TCP_CTRL0, reg); | ||
1072 | } | ||
1073 | |||
1074 | } | ||
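REG_SET_FIELD is a read-modify-write helper over the VI-style <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT macro convention; one of the per-block updates above expands to roughly the following (a sketch under that naming assumption, not the macro's verbatim expansion):

	reg = RREG32_DIDT(ixDIDT_SQ_CTRL0);
	reg &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
	reg |= (enable ? 1 : 0) << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT;
	WREG32_DIDT(ixDIDT_SQ_CTRL0, reg);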
1075 | |||
1076 | static int cz_enable_didt(struct amdgpu_device *adev, bool enable) | ||
1077 | { | ||
1078 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1079 | int ret; | ||
1080 | |||
1081 | if (pi->caps_sq_ramping || pi->caps_db_ramping || | ||
1082 | pi->caps_td_ramping || pi->caps_tcp_ramping) { | ||
1083 | if (adev->gfx.gfx_current_status != AMDGPU_GFX_SAFE_MODE) { | ||
1084 | ret = cz_disable_cgpg(adev); | ||
1085 | if (ret) { | ||
1086 | DRM_ERROR("Pre Di/Dt disable cg/pg failed\n"); | ||
1087 | return -EINVAL; | ||
1088 | } | ||
1089 | adev->gfx.gfx_current_status = AMDGPU_GFX_SAFE_MODE; | ||
1090 | } | ||
1091 | |||
1092 | ret = cz_program_pt_config_registers(adev); | ||
1093 | if (ret) { | ||
1094 | DRM_ERROR("Di/Dt config failed\n"); | ||
1095 | return -EINVAL; | ||
1096 | } | ||
1097 | cz_do_enable_didt(adev, enable); | ||
1098 | |||
1099 | if (adev->gfx.gfx_current_status == AMDGPU_GFX_SAFE_MODE) { | ||
1100 | ret = cz_enable_cgpg(adev); | ||
1101 | if (ret) { | ||
1102 | DRM_ERROR("Post Di/Dt enable cg/pg failed\n"); | ||
1103 | return -EINVAL; | ||
1104 | } | ||
1105 | adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | return 0; | ||
1110 | } | ||
1111 | |||
1112 | /* TODO */ | ||
1113 | static void cz_reset_acp_boot_level(struct amdgpu_device *adev) | ||
1114 | { | ||
1115 | } | ||
1116 | |||
1117 | static void cz_update_current_ps(struct amdgpu_device *adev, | ||
1118 | struct amdgpu_ps *rps) | ||
1119 | { | ||
1120 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1121 | struct cz_ps *ps = cz_get_ps(rps); | ||
1122 | |||
1123 | pi->current_ps = *ps; | ||
1124 | pi->current_rps = *rps; | ||
1125 | pi->current_rps.ps_priv = ps; | ||
1126 | |||
1127 | } | ||
1128 | |||
1129 | static void cz_update_requested_ps(struct amdgpu_device *adev, | ||
1130 | struct amdgpu_ps *rps) | ||
1131 | { | ||
1132 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1133 | struct cz_ps *ps = cz_get_ps(rps); | ||
1134 | |||
1135 | pi->requested_ps = *ps; | ||
1136 | pi->requested_rps = *rps; | ||
1137 | pi->requested_rps.ps_priv = ps; | ||
1138 | |||
1139 | } | ||
1140 | |||
1141 | /* PP arbiter support needed TODO */ | ||
1142 | static void cz_apply_state_adjust_rules(struct amdgpu_device *adev, | ||
1143 | struct amdgpu_ps *new_rps, | ||
1144 | struct amdgpu_ps *old_rps) | ||
1145 | { | ||
1146 | struct cz_ps *ps = cz_get_ps(new_rps); | ||
1147 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1148 | struct amdgpu_clock_and_voltage_limits *limits = | ||
1149 | &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; | ||
1150 | /* 10kHz memory clock */ | ||
1151 | uint32_t mclk = 0; | ||
1152 | |||
1153 | ps->force_high = false; | ||
1154 | ps->need_dfs_bypass = true; | ||
1155 | pi->video_start = new_rps->dclk || new_rps->vclk || | ||
1156 | new_rps->evclk || new_rps->ecclk; | ||
1157 | |||
1158 | if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == | ||
1159 | ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) | ||
1160 | pi->battery_state = true; | ||
1161 | else | ||
1162 | pi->battery_state = false; | ||
1163 | |||
1164 | if (pi->caps_stable_power_state) | ||
1165 | mclk = limits->mclk; | ||
1166 | |||
1167 | if (mclk > pi->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK - 1]) | ||
1168 | ps->force_high = true; | ||
1169 | |||
1170 | } | ||
1171 | |||
1172 | static int cz_dpm_enable(struct amdgpu_device *adev) | ||
1173 | { | ||
1174 | int ret = 0; | ||
1175 | |||
1176 | /* re-enabling DPM will hang up the SMU, so check first */ | ||
1177 | if (cz_check_for_dpm_enabled(adev)) | ||
1178 | return -EINVAL; | ||
1179 | |||
1180 | cz_program_voting_clients(adev); | ||
1181 | |||
1182 | ret = cz_start_dpm(adev); | ||
1183 | if (ret) { | ||
1184 | DRM_ERROR("Carrizo DPM enable failed\n"); | ||
1185 | return -EINVAL; | ||
1186 | } | ||
1187 | |||
1188 | ret = cz_program_bootup_state(adev); | ||
1189 | if (ret) { | ||
1190 | DRM_ERROR("Carrizo bootup state program failed\n"); | ||
1191 | return -EINVAL; | ||
1192 | } | ||
1193 | |||
1194 | ret = cz_enable_didt(adev, true); | ||
1195 | if (ret) { | ||
1196 | DRM_ERROR("Carrizo enable di/dt failed\n"); | ||
1197 | return -EINVAL; | ||
1198 | } | ||
1199 | |||
1200 | cz_reset_acp_boot_level(adev); | ||
1201 | |||
1202 | cz_update_current_ps(adev, adev->pm.dpm.boot_ps); | ||
1203 | |||
1204 | return 0; | ||
1205 | } | ||
1206 | |||
1207 | static int cz_dpm_hw_init(struct amdgpu_device *adev) | ||
1208 | { | ||
1209 | int ret; | ||
1210 | |||
1211 | if (!amdgpu_dpm) | ||
1212 | return 0; | ||
1213 | |||
1214 | mutex_lock(&adev->pm.mutex); | ||
1215 | |||
1216 | /* init smc in dpm hw init */ | ||
1217 | ret = cz_smu_init(adev); | ||
1218 | if (ret) { | ||
1219 | DRM_ERROR("amdgpu: smc initialization failed\n"); | ||
1220 | mutex_unlock(&adev->pm.mutex); | ||
1221 | return ret; | ||
1222 | } | ||
1223 | |||
1224 | /* do the actual fw loading */ | ||
1225 | ret = cz_smu_start(adev); | ||
1226 | if (ret) { | ||
1227 | DRM_ERROR("amdgpu: smc start failed\n"); | ||
1228 | mutex_unlock(&adev->pm.mutex); | ||
1229 | return ret; | ||
1230 | } | ||
1231 | |||
1232 | /* cz dpm setup asic */ | ||
1233 | cz_dpm_setup_asic(adev); | ||
1234 | |||
1235 | /* cz dpm enable */ | ||
1236 | ret = cz_dpm_enable(adev); | ||
1237 | if (ret) | ||
1238 | adev->pm.dpm_enabled = false; | ||
1239 | else | ||
1240 | adev->pm.dpm_enabled = true; | ||
1241 | |||
1242 | mutex_unlock(&adev->pm.mutex); | ||
1243 | |||
1244 | return 0; | ||
1245 | } | ||
1246 | |||
1247 | static int cz_dpm_disable(struct amdgpu_device *adev) | ||
1248 | { | ||
1249 | int ret = 0; | ||
1250 | |||
1251 | if (!cz_check_for_dpm_enabled(adev)) | ||
1252 | return -EINVAL; | ||
1253 | |||
1254 | ret = cz_enable_didt(adev, false); | ||
1255 | if (ret) { | ||
1256 | DRM_ERROR("Carrizo disable di/dt failed\n"); | ||
1257 | return -EINVAL; | ||
1258 | } | ||
1259 | |||
1260 | cz_clear_voting_clients(adev); | ||
1261 | cz_stop_dpm(adev); | ||
1262 | cz_update_current_ps(adev, adev->pm.dpm.boot_ps); | ||
1263 | |||
1264 | return 0; | ||
1265 | } | ||
1266 | |||
1267 | static int cz_dpm_hw_fini(struct amdgpu_device *adev) | ||
1268 | { | ||
1269 | int ret = 0; | ||
1270 | |||
1271 | mutex_lock(&adev->pm.mutex); | ||
1272 | |||
1273 | cz_smu_fini(adev); | ||
1274 | |||
1275 | if (adev->pm.dpm_enabled) { | ||
1276 | ret = cz_dpm_disable(adev); | ||
1277 | if (ret) { | ||
 | mutex_unlock(&adev->pm.mutex); | ||
1278 | return -EINVAL; | ||
 | } | ||
1279 | |||
1280 | adev->pm.dpm.current_ps = | ||
1281 | adev->pm.dpm.requested_ps = | ||
1282 | adev->pm.dpm.boot_ps; | ||
1283 | } | ||
1284 | |||
1285 | adev->pm.dpm_enabled = false; | ||
1286 | |||
1287 | mutex_unlock(&adev->pm.mutex); | ||
1288 | |||
1289 | return 0; | ||
1290 | } | ||
1291 | |||
1292 | static int cz_dpm_suspend(struct amdgpu_device *adev) | ||
1293 | { | ||
1294 | int ret = 0; | ||
1295 | |||
1296 | if (adev->pm.dpm_enabled) { | ||
1297 | mutex_lock(&adev->pm.mutex); | ||
1298 | |||
1299 | ret = cz_dpm_disable(adev); | ||
1300 | if (ret) { | ||
 | mutex_unlock(&adev->pm.mutex); | ||
1301 | return -EINVAL; | ||
 | } | ||
1302 | |||
1303 | adev->pm.dpm.current_ps = | ||
1304 | adev->pm.dpm.requested_ps = | ||
1305 | adev->pm.dpm.boot_ps; | ||
1306 | |||
1307 | mutex_unlock(&adev->pm.mutex); | ||
1308 | } | ||
1309 | |||
1310 | return 0; | ||
1311 | } | ||
1312 | |||
1313 | static int cz_dpm_resume(struct amdgpu_device *adev) | ||
1314 | { | ||
1315 | int ret = 0; | ||
1316 | |||
1317 | mutex_lock(&adev->pm.mutex); | ||
1318 | ret = cz_smu_init(adev); | ||
1319 | if (ret) { | ||
1320 | DRM_ERROR("amdgpu: smc resume failed\n"); | ||
1321 | mutex_unlock(&adev->pm.mutex); | ||
1322 | return ret; | ||
1323 | } | ||
1324 | |||
1325 | /* do the actual fw loading */ | ||
1326 | ret = cz_smu_start(adev); | ||
1327 | if (ret) { | ||
1328 | DRM_ERROR("amdgpu: smc start failed\n"); | ||
1329 | mutex_unlock(&adev->pm.mutex); | ||
1330 | return ret; | ||
1331 | } | ||
1332 | |||
1333 | /* cz dpm setup asic */ | ||
1334 | cz_dpm_setup_asic(adev); | ||
1335 | |||
1336 | /* cz dpm enable */ | ||
1337 | ret = cz_dpm_enable(adev); | ||
1338 | if (ret) | ||
1339 | adev->pm.dpm_enabled = false; | ||
1340 | else | ||
1341 | adev->pm.dpm_enabled = true; | ||
1342 | |||
1343 | mutex_unlock(&adev->pm.mutex); | ||
1344 | /* upon resume, re-compute the clocks */ | ||
1345 | if (adev->pm.dpm_enabled) | ||
1346 | amdgpu_pm_compute_clocks(adev); | ||
1347 | |||
1348 | return 0; | ||
1349 | } | ||
1350 | |||
1351 | static int cz_dpm_set_clockgating_state(struct amdgpu_device *adev, | ||
1352 | enum amdgpu_clockgating_state state) | ||
1353 | { | ||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | static int cz_dpm_set_powergating_state(struct amdgpu_device *adev, | ||
1358 | enum amdgpu_powergating_state state) | ||
1359 | { | ||
1360 | return 0; | ||
1361 | } | ||
1362 | |||
1363 | /* borrowed from KV; should be unified with KV in the future */ | ||
1364 | static int cz_dpm_get_temperature(struct amdgpu_device *adev) | ||
1365 | { | ||
1366 | int actual_temp = 0; | ||
1367 | uint32_t temp = RREG32_SMC(0xC0300E0C); | ||
1368 | |||
1369 | if (temp) | ||
1370 | actual_temp = 1000 * ((temp / 8) - 49); | ||
1371 | |||
1372 | return actual_temp; | ||
1373 | } | ||
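Worked example of the conversion above (reading hypothetical): a raw value of temp = 680 gives (680 / 8) - 49 = 36 degrees C, reported as 36000 millidegrees; a reading of 0 is treated as "sensor not ready" and 0 is returned.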
1374 | |||
1375 | static int cz_dpm_pre_set_power_state(struct amdgpu_device *adev) | ||
1376 | { | ||
1377 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1378 | struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; | ||
1379 | struct amdgpu_ps *new_ps = &requested_ps; | ||
1380 | |||
1381 | cz_update_requested_ps(adev, new_ps); | ||
1382 | cz_apply_state_adjust_rules(adev, &pi->requested_rps, | ||
1383 | &pi->current_rps); | ||
1384 | |||
1385 | return 0; | ||
1386 | } | ||
1387 | |||
1388 | static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev) | ||
1389 | { | ||
1390 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1391 | struct amdgpu_clock_and_voltage_limits *limits = | ||
1392 | &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; | ||
1393 | uint32_t clock, stable_ps_clock = 0; | ||
1394 | |||
1395 | clock = pi->sclk_dpm.soft_min_clk; | ||
1396 | |||
1397 | if (pi->caps_stable_power_state) { | ||
1398 | stable_ps_clock = limits->sclk * 75 / 100; | ||
1399 | if (clock < stable_ps_clock) | ||
1400 | clock = stable_ps_clock; | ||
1401 | } | ||
1402 | |||
1403 | if (clock != pi->sclk_dpm.soft_min_clk) { | ||
1404 | pi->sclk_dpm.soft_min_clk = clock; | ||
1405 | cz_send_msg_to_smc_with_parameter(adev, | ||
1406 | PPSMC_MSG_SetSclkSoftMin, | ||
1407 | cz_get_sclk_level(adev, clock, | ||
1408 | PPSMC_MSG_SetSclkSoftMin)); | ||
1409 | } | ||
1410 | |||
1411 | if (pi->caps_stable_power_state && | ||
1412 | pi->sclk_dpm.soft_max_clk != clock) { | ||
1413 | pi->sclk_dpm.soft_max_clk = clock; | ||
1414 | cz_send_msg_to_smc_with_parameter(adev, | ||
1415 | PPSMC_MSG_SetSclkSoftMax, | ||
1416 | cz_get_sclk_level(adev, clock, | ||
1417 | PPSMC_MSG_SetSclkSoftMax)); | ||
1418 | } else { | ||
1419 | cz_send_msg_to_smc_with_parameter(adev, | ||
1420 | PPSMC_MSG_SetSclkSoftMax, | ||
1421 | cz_get_sclk_level(adev, | ||
1422 | pi->sclk_dpm.soft_max_clk, | ||
1423 | PPSMC_MSG_SetSclkSoftMax)); | ||
1424 | } | ||
1425 | |||
1426 | return 0; | ||
1427 | } | ||
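Worked example of the clamp above (numbers hypothetical): with limits->sclk = 80000 in the driver's 10 kHz units (800 MHz), the stable-power-state floor is 80000 * 75 / 100 = 60000, so a soft minimum of 30000 would be raised to 60000 before the corresponding level is sent via PPSMC_MSG_SetSclkSoftMin.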
1428 | |||
1429 | static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev) | ||
1430 | { | ||
1431 | int ret = 0; | ||
1432 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1433 | |||
1434 | if (pi->caps_sclk_ds) { | ||
1435 | cz_send_msg_to_smc_with_parameter(adev, | ||
1436 | PPSMC_MSG_SetMinDeepSleepSclk, | ||
1437 | CZ_MIN_DEEP_SLEEP_SCLK); | ||
1438 | } | ||
1439 | |||
1440 | return ret; | ||
1441 | } | ||
1442 | |||
1443 | /* TODO: without DAL support, is this still needed in the set-power-state path? */ | ||
1444 | static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev) | ||
1445 | { | ||
1446 | int ret = 0; | ||
1447 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1448 | |||
1449 | cz_send_msg_to_smc_with_parameter(adev, | ||
1450 | PPSMC_MSG_SetWatermarkFrequency, | ||
1451 | pi->sclk_dpm.soft_max_clk); | ||
1452 | |||
1453 | return ret; | ||
1454 | } | ||
1455 | |||
1456 | static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev) | ||
1457 | { | ||
1458 | int ret = 0; | ||
1459 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1460 | |||
1461 | /* also depends on DAL's NBPStateDisableRequired */ | ||
1462 | if (pi->nb_dpm_enabled_by_driver && !pi->nb_dpm_enabled) { | ||
1463 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
1464 | PPSMC_MSG_EnableAllSmuFeatures, | ||
1465 | NB_DPM_MASK); | ||
1466 | if (ret) { | ||
1467 | DRM_ERROR("amdgpu: nb dpm enable failed\n"); | ||
1468 | return ret; | ||
1469 | } | ||
1470 | pi->nb_dpm_enabled = true; | ||
1471 | } | ||
1472 | |||
1473 | return ret; | ||
1474 | } | ||
1475 | |||
1476 | static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev, | ||
1477 | bool enable) | ||
1478 | { | ||
1479 | if (enable) | ||
1480 | cz_send_msg_to_smc(adev, PPSMC_MSG_EnableLowMemoryPstate); | ||
1481 | else | ||
1482 | cz_send_msg_to_smc(adev, PPSMC_MSG_DisableLowMemoryPstate); | ||
1483 | |||
1484 | } | ||
1485 | |||
1486 | static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev) | ||
1487 | { | ||
1488 | int ret = 0; | ||
1489 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1490 | struct cz_ps *ps = &pi->requested_ps; | ||
1491 | |||
1492 | if (pi->sys_info.nb_dpm_enable) { | ||
1493 | if (ps->force_high) | ||
1494 | cz_dpm_nbdpm_lm_pstate_enable(adev, true); | ||
1495 | else | ||
1496 | cz_dpm_nbdpm_lm_pstate_enable(adev, false); | ||
1497 | } | ||
1498 | |||
1499 | return ret; | ||
1500 | } | ||
1501 | |||
1502 | /* only called while dpm is enabled */ | ||
1503 | static int cz_dpm_set_power_state(struct amdgpu_device *adev) | ||
1504 | { | ||
1505 | int ret = 0; | ||
1506 | |||
1507 | cz_dpm_update_sclk_limit(adev); | ||
1508 | cz_dpm_set_deep_sleep_sclk_threshold(adev); | ||
1509 | cz_dpm_set_watermark_threshold(adev); | ||
1510 | cz_dpm_enable_nbdpm(adev); | ||
1511 | cz_dpm_update_low_memory_pstate(adev); | ||
1512 | |||
1513 | return ret; | ||
1514 | } | ||
1515 | |||
1516 | static void cz_dpm_post_set_power_state(struct amdgpu_device *adev) | ||
1517 | { | ||
1518 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1519 | struct amdgpu_ps *ps = &pi->requested_rps; | ||
1520 | |||
1521 | cz_update_current_ps(adev, ps); | ||
1522 | |||
1523 | } | ||
1524 | |||
1525 | static int cz_dpm_force_highest(struct amdgpu_device *adev) | ||
1526 | { | ||
1527 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1528 | int ret = 0; | ||
1529 | |||
1530 | if (pi->sclk_dpm.soft_min_clk != pi->sclk_dpm.soft_max_clk) { | ||
1531 | pi->sclk_dpm.soft_min_clk = | ||
1532 | pi->sclk_dpm.soft_max_clk; | ||
1533 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
1534 | PPSMC_MSG_SetSclkSoftMin, | ||
1535 | cz_get_sclk_level(adev, | ||
1536 | pi->sclk_dpm.soft_min_clk, | ||
1537 | PPSMC_MSG_SetSclkSoftMin)); | ||
1538 | if (ret) | ||
1539 | return ret; | ||
1540 | } | ||
1541 | |||
1542 | return ret; | ||
1543 | } | ||
1544 | |||
1545 | static int cz_dpm_force_lowest(struct amdgpu_device *adev) | ||
1546 | { | ||
1547 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1548 | int ret = 0; | ||
1549 | |||
1550 | if (pi->sclk_dpm.soft_max_clk != pi->sclk_dpm.soft_min_clk) { | ||
1551 | pi->sclk_dpm.soft_max_clk = pi->sclk_dpm.soft_min_clk; | ||
1552 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
1553 | PPSMC_MSG_SetSclkSoftMax, | ||
1554 | cz_get_sclk_level(adev, | ||
1555 | pi->sclk_dpm.soft_max_clk, | ||
1556 | PPSMC_MSG_SetSclkSoftMax)); | ||
1557 | if (ret) | ||
1558 | return ret; | ||
1559 | } | ||
1560 | |||
1561 | return ret; | ||
1562 | } | ||
1563 | |||
1564 | static uint32_t cz_dpm_get_max_sclk_level(struct amdgpu_device *adev) | ||
1565 | { | ||
1566 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1567 | |||
1568 | if (!pi->max_sclk_level) { | ||
1569 | cz_send_msg_to_smc(adev, PPSMC_MSG_GetMaxSclkLevel); | ||
1570 | pi->max_sclk_level = cz_get_argument(adev) + 1; | ||
1571 | } | ||
1572 | |||
1573 | if (pi->max_sclk_level > CZ_MAX_HARDWARE_POWERLEVELS) { | ||
1574 | DRM_ERROR("Invalid max sclk level!\n"); | ||
1575 | return -EINVAL; | ||
1576 | } | ||
1577 | |||
1578 | return pi->max_sclk_level; | ||
1579 | } | ||
1580 | |||
1581 | static int cz_dpm_unforce_dpm_levels(struct amdgpu_device *adev) | ||
1582 | { | ||
1583 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1584 | struct amdgpu_clock_voltage_dependency_table *dep_table = | ||
1585 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | ||
1586 | uint32_t level = 0; | ||
1587 | int ret = 0; | ||
1588 | |||
1589 | pi->sclk_dpm.soft_min_clk = dep_table->entries[0].clk; | ||
1590 | level = cz_dpm_get_max_sclk_level(adev) - 1; | ||
1591 | if (level < dep_table->count) | ||
1592 | pi->sclk_dpm.soft_max_clk = dep_table->entries[level].clk; | ||
1593 | else | ||
1594 | pi->sclk_dpm.soft_max_clk = | ||
1595 | dep_table->entries[dep_table->count - 1].clk; | ||
1596 | |||
1597 | /* send the new min/max sclk soft levels | ||
1598 | * to the SMU so it applies them */ | ||
1599 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
1600 | PPSMC_MSG_SetSclkSoftMin, | ||
1601 | cz_get_sclk_level(adev, | ||
1602 | pi->sclk_dpm.soft_min_clk, | ||
1603 | PPSMC_MSG_SetSclkSoftMin)); | ||
1604 | if (ret) | ||
1605 | return ret; | ||
1606 | |||
1607 | ret = cz_send_msg_to_smc_with_parameter(adev, | ||
1608 | PPSMC_MSG_SetSclkSoftMax, | ||
1609 | cz_get_sclk_level(adev, | ||
1610 | pi->sclk_dpm.soft_max_clk, | ||
1611 | PPSMC_MSG_SetSclkSoftMax)); | ||
1612 | if (ret) | ||
1613 | return ret; | ||
1614 | |||
1615 | DRM_INFO("DPM unforce state min=%u, max=%u.\n", | ||
1616 | pi->sclk_dpm.soft_min_clk, | ||
1617 | pi->sclk_dpm.soft_max_clk); | ||
1618 | |||
1619 | return 0; | ||
1620 | } | ||
1621 | |||
1622 | static int cz_dpm_force_dpm_level(struct amdgpu_device *adev, | ||
1623 | enum amdgpu_dpm_forced_level level) | ||
1624 | { | ||
1625 | int ret = 0; | ||
1626 | |||
1627 | switch (level) { | ||
1628 | case AMDGPU_DPM_FORCED_LEVEL_HIGH: | ||
1629 | ret = cz_dpm_force_highest(adev); | ||
1630 | if (ret) | ||
1631 | return ret; | ||
1632 | break; | ||
1633 | case AMDGPU_DPM_FORCED_LEVEL_LOW: | ||
1634 | ret = cz_dpm_force_lowest(adev); | ||
1635 | if (ret) | ||
1636 | return ret; | ||
1637 | break; | ||
1638 | case AMDGPU_DPM_FORCED_LEVEL_AUTO: | ||
1639 | ret = cz_dpm_unforce_dpm_levels(adev); | ||
1640 | if (ret) | ||
1641 | return ret; | ||
1642 | break; | ||
1643 | default: | ||
1644 | break; | ||
1645 | } | ||
1646 | |||
1647 | return ret; | ||
1648 | } | ||
1649 | |||
1650 | /* TODO: handle display configuration change lists here; | ||
1651 | * mostly DAL related */ | ||
1652 | static void cz_dpm_display_configuration_changed(struct amdgpu_device *adev) | ||
1653 | { | ||
1654 | } | ||
1655 | |||
1656 | static uint32_t cz_dpm_get_sclk(struct amdgpu_device *adev, bool low) | ||
1657 | { | ||
1658 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1659 | struct cz_ps *requested_state = cz_get_ps(&pi->requested_rps); | ||
1660 | |||
1661 | if (low) | ||
1662 | return requested_state->levels[0].sclk; | ||
1663 | else | ||
1664 | return requested_state->levels[requested_state->num_levels - 1].sclk; | ||
1665 | |||
1666 | } | ||
1667 | |||
1668 | static uint32_t cz_dpm_get_mclk(struct amdgpu_device *adev, bool low) | ||
1669 | { | ||
1670 | struct cz_power_info *pi = cz_get_pi(adev); | ||
1671 | |||
1672 | return pi->sys_info.bootup_uma_clk; | ||
1673 | } | ||
1674 | |||
1675 | const struct amdgpu_ip_funcs cz_dpm_ip_funcs = { | ||
1676 | .early_init = cz_dpm_early_init, | ||
1677 | .late_init = NULL, | ||
1678 | .sw_init = cz_dpm_sw_init, | ||
1679 | .sw_fini = cz_dpm_sw_fini, | ||
1680 | .hw_init = cz_dpm_hw_init, | ||
1681 | .hw_fini = cz_dpm_hw_fini, | ||
1682 | .suspend = cz_dpm_suspend, | ||
1683 | .resume = cz_dpm_resume, | ||
1684 | .is_idle = NULL, | ||
1685 | .wait_for_idle = NULL, | ||
1686 | .soft_reset = NULL, | ||
1687 | .print_status = NULL, | ||
1688 | .set_clockgating_state = cz_dpm_set_clockgating_state, | ||
1689 | .set_powergating_state = cz_dpm_set_powergating_state, | ||
1690 | }; | ||
1691 | |||
1692 | static const struct amdgpu_dpm_funcs cz_dpm_funcs = { | ||
1693 | .get_temperature = cz_dpm_get_temperature, | ||
1694 | .pre_set_power_state = cz_dpm_pre_set_power_state, | ||
1695 | .set_power_state = cz_dpm_set_power_state, | ||
1696 | .post_set_power_state = cz_dpm_post_set_power_state, | ||
1697 | .display_configuration_changed = cz_dpm_display_configuration_changed, | ||
1698 | .get_sclk = cz_dpm_get_sclk, | ||
1699 | .get_mclk = cz_dpm_get_mclk, | ||
1700 | .print_power_state = cz_dpm_print_power_state, | ||
1701 | .debugfs_print_current_performance_level = | ||
1702 | cz_dpm_debugfs_print_current_performance_level, | ||
1703 | .force_performance_level = cz_dpm_force_dpm_level, | ||
1704 | .vblank_too_short = NULL, | ||
1705 | .powergate_uvd = NULL, | ||
1706 | }; | ||
1707 | |||
1708 | static void cz_dpm_set_funcs(struct amdgpu_device *adev) | ||
1709 | { | ||
1710 | if (!adev->pm.funcs) | ||
1711 | adev->pm.funcs = &cz_dpm_funcs; | ||
1712 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h new file mode 100644 index 000000000000..ed6449de5dc5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h | |||
@@ -0,0 +1,235 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __CZ_DPM_H__ | ||
25 | #define __CZ_DPM_H__ | ||
26 | |||
27 | #include "smu8_fusion.h" | ||
28 | |||
29 | #define CZ_AT_DFLT 30 | ||
30 | #define CZ_NUM_NBPSTATES 4 | ||
31 | #define CZ_NUM_NBPMEMORY_CLOCK 2 | ||
32 | #define CZ_MAX_HARDWARE_POWERLEVELS 8 | ||
33 | #define CZ_MAX_DISPLAY_CLOCK_LEVEL 8 | ||
34 | #define CZ_MAX_DISPLAYPHY_IDS 10 | ||
35 | |||
36 | #define PPCZ_VOTINGRIGHTSCLIENTS_DFLT0 0x3FFFC102 | ||
37 | |||
38 | #define SMC_RAM_END 0x40000 | ||
39 | |||
40 | #define DPMFlags_SCLK_Enabled 0x00000001 | ||
41 | #define DPMFlags_UVD_Enabled 0x00000002 | ||
42 | #define DPMFlags_VCE_Enabled 0x00000004 | ||
43 | #define DPMFlags_ACP_Enabled 0x00000008 | ||
44 | #define DPMFlags_ForceHighestValid 0x40000000 | ||
45 | #define DPMFlags_Debug 0x80000000 | ||
46 | |||
47 | /* Do not change the following, it is also defined in SMU8.h */ | ||
48 | #define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 | ||
49 | #define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00100000 | ||
50 | #define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 | ||
51 | #define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 | ||
52 | |||
53 | /* temporary value for SetMinDeepSleepSclk; | ||
54 | * should be indicated by the display adapter, | ||
55 | * in 10 kHz units */ | ||
56 | #define CZ_MIN_DEEP_SLEEP_SCLK 800 | ||
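For concreteness: in the 10 kHz units noted above, this define works out to 800 * 10 kHz = 8 MHz as the minimum deep-sleep sclk floor (arithmetic only; the unit comes from the comment).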
57 | |||
58 | enum cz_pt_config_reg_type { | ||
59 | CZ_CONFIGREG_MMR = 0, | ||
60 | CZ_CONFIGREG_SMC_IND, | ||
61 | CZ_CONFIGREG_DIDT_IND, | ||
62 | CZ_CONFIGREG_CACHE, | ||
63 | CZ_CONFIGREG_MAX | ||
64 | }; | ||
65 | |||
66 | struct cz_pt_config_reg { | ||
67 | uint32_t offset; | ||
68 | uint32_t mask; | ||
69 | uint32_t shift; | ||
70 | uint32_t value; | ||
71 | enum cz_pt_config_reg_type type; | ||
72 | }; | ||
73 | |||
74 | struct cz_dpm_entry { | ||
75 | uint32_t soft_min_clk; | ||
76 | uint32_t hard_min_clk; | ||
77 | uint32_t soft_max_clk; | ||
78 | uint32_t hard_max_clk; | ||
79 | }; | ||
80 | |||
81 | struct cz_pl { | ||
82 | uint32_t sclk; | ||
83 | uint8_t vddc_index; | ||
84 | uint8_t ds_divider_index; | ||
85 | uint8_t ss_divider_index; | ||
86 | uint8_t allow_gnb_slow; | ||
87 | uint8_t force_nbp_state; | ||
88 | uint8_t display_wm; | ||
89 | uint8_t vce_wm; | ||
90 | }; | ||
91 | |||
92 | struct cz_ps { | ||
93 | struct cz_pl levels[CZ_MAX_HARDWARE_POWERLEVELS]; | ||
94 | uint32_t num_levels; | ||
95 | bool need_dfs_bypass; | ||
96 | uint8_t dpm0_pg_nb_ps_lo; | ||
97 | uint8_t dpm0_pg_nb_ps_hi; | ||
98 | uint8_t dpmx_nb_ps_lo; | ||
99 | uint8_t dpmx_nb_ps_hi; | ||
100 | bool force_high; | ||
101 | }; | ||
102 | |||
103 | struct cz_displayphy_entry { | ||
104 | uint8_t phy_present; | ||
105 | uint8_t active_lane_mapping; | ||
106 | uint8_t display_conf_type; | ||
107 | uint8_t num_active_lanes; | ||
108 | }; | ||
109 | |||
110 | struct cz_displayphy_info { | ||
111 | bool phy_access_initialized; | ||
112 | struct cz_displayphy_entry entries[CZ_MAX_DISPLAYPHY_IDS]; | ||
113 | }; | ||
114 | |||
115 | struct cz_sys_info { | ||
116 | uint32_t bootup_uma_clk; | ||
117 | uint32_t bootup_sclk; | ||
118 | uint32_t dentist_vco_freq; | ||
119 | uint32_t nb_dpm_enable; | ||
120 | uint32_t nbp_memory_clock[CZ_NUM_NBPMEMORY_CLOCK]; | ||
121 | uint32_t nbp_n_clock[CZ_NUM_NBPSTATES]; | ||
122 | uint8_t nbp_voltage_index[CZ_NUM_NBPSTATES]; | ||
123 | uint32_t display_clock[CZ_MAX_DISPLAY_CLOCK_LEVEL]; | ||
124 | uint16_t bootup_nb_voltage_index; | ||
125 | uint8_t htc_tmp_lmt; | ||
126 | uint8_t htc_hyst_lmt; | ||
127 | uint32_t uma_channel_number; | ||
128 | }; | ||
129 | |||
130 | struct cz_power_info { | ||
131 | uint32_t active_target[CZ_MAX_HARDWARE_POWERLEVELS]; | ||
132 | struct cz_sys_info sys_info; | ||
133 | struct cz_pl boot_pl; | ||
134 | bool disable_nb_ps3_in_battery; | ||
135 | bool battery_state; | ||
136 | uint32_t lowest_valid; | ||
137 | uint32_t highest_valid; | ||
138 | uint16_t high_voltage_threshold; | ||
139 | /* smc offsets */ | ||
140 | uint32_t sram_end; | ||
141 | uint32_t dpm_table_start; | ||
142 | uint32_t soft_regs_start; | ||
143 | /* dpm SMU tables */ | ||
144 | uint8_t uvd_level_count; | ||
145 | uint8_t vce_level_count; | ||
146 | uint8_t acp_level_count; | ||
147 | uint32_t fps_high_threshold; | ||
148 | uint32_t fps_low_threshold; | ||
149 | /* dpm table */ | ||
150 | uint32_t dpm_flags; | ||
151 | struct cz_dpm_entry sclk_dpm; | ||
152 | struct cz_dpm_entry uvd_dpm; | ||
153 | struct cz_dpm_entry vce_dpm; | ||
154 | struct cz_dpm_entry acp_dpm; | ||
155 | |||
156 | uint8_t uvd_boot_level; | ||
157 | uint8_t uvd_interval; | ||
158 | uint8_t vce_boot_level; | ||
159 | uint8_t vce_interval; | ||
160 | uint8_t acp_boot_level; | ||
161 | uint8_t acp_interval; | ||
162 | |||
163 | uint8_t graphics_boot_level; | ||
164 | uint8_t graphics_interval; | ||
165 | uint8_t graphics_therm_throttle_enable; | ||
166 | uint8_t graphics_voltage_change_enable; | ||
167 | uint8_t graphics_clk_slow_enable; | ||
168 | uint8_t graphics_clk_slow_divider; | ||
169 | |||
170 | uint32_t low_sclk_interrupt_threshold; | ||
171 | bool uvd_power_gated; | ||
172 | bool vce_power_gated; | ||
173 | bool acp_power_gated; | ||
174 | |||
175 | uint32_t active_process_mask; | ||
176 | |||
177 | uint32_t mgcg_cgtt_local0; | ||
178 | uint32_t mgcg_cgtt_local1; | ||
179 | uint32_t clock_slow_down_step; | ||
180 | uint32_t skip_clock_slow_down; | ||
181 | bool enable_nb_ps_policy; | ||
182 | uint32_t voting_clients; | ||
183 | uint32_t voltage_drop_threshold; | ||
184 | uint32_t gfx_pg_threshold; | ||
185 | uint32_t max_sclk_level; | ||
186 | /* flags */ | ||
187 | bool didt_enabled; | ||
188 | bool video_start; | ||
189 | bool cac_enabled; | ||
190 | bool bapm_enabled; | ||
191 | bool nb_dpm_enabled_by_driver; | ||
192 | bool nb_dpm_enabled; | ||
193 | bool auto_thermal_throttling_enabled; | ||
194 | bool dpm_enabled; | ||
195 | bool need_pptable_upload; | ||
196 | /* caps */ | ||
197 | bool caps_cac; | ||
198 | bool caps_power_containment; | ||
199 | bool caps_sq_ramping; | ||
200 | bool caps_db_ramping; | ||
201 | bool caps_td_ramping; | ||
202 | bool caps_tcp_ramping; | ||
203 | bool caps_sclk_throttle_low_notification; | ||
204 | bool caps_fps; | ||
205 | bool caps_uvd_dpm; | ||
206 | bool caps_uvd_pg; | ||
207 | bool caps_vce_dpm; | ||
208 | bool caps_vce_pg; | ||
209 | bool caps_acp_dpm; | ||
210 | bool caps_acp_pg; | ||
211 | bool caps_stable_power_state; | ||
212 | bool caps_enable_dfs_bypass; | ||
213 | bool caps_sclk_ds; | ||
214 | bool caps_voltage_island; | ||
215 | /* power state */ | ||
216 | struct amdgpu_ps current_rps; | ||
217 | struct cz_ps current_ps; | ||
218 | struct amdgpu_ps requested_rps; | ||
219 | struct cz_ps requested_ps; | ||
220 | |||
221 | bool uvd_power_down; | ||
222 | bool vce_power_down; | ||
223 | bool acp_power_down; | ||
224 | }; | ||
225 | |||
226 | /* cz_smc.c */ | ||
227 | uint32_t cz_get_argument(struct amdgpu_device *adev); | ||
228 | int cz_send_msg_to_smc(struct amdgpu_device *adev, uint16_t msg); | ||
229 | int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, | ||
230 | uint16_t msg, uint32_t parameter); | ||
231 | int cz_read_smc_sram_dword(struct amdgpu_device *adev, | ||
232 | uint32_t smc_address, uint32_t *value, uint32_t limit); | ||
233 | int cz_smu_upload_pptable(struct amdgpu_device *adev); | ||
234 | int cz_smu_download_pptable(struct amdgpu_device *adev, void **table); | ||
235 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c new file mode 100644 index 000000000000..80d508e64a86 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c | |||
@@ -0,0 +1,435 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include "drmP.h" | ||
24 | #include "amdgpu.h" | ||
25 | #include "amdgpu_ih.h" | ||
26 | #include "vid.h" | ||
27 | |||
28 | #include "oss/oss_3_0_1_d.h" | ||
29 | #include "oss/oss_3_0_1_sh_mask.h" | ||
30 | |||
31 | #include "bif/bif_5_1_d.h" | ||
32 | #include "bif/bif_5_1_sh_mask.h" | ||
33 | |||
34 | /* | ||
35 | * Interrupts | ||
36 | * Starting with r6xx, interrupts are handled via a ring buffer. | ||
37 | * Ring buffers are areas of GPU accessible memory that the GPU | ||
38 | * writes interrupt vectors into and the host reads vectors out of. | ||
39 | * There is a rptr (read pointer) that determines where the | ||
40 | * host is currently reading, and a wptr (write pointer) | ||
41 | * which determines where the GPU has written. When the | ||
42 | * pointers are equal, the ring is idle. When the GPU | ||
43 | * writes vectors to the ring buffer, it increments the | ||
44 | * wptr. When there is an interrupt, the host then starts | ||
45 | * fetching commands and processing them until the pointers are | ||
46 | * equal again at which point it updates the rptr. | ||
47 | */ | ||
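A minimal host-side sketch of the rptr/wptr protocol the comment describes (plain C with index-based pointers; the real ring uses byte offsets and multi-dword entries):

	/* drain all pending vectors; returns the read pointer to publish */
	static uint32_t ih_drain_ring(const uint32_t *ring, uint32_t num_entries,
				      uint32_t rptr, uint32_t wptr)
	{
		while (rptr != wptr) {
			uint32_t vector = ring[rptr];

			/* decode and dispatch 'vector' here */
			(void)vector;
			rptr = (rptr + 1) % num_entries;
		}
		return rptr;
	}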
48 | |||
49 | static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev); | ||
50 | |||
51 | /** | ||
52 | * cz_ih_enable_interrupts - Enable the interrupt ring buffer | ||
53 | * | ||
54 | * @adev: amdgpu_device pointer | ||
55 | * | ||
56 | * Enable the interrupt ring buffer (VI). | ||
57 | */ | ||
58 | static void cz_ih_enable_interrupts(struct amdgpu_device *adev) | ||
59 | { | ||
60 | u32 ih_cntl = RREG32(mmIH_CNTL); | ||
61 | u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); | ||
62 | |||
63 | ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1); | ||
64 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); | ||
65 | WREG32(mmIH_CNTL, ih_cntl); | ||
66 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
67 | adev->irq.ih.enabled = true; | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * cz_ih_disable_interrupts - Disable the interrupt ring buffer | ||
72 | * | ||
73 | * @adev: amdgpu_device pointer | ||
74 | * | ||
75 | * Disable the interrupt ring buffer (VI). | ||
76 | */ | ||
77 | static void cz_ih_disable_interrupts(struct amdgpu_device *adev) | ||
78 | { | ||
79 | u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); | ||
80 | u32 ih_cntl = RREG32(mmIH_CNTL); | ||
81 | |||
82 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); | ||
83 | ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0); | ||
84 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
85 | WREG32(mmIH_CNTL, ih_cntl); | ||
86 | /* set rptr, wptr to 0 */ | ||
87 | WREG32(mmIH_RB_RPTR, 0); | ||
88 | WREG32(mmIH_RB_WPTR, 0); | ||
89 | adev->irq.ih.enabled = false; | ||
90 | adev->irq.ih.rptr = 0; | ||
91 | } | ||
92 | |||
93 | /** | ||
94 | * cz_ih_irq_init - init and enable the interrupt ring | ||
95 | * | ||
96 | * @adev: amdgpu_device pointer | ||
97 | * | ||
98 | * Set up the interrupt controller: disable interrupts, | ||
99 | * program the interrupt control registers and the IH | ||
100 | * ring buffer, then enable the ring buffer (VI). | ||
101 | * Called at device load and resume. | ||
102 | * Returns 0 for success, errors for failure. | ||
103 | */ | ||
104 | static int cz_ih_irq_init(struct amdgpu_device *adev) | ||
105 | { | ||
106 | int ret = 0; | ||
107 | int rb_bufsz; | ||
108 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | ||
109 | u64 wptr_off; | ||
110 | |||
111 | /* disable irqs */ | ||
112 | cz_ih_disable_interrupts(adev); | ||
113 | |||
114 | /* setup interrupt control */ | ||
115 | WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); | ||
116 | interrupt_cntl = RREG32(mmINTERRUPT_CNTL); | ||
117 | /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi | ||
118 | * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN | ||
119 | */ | ||
120 | interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0); | ||
121 | /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ | ||
122 | interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0); | ||
123 | WREG32(mmINTERRUPT_CNTL, interrupt_cntl); | ||
124 | |||
125 | /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ | ||
126 | WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8); | ||
127 | |||
128 | rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); | ||
129 | ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1); | ||
130 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); | ||
131 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); | ||
132 | |||
133 | /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */ | ||
134 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1); | ||
135 | |||
136 | /* set the writeback address whether it's enabled or not */ | ||
137 | wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); | ||
138 | WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); | ||
139 | WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); | ||
140 | |||
141 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
142 | |||
143 | /* set rptr, wptr to 0 */ | ||
144 | WREG32(mmIH_RB_RPTR, 0); | ||
145 | WREG32(mmIH_RB_WPTR, 0); | ||
146 | |||
147 | /* Default settings for IH_CNTL (disabled at first) */ | ||
148 | ih_cntl = RREG32(mmIH_CNTL); | ||
149 | ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0); | ||
150 | |||
151 | if (adev->irq.msi_enabled) | ||
152 | ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1); | ||
153 | WREG32(mmIH_CNTL, ih_cntl); | ||
154 | |||
155 | pci_set_master(adev->pdev); | ||
156 | |||
157 | /* enable interrupts */ | ||
158 | cz_ih_enable_interrupts(adev); | ||
159 | |||
160 | return ret; | ||
161 | } | ||
162 | |||
163 | /** | ||
164 | * cz_ih_irq_disable - disable interrupts | ||
165 | * | ||
166 | * @adev: amdgpu_device pointer | ||
167 | * | ||
168 | * Disable interrupts on the hw (VI). | ||
169 | */ | ||
170 | static void cz_ih_irq_disable(struct amdgpu_device *adev) | ||
171 | { | ||
172 | cz_ih_disable_interrupts(adev); | ||
173 | |||
174 | /* Wait and acknowledge irq */ | ||
175 | mdelay(1); | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * cz_ih_get_wptr - get the IH ring buffer wptr | ||
180 | * | ||
181 | * @adev: amdgpu_device pointer | ||
182 | * | ||
183 | * Get the IH ring buffer wptr from either the register | ||
184 | * or the writeback memory buffer (VI). Also check for | ||
185 | * ring buffer overflow and deal with it. | ||
186 | * Used by the amdgpu interrupt handling code (VI). | ||
187 | * Returns the value of the wptr. | ||
188 | */ | ||
189 | static u32 cz_ih_get_wptr(struct amdgpu_device *adev) | ||
190 | { | ||
191 | u32 wptr, tmp; | ||
192 | |||
193 | wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); | ||
194 | |||
195 | if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { | ||
196 | wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); | ||
197 | /* When a ring buffer overflow happens, start parsing interrupts | ||
198 |  * from the last vector that was not overwritten (wptr + 16). | ||
199 |  * Hopefully this allows us to catch up. | ||
200 |  */ | ||
201 | dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", | ||
202 | wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); | ||
203 | adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; | ||
204 | tmp = RREG32(mmIH_RB_CNTL); | ||
205 | tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); | ||
206 | WREG32(mmIH_RB_CNTL, tmp); | ||
207 | } | ||
208 | return (wptr & adev->irq.ih.ptr_mask); | ||
209 | } | ||
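/*
 * Worked example (illustrative): with the 64KB ring allocated in
 * cz_ih_sw_init() below, ptr_mask is 0xFFFF (assuming
 * amdgpu_ih_ring_init() derives the mask from the byte size).  If the
 * writeback word reads 0x1231 -- wptr 0x1230 with RB_OVERFLOW set,
 * taking RB_OVERFLOW as the low status bit as on earlier ASICs -- the
 * code above clears the overflow bit, restarts reading at
 * rptr = (0x1230 + 16) & 0xFFFF = 0x1240, and returns 0x1230.
 */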
210 | |||
211 | /** | ||
212 | * cz_ih_decode_iv - decode an interrupt vector | ||
213 | * | ||
214 | * @adev: amdgpu_device pointer | ||
215 | * | ||
216 | * Decodes the interrupt vector at the current rptr | ||
217 | * position and also advances the position. | ||
218 | */ | ||
219 | static void cz_ih_decode_iv(struct amdgpu_device *adev, | ||
220 | struct amdgpu_iv_entry *entry) | ||
221 | { | ||
222 | /* wptr/rptr are in bytes! */ | ||
223 | u32 ring_index = adev->irq.ih.rptr >> 2; | ||
224 | uint32_t dw[4]; | ||
225 | |||
226 | dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); | ||
227 | dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); | ||
228 | dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); | ||
229 | dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); | ||
230 | |||
231 | entry->src_id = dw[0] & 0xff; | ||
232 | entry->src_data = dw[1] & 0xfffffff; | ||
233 | entry->ring_id = dw[2] & 0xff; | ||
234 | entry->vm_id = (dw[2] >> 8) & 0xff; | ||
235 | entry->pas_id = (dw[2] >> 16) & 0xffff; | ||
236 | |||
237 | /* wptr/rptr are in bytes! */ | ||
238 | adev->irq.ih.rptr += 16; | ||
239 | } | ||
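/*
 * Example (illustrative): a 16-byte vector of { 0x000000c4, 0x00000001,
 * 0x00010200, 0x00000000 } decodes to src_id 0xc4, src_data 0x1,
 * ring_id 0x0, vm_id 0x2, pas_id 0x1, and rptr advances by 16 bytes.
 */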
240 | |||
241 | /** | ||
242 | * cz_ih_set_rptr - set the IH ring buffer rptr | ||
243 | * | ||
244 | * @adev: amdgpu_device pointer | ||
245 | * | ||
246 | * Set the IH ring buffer rptr. | ||
247 | */ | ||
248 | static void cz_ih_set_rptr(struct amdgpu_device *adev) | ||
249 | { | ||
250 | WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); | ||
251 | } | ||
252 | |||
253 | static int cz_ih_early_init(struct amdgpu_device *adev) | ||
254 | { | ||
255 | cz_ih_set_interrupt_funcs(adev); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static int cz_ih_sw_init(struct amdgpu_device *adev) | ||
260 | { | ||
261 | int r; | ||
262 | |||
263 | r = amdgpu_ih_ring_init(adev, 64 * 1024, false); | ||
264 | if (r) | ||
265 | return r; | ||
266 | |||
267 | r = amdgpu_irq_init(adev); | ||
268 | |||
269 | return r; | ||
270 | } | ||
271 | |||
272 | static int cz_ih_sw_fini(struct amdgpu_device *adev) | ||
273 | { | ||
274 | amdgpu_irq_fini(adev); | ||
275 | amdgpu_ih_ring_fini(adev); | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | static int cz_ih_hw_init(struct amdgpu_device *adev) | ||
281 | { | ||
282 | int r; | ||
283 | |||
284 | r = cz_ih_irq_init(adev); | ||
285 | if (r) | ||
286 | return r; | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static int cz_ih_hw_fini(struct amdgpu_device *adev) | ||
292 | { | ||
293 | cz_ih_irq_disable(adev); | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int cz_ih_suspend(struct amdgpu_device *adev) | ||
299 | { | ||
300 | return cz_ih_hw_fini(adev); | ||
301 | } | ||
302 | |||
303 | static int cz_ih_resume(struct amdgpu_device *adev) | ||
304 | { | ||
305 | return cz_ih_hw_init(adev); | ||
306 | } | ||
307 | |||
308 | static bool cz_ih_is_idle(struct amdgpu_device *adev) | ||
309 | { | ||
310 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
311 | |||
312 | if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) | ||
313 | return false; | ||
314 | |||
315 | return true; | ||
316 | } | ||
317 | |||
318 | static int cz_ih_wait_for_idle(struct amdgpu_device *adev) | ||
319 | { | ||
320 | unsigned i; | ||
321 | u32 tmp; | ||
322 | |||
323 | for (i = 0; i < adev->usec_timeout; i++) { | ||
324 | /* read SRBM_STATUS */ | ||
325 | tmp = RREG32(mmSRBM_STATUS); | ||
326 | if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) | ||
327 | return 0; | ||
328 | udelay(1); | ||
329 | } | ||
330 | return -ETIMEDOUT; | ||
331 | } | ||
332 | |||
333 | static void cz_ih_print_status(struct amdgpu_device *adev) | ||
334 | { | ||
335 | dev_info(adev->dev, "CZ IH registers\n"); | ||
336 | dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", | ||
337 | RREG32(mmSRBM_STATUS)); | ||
338 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | ||
339 | RREG32(mmSRBM_STATUS2)); | ||
340 | dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", | ||
341 | RREG32(mmINTERRUPT_CNTL)); | ||
342 | dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", | ||
343 | RREG32(mmINTERRUPT_CNTL2)); | ||
344 | dev_info(adev->dev, " IH_CNTL=0x%08X\n", | ||
345 | RREG32(mmIH_CNTL)); | ||
346 | dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", | ||
347 | RREG32(mmIH_RB_CNTL)); | ||
348 | dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", | ||
349 | RREG32(mmIH_RB_BASE)); | ||
350 | dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", | ||
351 | RREG32(mmIH_RB_WPTR_ADDR_LO)); | ||
352 | dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", | ||
353 | RREG32(mmIH_RB_WPTR_ADDR_HI)); | ||
354 | dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", | ||
355 | RREG32(mmIH_RB_RPTR)); | ||
356 | dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", | ||
357 | RREG32(mmIH_RB_WPTR)); | ||
358 | } | ||
359 | |||
360 | static int cz_ih_soft_reset(struct amdgpu_device *adev) | ||
361 | { | ||
362 | u32 srbm_soft_reset = 0; | ||
363 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
364 | |||
365 | if (tmp & SRBM_STATUS__IH_BUSY_MASK) | ||
366 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, | ||
367 | SOFT_RESET_IH, 1); | ||
368 | |||
369 | if (srbm_soft_reset) { | ||
370 | cz_ih_print_status(adev); | ||
371 | |||
372 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
373 | tmp |= srbm_soft_reset; | ||
374 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
375 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
376 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
377 | |||
378 | udelay(50); | ||
379 | |||
380 | tmp &= ~srbm_soft_reset; | ||
381 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
382 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
383 | |||
384 | /* Wait a little for things to settle down */ | ||
385 | udelay(50); | ||
386 | |||
387 | cz_ih_print_status(adev); | ||
388 | } | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static int cz_ih_set_clockgating_state(struct amdgpu_device *adev, | ||
394 | enum amdgpu_clockgating_state state) | ||
395 | { | ||
396 | /* TODO */ | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | static int cz_ih_set_powergating_state(struct amdgpu_device *adev, | ||
401 | enum amdgpu_powergating_state state) | ||
402 | { | ||
403 | /* TODO */ | ||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | const struct amdgpu_ip_funcs cz_ih_ip_funcs = { | ||
408 | .early_init = cz_ih_early_init, | ||
409 | .late_init = NULL, | ||
410 | .sw_init = cz_ih_sw_init, | ||
411 | .sw_fini = cz_ih_sw_fini, | ||
412 | .hw_init = cz_ih_hw_init, | ||
413 | .hw_fini = cz_ih_hw_fini, | ||
414 | .suspend = cz_ih_suspend, | ||
415 | .resume = cz_ih_resume, | ||
416 | .is_idle = cz_ih_is_idle, | ||
417 | .wait_for_idle = cz_ih_wait_for_idle, | ||
418 | .soft_reset = cz_ih_soft_reset, | ||
419 | .print_status = cz_ih_print_status, | ||
420 | .set_clockgating_state = cz_ih_set_clockgating_state, | ||
421 | .set_powergating_state = cz_ih_set_powergating_state, | ||
422 | }; | ||
423 | |||
424 | static const struct amdgpu_ih_funcs cz_ih_funcs = { | ||
425 | .get_wptr = cz_ih_get_wptr, | ||
426 | .decode_iv = cz_ih_decode_iv, | ||
427 | .set_rptr = cz_ih_set_rptr | ||
428 | }; | ||
429 | |||
430 | static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev) | ||
431 | { | ||
432 | if (adev->irq.ih_funcs == NULL) | ||
433 | adev->irq.ih_funcs = &cz_ih_funcs; | ||
434 | } | ||
435 | |||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.h b/drivers/gpu/drm/amd/amdgpu/cz_ih.h new file mode 100644 index 000000000000..1bce136876ff --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __CZ_IH_H__ | ||
25 | #define __CZ_IH_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs cz_ih_ip_funcs; | ||
28 | |||
29 | #endif /* __CZ_IH_H__ */ | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h new file mode 100644 index 000000000000..273616ab43db --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_ppsmc.h | |||
@@ -0,0 +1,185 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef CZ_PP_SMC_H | ||
25 | #define CZ_PP_SMC_H | ||
26 | |||
27 | #pragma pack(push, 1) | ||
28 | |||
29 | /* Fan control algorithm: */ | ||
30 | #define FDO_MODE_HARDWARE 0 | ||
31 | #define FDO_MODE_PIECE_WISE_LINEAR 1 | ||
32 | |||
33 | enum FAN_CONTROL { | ||
34 | FAN_CONTROL_FUZZY, | ||
35 | FAN_CONTROL_TABLE | ||
36 | }; | ||
37 | |||
38 | enum DPM_ARRAY { | ||
39 | DPM_ARRAY_HARD_MAX, | ||
40 | DPM_ARRAY_HARD_MIN, | ||
41 | DPM_ARRAY_SOFT_MAX, | ||
42 | DPM_ARRAY_SOFT_MIN | ||
43 | }; | ||
44 | |||
45 | /* | ||
46 | * Return codes for driver to SMC communication. | ||
47 | * Leave these as #defines; enums might not be exactly 8 bits wide on the microcontroller. | ||
48 | */ | ||
49 | #define PPSMC_Result_OK ((uint16_t)0x01) | ||
50 | #define PPSMC_Result_NoMore ((uint16_t)0x02) | ||
51 | #define PPSMC_Result_NotNow ((uint16_t)0x03) | ||
52 | #define PPSMC_Result_Failed ((uint16_t)0xFF) | ||
53 | #define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) | ||
54 | #define PPSMC_Result_UnknownVT ((uint16_t)0xFD) | ||
55 | |||
56 | #define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) | ||
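/*
 * All failure codes above have bit 7 set, so for example
 * PPSMC_isERROR(PPSMC_Result_Failed) is nonzero while
 * PPSMC_isERROR(PPSMC_Result_OK) evaluates to 0.
 */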
57 | |||
58 | /* | ||
59 | * Supported driver messages | ||
60 | */ | ||
61 | #define PPSMC_MSG_Test ((uint16_t) 0x1) | ||
62 | #define PPSMC_MSG_GetFeatureStatus ((uint16_t) 0x2) | ||
63 | #define PPSMC_MSG_EnableAllSmuFeatures ((uint16_t) 0x3) | ||
64 | #define PPSMC_MSG_DisableAllSmuFeatures ((uint16_t) 0x4) | ||
65 | #define PPSMC_MSG_OptimizeBattery ((uint16_t) 0x5) | ||
66 | #define PPSMC_MSG_MaximizePerf ((uint16_t) 0x6) | ||
67 | #define PPSMC_MSG_UVDPowerOFF ((uint16_t) 0x7) | ||
68 | #define PPSMC_MSG_UVDPowerON ((uint16_t) 0x8) | ||
69 | #define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x9) | ||
70 | #define PPSMC_MSG_VCEPowerON ((uint16_t) 0xA) | ||
71 | #define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0xB) | ||
72 | #define PPSMC_MSG_ACPPowerON ((uint16_t) 0xC) | ||
73 | #define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0xD) | ||
74 | #define PPSMC_MSG_SDMAPowerON ((uint16_t) 0xE) | ||
75 | #define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0xF) | ||
76 | #define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x10) | ||
77 | #define PPSMC_MSG_SetMinDeepSleepSclk ((uint16_t) 0x11) | ||
78 | #define PPSMC_MSG_SetSclkSoftMin ((uint16_t) 0x12) | ||
79 | #define PPSMC_MSG_SetSclkSoftMax ((uint16_t) 0x13) | ||
80 | #define PPSMC_MSG_SetSclkHardMin ((uint16_t) 0x14) | ||
81 | #define PPSMC_MSG_SetSclkHardMax ((uint16_t) 0x15) | ||
82 | #define PPSMC_MSG_SetLclkSoftMin ((uint16_t) 0x16) | ||
83 | #define PPSMC_MSG_SetLclkSoftMax ((uint16_t) 0x17) | ||
84 | #define PPSMC_MSG_SetLclkHardMin ((uint16_t) 0x18) | ||
85 | #define PPSMC_MSG_SetLclkHardMax ((uint16_t) 0x19) | ||
86 | #define PPSMC_MSG_SetUvdSoftMin ((uint16_t) 0x1A) | ||
87 | #define PPSMC_MSG_SetUvdSoftMax ((uint16_t) 0x1B) | ||
88 | #define PPSMC_MSG_SetUvdHardMin ((uint16_t) 0x1C) | ||
89 | #define PPSMC_MSG_SetUvdHardMax ((uint16_t) 0x1D) | ||
90 | #define PPSMC_MSG_SetEclkSoftMin ((uint16_t) 0x1E) | ||
91 | #define PPSMC_MSG_SetEclkSoftMax ((uint16_t) 0x1F) | ||
92 | #define PPSMC_MSG_SetEclkHardMin ((uint16_t) 0x20) | ||
93 | #define PPSMC_MSG_SetEclkHardMax ((uint16_t) 0x21) | ||
94 | #define PPSMC_MSG_SetAclkSoftMin ((uint16_t) 0x22) | ||
95 | #define PPSMC_MSG_SetAclkSoftMax ((uint16_t) 0x23) | ||
96 | #define PPSMC_MSG_SetAclkHardMin ((uint16_t) 0x24) | ||
97 | #define PPSMC_MSG_SetAclkHardMax ((uint16_t) 0x25) | ||
98 | #define PPSMC_MSG_SetNclkSoftMin ((uint16_t) 0x26) | ||
99 | #define PPSMC_MSG_SetNclkSoftMax ((uint16_t) 0x27) | ||
100 | #define PPSMC_MSG_SetNclkHardMin ((uint16_t) 0x28) | ||
101 | #define PPSMC_MSG_SetNclkHardMax ((uint16_t) 0x29) | ||
102 | #define PPSMC_MSG_SetPstateSoftMin ((uint16_t) 0x2A) | ||
103 | #define PPSMC_MSG_SetPstateSoftMax ((uint16_t) 0x2B) | ||
104 | #define PPSMC_MSG_SetPstateHardMin ((uint16_t) 0x2C) | ||
105 | #define PPSMC_MSG_SetPstateHardMax ((uint16_t) 0x2D) | ||
106 | #define PPSMC_MSG_DisableLowMemoryPstate ((uint16_t) 0x2E) | ||
107 | #define PPSMC_MSG_EnableLowMemoryPstate ((uint16_t) 0x2F) | ||
108 | #define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x30) | ||
109 | #define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x31) | ||
110 | #define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x32) | ||
111 | #define PPSMC_MSG_DriverDramAddrHi ((uint16_t) 0x33) | ||
112 | #define PPSMC_MSG_DriverDramAddrLo ((uint16_t) 0x34) | ||
113 | #define PPSMC_MSG_CondExecDramAddrHi ((uint16_t) 0x35) | ||
114 | #define PPSMC_MSG_CondExecDramAddrLo ((uint16_t) 0x36) | ||
115 | #define PPSMC_MSG_LoadUcodes ((uint16_t) 0x37) | ||
116 | #define PPSMC_MSG_DriverResetMode ((uint16_t) 0x38) | ||
117 | #define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x39) | ||
118 | #define PPSMC_MSG_SetDisplayPhyConfig ((uint16_t) 0x3A) | ||
119 | #define PPSMC_MSG_GetMaxSclkLevel ((uint16_t) 0x3B) | ||
120 | #define PPSMC_MSG_GetMaxLclkLevel ((uint16_t) 0x3C) | ||
121 | #define PPSMC_MSG_GetMaxUvdLevel ((uint16_t) 0x3D) | ||
122 | #define PPSMC_MSG_GetMaxEclkLevel ((uint16_t) 0x3E) | ||
123 | #define PPSMC_MSG_GetMaxAclkLevel ((uint16_t) 0x3F) | ||
124 | #define PPSMC_MSG_GetMaxNclkLevel ((uint16_t) 0x40) | ||
125 | #define PPSMC_MSG_GetMaxPstate ((uint16_t) 0x41) | ||
126 | #define PPSMC_MSG_DramAddrHiVirtual ((uint16_t) 0x42) | ||
127 | #define PPSMC_MSG_DramAddrLoVirtual ((uint16_t) 0x43) | ||
128 | #define PPSMC_MSG_DramAddrHiPhysical ((uint16_t) 0x44) | ||
129 | #define PPSMC_MSG_DramAddrLoPhysical ((uint16_t) 0x45) | ||
130 | #define PPSMC_MSG_DramBufferSize ((uint16_t) 0x46) | ||
131 | #define PPSMC_MSG_SetMmPwrLogDramAddrHi ((uint16_t) 0x47) | ||
132 | #define PPSMC_MSG_SetMmPwrLogDramAddrLo ((uint16_t) 0x48) | ||
133 | #define PPSMC_MSG_SetClkTableAddrHi ((uint16_t) 0x49) | ||
134 | #define PPSMC_MSG_SetClkTableAddrLo ((uint16_t) 0x4A) | ||
135 | #define PPSMC_MSG_GetConservativePowerLimit ((uint16_t) 0x4B) | ||
136 | |||
137 | #define PPSMC_MSG_InitJobs ((uint16_t) 0x252) | ||
138 | #define PPSMC_MSG_ExecuteJob ((uint16_t) 0x254) | ||
139 | |||
140 | #define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) | ||
141 | #define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) | ||
142 | |||
143 | #define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) | ||
144 | #define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) | ||
145 | |||
146 | #define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) | ||
147 | #define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) | ||
148 | |||
149 | #define PPSMC_MSG_AllowLowSclkInterrupt ((uint16_t) 0x184) | ||
150 | #define PPSMC_MSG_MmPowerMonitorStart ((uint16_t) 0x18F) | ||
151 | #define PPSMC_MSG_MmPowerMonitorStop ((uint16_t) 0x190) | ||
152 | #define PPSMC_MSG_MmPowerMonitorRestart ((uint16_t) 0x191) | ||
153 | |||
154 | #define PPSMC_MSG_SetClockGateMask ((uint16_t) 0x260) | ||
155 | #define PPSMC_MSG_SetFpsThresholdLo ((uint16_t) 0x264) | ||
156 | #define PPSMC_MSG_SetFpsThresholdHi ((uint16_t) 0x265) | ||
157 | #define PPSMC_MSG_SetLowSclkIntrThreshold ((uint16_t) 0x266) | ||
158 | |||
159 | #define PPSMC_MSG_ClkTableXferToDram ((uint16_t) 0x267) | ||
160 | #define PPSMC_MSG_ClkTableXferToSmu ((uint16_t) 0x268) | ||
161 | #define PPSMC_MSG_GetAverageGraphicsActivity ((uint16_t) 0x269) | ||
162 | #define PPSMC_MSG_GetAverageGioActivity ((uint16_t) 0x26A) | ||
163 | #define PPSMC_MSG_SetLoggerBufferSize ((uint16_t) 0x26B) | ||
164 | #define PPSMC_MSG_SetLoggerAddressHigh ((uint16_t) 0x26C) | ||
165 | #define PPSMC_MSG_SetLoggerAddressLow ((uint16_t) 0x26D) | ||
166 | #define PPSMC_MSG_SetWatermarkFrequency ((uint16_t) 0x26E) | ||
167 | |||
168 | /* REMOVE LATER */ | ||
169 | #define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) | ||
170 | |||
171 | /* Feature Enable Masks*/ | ||
172 | #define NB_DPM_MASK 0x00000800 | ||
173 | #define VDDGFX_MASK 0x00800000 | ||
174 | #define VCE_DPM_MASK 0x00400000 | ||
175 | #define ACP_DPM_MASK 0x00040000 | ||
176 | #define UVD_DPM_MASK 0x00010000 | ||
177 | #define GFX_CU_PG_MASK 0x00004000 | ||
178 | #define SCLK_DPM_MASK 0x00080000 | ||
179 | |||
180 | #if !defined(SMC_MICROCODE) | ||
181 | #pragma pack(pop) | ||
182 | |||
183 | #endif | ||
184 | |||
185 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c new file mode 100644 index 000000000000..a72ffc7d6c26 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c | |||
@@ -0,0 +1,962 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include <linux/firmware.h> | ||
24 | #include "drmP.h" | ||
25 | #include "amdgpu.h" | ||
26 | #include "smu8.h" | ||
27 | #include "smu8_fusion.h" | ||
28 | #include "cz_ppsmc.h" | ||
29 | #include "cz_smumgr.h" | ||
30 | #include "smu_ucode_xfer_cz.h" | ||
31 | #include "amdgpu_ucode.h" | ||
32 | |||
33 | #include "smu/smu_8_0_d.h" | ||
34 | #include "smu/smu_8_0_sh_mask.h" | ||
35 | #include "gca/gfx_8_0_d.h" | ||
36 | #include "gca/gfx_8_0_sh_mask.h" | ||
37 | |||
38 | uint32_t cz_get_argument(struct amdgpu_device *adev) | ||
39 | { | ||
40 | return RREG32(mmSMU_MP1_SRBM2P_ARG_0); | ||
41 | } | ||
42 | |||
43 | static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev) | ||
44 | { | ||
45 | struct cz_smu_private_data *priv = | ||
46 | (struct cz_smu_private_data *)(adev->smu.priv); | ||
47 | |||
48 | return priv; | ||
49 | } | ||
50 | |||
51 | int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg) | ||
52 | { | ||
53 | int i; | ||
54 | u32 content = 0, tmp; | ||
55 | |||
56 | for (i = 0; i < adev->usec_timeout; i++) { | ||
57 | tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0), | ||
58 | SMU_MP1_SRBM2P_RESP_0, CONTENT); | ||
59 | if (content != tmp) | ||
60 | break; | ||
61 | udelay(1); | ||
62 | } | ||
63 | |||
64 | /* a timeout here means the logic is wrong */ | ||
65 | if (i == adev->usec_timeout) | ||
66 | return -EINVAL; | ||
67 | |||
68 | WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0); | ||
69 | WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg); | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg) | ||
75 | { | ||
76 | int i; | ||
77 | u32 content = 0, tmp = 0; | ||
78 | |||
79 | if (cz_send_msg_to_smc_async(adev, msg)) | ||
80 | return -EINVAL; | ||
81 | |||
82 | for (i = 0; i < adev->usec_timeout; i++) { | ||
83 | tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0), | ||
84 | SMU_MP1_SRBM2P_RESP_0, CONTENT); | ||
85 | if (content != tmp) | ||
86 | break; | ||
87 | udelay(1); | ||
88 | } | ||
89 | |||
90 | /* a timeout here means the logic is wrong */ | ||
91 | if (i == adev->usec_timeout) | ||
92 | return -EINVAL; | ||
93 | |||
94 | if (tmp != PPSMC_Result_OK) { | ||
95 | dev_err(adev->dev, "SMC Failed to send Message.\n"); | ||
96 | return -EINVAL; | ||
97 | } | ||
98 | |||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | int cz_send_msg_to_smc_with_parameter_async(struct amdgpu_device *adev, | ||
103 | u16 msg, u32 parameter) | ||
104 | { | ||
105 | WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter); | ||
106 | return cz_send_msg_to_smc_async(adev, msg); | ||
107 | } | ||
108 | |||
109 | int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, | ||
110 | u16 msg, u32 parameter) | ||
111 | { | ||
112 | WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter); | ||
113 | return cz_send_msg_to_smc(adev, msg); | ||
114 | } | ||
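/*
 * Usage sketch (illustrative; "level" stands in for a caller-chosen
 * DPM level index):
 *
 *	int ret = cz_send_msg_to_smc_with_parameter(adev,
 *			PPSMC_MSG_SetSclkSoftMin, level);
 *	if (ret)
 *		dev_err(adev->dev, "SetSclkSoftMin failed (%d)\n", ret);
 */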
115 | |||
116 | static int cz_set_smc_sram_address(struct amdgpu_device *adev, | ||
117 | u32 smc_address, u32 limit) | ||
118 | { | ||
119 | if (smc_address & 3) | ||
120 | return -EINVAL; | ||
121 | if ((smc_address + 3) > limit) | ||
122 | return -EINVAL; | ||
123 | |||
124 | WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address); | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, | ||
130 | u32 *value, u32 limit) | ||
131 | { | ||
132 | int ret; | ||
133 | |||
134 | ret = cz_set_smc_sram_address(adev, smc_address, limit); | ||
135 | if (ret) | ||
136 | return ret; | ||
137 | |||
138 | *value = RREG32(mmMP0PUB_IND_DATA_0); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, | ||
144 | u32 value, u32 limit) | ||
145 | { | ||
146 | int ret; | ||
147 | |||
148 | ret = cz_set_smc_sram_address(adev, smc_address, limit); | ||
149 | if (ret) | ||
150 | return ret; | ||
151 | |||
152 | WREG32(mmMP0PUB_IND_DATA_0, value); | ||
153 | |||
154 | return 0; | ||
155 | } | ||
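/*
 * The two helpers above pair up for a read-modify-write of one SMU SRAM
 * dword; a sketch (illustrative, SOME_MASK is hypothetical):
 *
 *	u32 val;
 *
 *	if (!cz_read_smc_sram_dword(adev, addr, &val, limit)) {
 *		val |= SOME_MASK;
 *		cz_write_smc_sram_dword(adev, addr, val, limit);
 *	}
 */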
156 | |||
157 | static int cz_smu_request_load_fw(struct amdgpu_device *adev) | ||
158 | { | ||
159 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
160 | |||
161 | uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION + | ||
162 | offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); | ||
163 | |||
164 | cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4); | ||
165 | |||
166 | /* prepare toc buffers */ | ||
167 | cz_send_msg_to_smc_with_parameter(adev, | ||
168 | PPSMC_MSG_DriverDramAddrHi, | ||
169 | priv->toc_buffer.mc_addr_high); | ||
170 | cz_send_msg_to_smc_with_parameter(adev, | ||
171 | PPSMC_MSG_DriverDramAddrLo, | ||
172 | priv->toc_buffer.mc_addr_low); | ||
173 | cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs); | ||
174 | |||
175 | /* execute jobs */ | ||
176 | cz_send_msg_to_smc_with_parameter(adev, | ||
177 | PPSMC_MSG_ExecuteJob, | ||
178 | priv->toc_entry_aram); | ||
179 | |||
180 | cz_send_msg_to_smc_with_parameter(adev, | ||
181 | PPSMC_MSG_ExecuteJob, | ||
182 | priv->toc_entry_power_profiling_index); | ||
183 | |||
184 | cz_send_msg_to_smc_with_parameter(adev, | ||
185 | PPSMC_MSG_ExecuteJob, | ||
186 | priv->toc_entry_initialize_index); | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 |  * Check if the FW has been loaded; the SMU will not return if loading | ||
193 |  * has not finished. | ||
194 | */ | ||
195 | static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev, | ||
196 | uint32_t fw_mask) | ||
197 | { | ||
198 | int i; | ||
199 | uint32_t index = SMN_MP1_SRAM_START_ADDR + | ||
200 | SMU8_FIRMWARE_HEADER_LOCATION + | ||
201 | offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus); | ||
202 | |||
203 | WREG32(mmMP0PUB_IND_INDEX, index); | ||
204 | |||
205 | for (i = 0; i < adev->usec_timeout; i++) { | ||
206 | if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask)) | ||
207 | break; | ||
208 | udelay(1); | ||
209 | } | ||
210 | |||
211 | if (i >= adev->usec_timeout) { | ||
212 | dev_err(adev->dev, | ||
213 | "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x\n", | ||
214 | fw_mask, RREG32(mmMP0PUB_IND_DATA)); | ||
215 | return -EINVAL; | ||
216 | } | ||
217 | |||
218 | return 0; | ||
219 | } | ||
220 | |||
221 | /* | ||
222 | * interface for the IP blocks to check firmware loading status: | ||
223 | * returns 0 if the firmware is loaded, nonzero otherwise | ||
224 | */ | ||
225 | static int cz_smu_check_finished(struct amdgpu_device *adev, | ||
226 | enum AMDGPU_UCODE_ID id) | ||
227 | { | ||
228 | switch (id) { | ||
229 | case AMDGPU_UCODE_ID_SDMA0: | ||
230 | if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED) | ||
231 | return 0; | ||
232 | break; | ||
233 | case AMDGPU_UCODE_ID_SDMA1: | ||
234 | if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED) | ||
235 | return 0; | ||
236 | break; | ||
237 | case AMDGPU_UCODE_ID_CP_CE: | ||
238 | if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED) | ||
239 | return 0; | ||
240 | break; | ||
241 | case AMDGPU_UCODE_ID_CP_PFP: | ||
242 | if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED) | ||
243 | 			return 0; | ||
244 | 		break; | ||
244 | case AMDGPU_UCODE_ID_CP_ME: | ||
245 | if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED) | ||
246 | return 0; | ||
247 | break; | ||
248 | case AMDGPU_UCODE_ID_CP_MEC1: | ||
249 | if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED) | ||
250 | return 0; | ||
251 | break; | ||
252 | case AMDGPU_UCODE_ID_CP_MEC2: | ||
253 | if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED) | ||
254 | return 0; | ||
255 | break; | ||
256 | case AMDGPU_UCODE_ID_RLC_G: | ||
257 | if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED) | ||
258 | return 0; | ||
259 | break; | ||
260 | case AMDGPU_UCODE_ID_MAXIMUM: | ||
261 | default: | ||
262 | break; | ||
263 | } | ||
264 | |||
265 | return 1; | ||
266 | } | ||
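/*
 * IP blocks reach cz_smu_check_finished() through the smumgr hook
 * installed at the bottom of this file; a sketch (illustrative):
 *
 *	if (adev->smu.smumgr_funcs->check_fw_load_finish(adev,
 *			AMDGPU_UCODE_ID_SDMA0))
 *		return -EINVAL;
 */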
267 | |||
268 | static int cz_load_mec_firmware(struct amdgpu_device *adev) | ||
269 | { | ||
270 | struct amdgpu_firmware_info *ucode = | ||
271 | &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; | ||
272 | uint32_t reg_data; | ||
273 | uint32_t tmp; | ||
274 | |||
275 | if (ucode->fw == NULL) | ||
276 | return -EINVAL; | ||
277 | |||
278 | /* Disable MEC parsing/prefetching */ | ||
279 | tmp = RREG32(mmCP_MEC_CNTL); | ||
280 | tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); | ||
281 | tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); | ||
282 | WREG32(mmCP_MEC_CNTL, tmp); | ||
283 | |||
284 | tmp = RREG32(mmCP_CPC_IC_BASE_CNTL); | ||
285 | tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0); | ||
286 | tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0); | ||
287 | tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); | ||
288 | tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1); | ||
289 | WREG32(mmCP_CPC_IC_BASE_CNTL, tmp); | ||
290 | |||
291 | reg_data = lower_32_bits(ucode->mc_addr) & | ||
292 | REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO); | ||
293 | WREG32(mmCP_CPC_IC_BASE_LO, reg_data); | ||
294 | |||
295 | reg_data = upper_32_bits(ucode->mc_addr) & | ||
296 | REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI); | ||
297 | WREG32(mmCP_CPC_IC_BASE_HI, reg_data); | ||
298 | |||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | int cz_smu_start(struct amdgpu_device *adev) | ||
303 | { | ||
304 | int ret = 0; | ||
305 | |||
306 | uint32_t fw_to_check = UCODE_ID_RLC_G_MASK | | ||
307 | UCODE_ID_SDMA0_MASK | | ||
308 | UCODE_ID_SDMA1_MASK | | ||
309 | UCODE_ID_CP_CE_MASK | | ||
310 | UCODE_ID_CP_ME_MASK | | ||
311 | UCODE_ID_CP_PFP_MASK | | ||
312 | UCODE_ID_CP_MEC_JT1_MASK | | ||
313 | UCODE_ID_CP_MEC_JT2_MASK; | ||
314 | |||
315 | cz_smu_request_load_fw(adev); | ||
316 | ret = cz_smu_check_fw_load_finish(adev, fw_to_check); | ||
317 | if (ret) | ||
318 | return ret; | ||
319 | |||
320 | /* manually load MEC firmware for CZ */ | ||
321 | if (adev->asic_type == CHIP_CARRIZO) { | ||
322 | ret = cz_load_mec_firmware(adev); | ||
323 | if (ret) { | ||
324 | dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret); | ||
325 | return ret; | ||
326 | } | ||
327 | } | ||
328 | |||
329 | /* setup fw load flag */ | ||
330 | adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED | | ||
331 | AMDGPU_SDMA1_UCODE_LOADED | | ||
332 | AMDGPU_CPCE_UCODE_LOADED | | ||
333 | AMDGPU_CPPFP_UCODE_LOADED | | ||
334 | AMDGPU_CPME_UCODE_LOADED | | ||
335 | AMDGPU_CPMEC1_UCODE_LOADED | | ||
336 | AMDGPU_CPMEC2_UCODE_LOADED | | ||
337 | AMDGPU_CPRLC_UCODE_LOADED; | ||
338 | |||
339 | return ret; | ||
340 | } | ||
341 | |||
342 | static uint32_t cz_convert_fw_type(uint32_t fw_type) | ||
343 | { | ||
344 | enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; | ||
345 | |||
346 | switch (fw_type) { | ||
347 | case UCODE_ID_SDMA0: | ||
348 | result = AMDGPU_UCODE_ID_SDMA0; | ||
349 | break; | ||
350 | case UCODE_ID_SDMA1: | ||
351 | result = AMDGPU_UCODE_ID_SDMA1; | ||
352 | break; | ||
353 | case UCODE_ID_CP_CE: | ||
354 | result = AMDGPU_UCODE_ID_CP_CE; | ||
355 | break; | ||
356 | case UCODE_ID_CP_PFP: | ||
357 | result = AMDGPU_UCODE_ID_CP_PFP; | ||
358 | break; | ||
359 | case UCODE_ID_CP_ME: | ||
360 | result = AMDGPU_UCODE_ID_CP_ME; | ||
361 | break; | ||
362 | case UCODE_ID_CP_MEC_JT1: | ||
363 | case UCODE_ID_CP_MEC_JT2: | ||
364 | result = AMDGPU_UCODE_ID_CP_MEC1; | ||
365 | break; | ||
366 | case UCODE_ID_RLC_G: | ||
367 | result = AMDGPU_UCODE_ID_RLC_G; | ||
368 | break; | ||
369 | default: | ||
370 | DRM_ERROR("UCode type is out of range!"); | ||
371 | } | ||
372 | |||
373 | return result; | ||
374 | } | ||
375 | |||
376 | static uint8_t cz_smu_translate_firmware_enum_to_arg( | ||
377 | enum cz_scratch_entry firmware_enum) | ||
378 | { | ||
379 | uint8_t ret = 0; | ||
380 | |||
381 | switch (firmware_enum) { | ||
382 | case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0: | ||
383 | ret = UCODE_ID_SDMA0; | ||
384 | break; | ||
385 | case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1: | ||
386 | ret = UCODE_ID_SDMA1; | ||
387 | break; | ||
388 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE: | ||
389 | ret = UCODE_ID_CP_CE; | ||
390 | break; | ||
391 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP: | ||
392 | ret = UCODE_ID_CP_PFP; | ||
393 | break; | ||
394 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME: | ||
395 | ret = UCODE_ID_CP_ME; | ||
396 | break; | ||
397 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1: | ||
398 | ret = UCODE_ID_CP_MEC_JT1; | ||
399 | break; | ||
400 | case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2: | ||
401 | ret = UCODE_ID_CP_MEC_JT2; | ||
402 | break; | ||
403 | case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG: | ||
404 | ret = UCODE_ID_GMCON_RENG; | ||
405 | break; | ||
406 | case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G: | ||
407 | ret = UCODE_ID_RLC_G; | ||
408 | break; | ||
409 | case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH: | ||
410 | ret = UCODE_ID_RLC_SCRATCH; | ||
411 | break; | ||
412 | case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM: | ||
413 | ret = UCODE_ID_RLC_SRM_ARAM; | ||
414 | break; | ||
415 | case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM: | ||
416 | ret = UCODE_ID_RLC_SRM_DRAM; | ||
417 | break; | ||
418 | case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM: | ||
419 | ret = UCODE_ID_DMCU_ERAM; | ||
420 | break; | ||
421 | case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM: | ||
422 | ret = UCODE_ID_DMCU_IRAM; | ||
423 | break; | ||
424 | case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING: | ||
425 | ret = TASK_ARG_INIT_MM_PWR_LOG; | ||
426 | break; | ||
427 | case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT: | ||
428 | case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING: | ||
429 | case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS: | ||
430 | case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT: | ||
431 | case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START: | ||
432 | case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS: | ||
433 | ret = TASK_ARG_REG_MMIO; | ||
434 | break; | ||
435 | case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE: | ||
436 | ret = TASK_ARG_INIT_CLK_TABLE; | ||
437 | break; | ||
438 | } | ||
439 | |||
440 | return ret; | ||
441 | } | ||
442 | |||
443 | static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev, | ||
444 | enum cz_scratch_entry firmware_enum, | ||
445 | struct cz_buffer_entry *entry) | ||
446 | { | ||
447 | uint64_t gpu_addr; | ||
448 | uint32_t data_size; | ||
449 | uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum); | ||
450 | enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id); | ||
451 | struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; | ||
452 | const struct gfx_firmware_header_v1_0 *header; | ||
453 | |||
454 | if (ucode->fw == NULL) | ||
455 | return -EINVAL; | ||
456 | |||
457 | gpu_addr = ucode->mc_addr; | ||
458 | header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; | ||
459 | data_size = le32_to_cpu(header->header.ucode_size_bytes); | ||
460 | |||
461 | if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) || | ||
462 | (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) { | ||
463 | gpu_addr += le32_to_cpu(header->jt_offset) << 2; | ||
464 | data_size = le32_to_cpu(header->jt_size) << 2; | ||
465 | } | ||
466 | |||
467 | entry->mc_addr_low = lower_32_bits(gpu_addr); | ||
468 | entry->mc_addr_high = upper_32_bits(gpu_addr); | ||
469 | entry->data_size = data_size; | ||
470 | entry->firmware_ID = firmware_enum; | ||
471 | |||
472 | return 0; | ||
473 | } | ||
474 | |||
475 | static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev, | ||
476 | enum cz_scratch_entry scratch_type, | ||
477 | uint32_t size_in_byte, | ||
478 | struct cz_buffer_entry *entry) | ||
479 | { | ||
480 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
481 | uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) | | ||
482 | priv->smu_buffer.mc_addr_low; | ||
483 | mc_addr += size_in_byte; | ||
484 | |||
485 | priv->smu_buffer_used_bytes += size_in_byte; | ||
486 | entry->data_size = size_in_byte; | ||
487 | entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes; | ||
488 | entry->mc_addr_low = lower_32_bits(mc_addr); | ||
489 | entry->mc_addr_high = upper_32_bits(mc_addr); | ||
490 | entry->firmware_ID = scratch_type; | ||
491 | |||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev, | ||
496 | enum cz_scratch_entry firmware_enum, | ||
497 | bool is_last) | ||
498 | { | ||
499 | uint8_t i; | ||
500 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
501 | struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; | ||
502 | struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++]; | ||
503 | |||
504 | task->type = TASK_TYPE_UCODE_LOAD; | ||
505 | task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum); | ||
506 | task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count; | ||
507 | |||
508 | for (i = 0; i < priv->driver_buffer_length; i++) | ||
509 | if (priv->driver_buffer[i].firmware_ID == firmware_enum) | ||
510 | break; | ||
511 | |||
512 | if (i >= priv->driver_buffer_length) { | ||
513 | dev_err(adev->dev, "Invalid Firmware Type\n"); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | |||
517 | task->addr.low = priv->driver_buffer[i].mc_addr_low; | ||
518 | task->addr.high = priv->driver_buffer[i].mc_addr_high; | ||
519 | task->size_bytes = priv->driver_buffer[i].data_size; | ||
520 | |||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev, | ||
525 | enum cz_scratch_entry firmware_enum, | ||
526 | uint8_t type, bool is_last) | ||
527 | { | ||
528 | uint8_t i; | ||
529 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
530 | struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; | ||
531 | struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++]; | ||
532 | |||
533 | task->type = type; | ||
534 | task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum); | ||
535 | task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count; | ||
536 | |||
537 | for (i = 0; i < priv->scratch_buffer_length; i++) | ||
538 | if (priv->scratch_buffer[i].firmware_ID == firmware_enum) | ||
539 | break; | ||
540 | |||
541 | if (i >= priv->scratch_buffer_length) { | ||
542 | dev_err(adev->dev, "Invalid Firmware Type\n"); | ||
543 | return -EINVAL; | ||
544 | } | ||
545 | |||
546 | task->addr.low = priv->scratch_buffer[i].mc_addr_low; | ||
547 | task->addr.high = priv->scratch_buffer[i].mc_addr_high; | ||
548 | task->size_bytes = priv->scratch_buffer[i].data_size; | ||
549 | |||
550 | 	if (firmware_enum == CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS) { | ||
551 | struct cz_ih_meta_data *pIHReg_restore = | ||
552 | (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr; | ||
553 | pIHReg_restore->command = | ||
554 | METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD; | ||
555 | } | ||
556 | |||
557 | return 0; | ||
558 | } | ||
559 | |||
560 | static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev) | ||
561 | { | ||
562 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
563 | priv->toc_entry_aram = priv->toc_entry_used_count; | ||
564 | cz_smu_populate_single_scratch_task(adev, | ||
565 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, | ||
566 | TASK_TYPE_UCODE_SAVE, true); | ||
567 | |||
568 | return 0; | ||
569 | } | ||
570 | |||
571 | static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev) | ||
572 | { | ||
573 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
574 | struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; | ||
575 | |||
576 | toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count; | ||
577 | cz_smu_populate_single_scratch_task(adev, | ||
578 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | ||
579 | TASK_TYPE_UCODE_SAVE, false); | ||
580 | cz_smu_populate_single_scratch_task(adev, | ||
581 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, | ||
582 | TASK_TYPE_UCODE_SAVE, true); | ||
583 | |||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev) | ||
588 | { | ||
589 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
590 | struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; | ||
591 | |||
592 | toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count; | ||
593 | |||
594 | /* populate ucode */ | ||
595 | if (adev->firmware.smu_load) { | ||
596 | cz_smu_populate_single_ucode_load_task(adev, | ||
597 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); | ||
598 | cz_smu_populate_single_ucode_load_task(adev, | ||
599 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); | ||
600 | cz_smu_populate_single_ucode_load_task(adev, | ||
601 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); | ||
602 | cz_smu_populate_single_ucode_load_task(adev, | ||
603 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | ||
604 | cz_smu_populate_single_ucode_load_task(adev, | ||
605 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); | ||
606 | cz_smu_populate_single_ucode_load_task(adev, | ||
607 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false); | ||
608 | } | ||
609 | |||
610 | /* populate scratch */ | ||
611 | cz_smu_populate_single_scratch_task(adev, | ||
612 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | ||
613 | TASK_TYPE_UCODE_LOAD, false); | ||
614 | cz_smu_populate_single_scratch_task(adev, | ||
615 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, | ||
616 | TASK_TYPE_UCODE_LOAD, false); | ||
617 | cz_smu_populate_single_scratch_task(adev, | ||
618 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, | ||
619 | TASK_TYPE_UCODE_LOAD, true); | ||
620 | |||
621 | return 0; | ||
622 | } | ||
623 | |||
624 | static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev) | ||
625 | { | ||
626 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
627 | |||
628 | priv->toc_entry_power_profiling_index = priv->toc_entry_used_count; | ||
629 | |||
630 | cz_smu_populate_single_scratch_task(adev, | ||
631 | CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, | ||
632 | TASK_TYPE_INITIALIZE, true); | ||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev) | ||
637 | { | ||
638 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
639 | |||
640 | priv->toc_entry_initialize_index = priv->toc_entry_used_count; | ||
641 | |||
642 | if (adev->firmware.smu_load) { | ||
643 | cz_smu_populate_single_ucode_load_task(adev, | ||
644 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false); | ||
645 | cz_smu_populate_single_ucode_load_task(adev, | ||
646 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false); | ||
647 | cz_smu_populate_single_ucode_load_task(adev, | ||
648 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false); | ||
649 | cz_smu_populate_single_ucode_load_task(adev, | ||
650 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false); | ||
651 | cz_smu_populate_single_ucode_load_task(adev, | ||
652 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false); | ||
653 | cz_smu_populate_single_ucode_load_task(adev, | ||
654 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false); | ||
655 | cz_smu_populate_single_ucode_load_task(adev, | ||
656 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false); | ||
657 | cz_smu_populate_single_ucode_load_task(adev, | ||
658 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true); | ||
659 | } | ||
660 | |||
661 | return 0; | ||
662 | } | ||
663 | |||
664 | static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev) | ||
665 | { | ||
666 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
667 | |||
668 | priv->toc_entry_clock_table = priv->toc_entry_used_count; | ||
669 | |||
670 | cz_smu_populate_single_scratch_task(adev, | ||
671 | CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, | ||
672 | TASK_TYPE_INITIALIZE, true); | ||
673 | |||
674 | return 0; | ||
675 | } | ||
676 | |||
677 | static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev) | ||
678 | { | ||
679 | int i; | ||
680 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
681 | struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr; | ||
682 | |||
683 | for (i = 0; i < NUM_JOBLIST_ENTRIES; i++) | ||
684 | toc->JobList[i] = (uint8_t)IGNORE_JOB; | ||
685 | |||
686 | return 0; | ||
687 | } | ||
688 | |||
689 | /* | ||
690 | * cz smu uninitialization | ||
691 | */ | ||
692 | int cz_smu_fini(struct amdgpu_device *adev) | ||
693 | { | ||
694 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
695 | amdgpu_bo_unref(&adev->smu.smu_buf); | ||
696 | kfree(adev->smu.priv); | ||
697 | adev->smu.priv = NULL; | ||
698 | if (adev->firmware.smu_load) | ||
699 | amdgpu_ucode_fini_bo(adev); | ||
700 | |||
701 | return 0; | ||
702 | } | ||
703 | |||
704 | int cz_smu_download_pptable(struct amdgpu_device *adev, void **table) | ||
705 | { | ||
706 | uint8_t i; | ||
707 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
708 | |||
709 | for (i = 0; i < priv->scratch_buffer_length; i++) | ||
710 | if (priv->scratch_buffer[i].firmware_ID == | ||
711 | CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) | ||
712 | break; | ||
713 | |||
714 | if (i >= priv->scratch_buffer_length) { | ||
715 | dev_err(adev->dev, "Invalid Scratch Type\n"); | ||
716 | return -EINVAL; | ||
717 | } | ||
718 | |||
719 | *table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr; | ||
720 | |||
721 | /* prepare buffer for pptable */ | ||
722 | cz_send_msg_to_smc_with_parameter(adev, | ||
723 | PPSMC_MSG_SetClkTableAddrHi, | ||
724 | priv->scratch_buffer[i].mc_addr_high); | ||
725 | cz_send_msg_to_smc_with_parameter(adev, | ||
726 | PPSMC_MSG_SetClkTableAddrLo, | ||
727 | priv->scratch_buffer[i].mc_addr_low); | ||
728 | cz_send_msg_to_smc_with_parameter(adev, | ||
729 | PPSMC_MSG_ExecuteJob, | ||
730 | priv->toc_entry_clock_table); | ||
731 | |||
732 | /* actual downloading */ | ||
733 | cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram); | ||
734 | |||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | int cz_smu_upload_pptable(struct amdgpu_device *adev) | ||
739 | { | ||
740 | uint8_t i; | ||
741 | struct cz_smu_private_data *priv = cz_smu_get_priv(adev); | ||
742 | |||
743 | for (i = 0; i < priv->scratch_buffer_length; i++) | ||
744 | if (priv->scratch_buffer[i].firmware_ID == | ||
745 | CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE) | ||
746 | break; | ||
747 | |||
748 | if (i >= priv->scratch_buffer_length) { | ||
749 | dev_err(adev->dev, "Invalid Scratch Type\n"); | ||
750 | return -EINVAL; | ||
751 | } | ||
752 | |||
753 | /* prepare SMU */ | ||
754 | cz_send_msg_to_smc_with_parameter(adev, | ||
755 | PPSMC_MSG_SetClkTableAddrHi, | ||
756 | priv->scratch_buffer[i].mc_addr_high); | ||
757 | cz_send_msg_to_smc_with_parameter(adev, | ||
758 | PPSMC_MSG_SetClkTableAddrLo, | ||
759 | priv->scratch_buffer[i].mc_addr_low); | ||
760 | cz_send_msg_to_smc_with_parameter(adev, | ||
761 | PPSMC_MSG_ExecuteJob, | ||
762 | priv->toc_entry_clock_table); | ||
763 | |||
764 | /* actual uploading */ | ||
765 | cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu); | ||
766 | |||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | /* | ||
771 | * cz smumgr functions initialization | ||
772 | */ | ||
773 | static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = { | ||
774 | .check_fw_load_finish = cz_smu_check_finished, | ||
775 | .request_smu_load_fw = NULL, | ||
776 | .request_smu_specific_fw = NULL, | ||
777 | }; | ||
778 | |||
779 | /* | ||
780 | * cz smu initialization | ||
781 | */ | ||
782 | int cz_smu_init(struct amdgpu_device *adev) | ||
783 | { | ||
784 | int ret = -EINVAL; | ||
785 | uint64_t mc_addr = 0; | ||
786 | struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; | ||
787 | struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; | ||
788 | void *toc_buf_ptr = NULL; | ||
789 | void *smu_buf_ptr = NULL; | ||
790 | |||
791 | struct cz_smu_private_data *priv = | ||
792 | kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL); | ||
793 | if (priv == NULL) | ||
794 | return -ENOMEM; | ||
795 | |||
796 | /* allocate firmware buffers */ | ||
797 | if (adev->firmware.smu_load) | ||
798 | amdgpu_ucode_init_bo(adev); | ||
799 | |||
800 | adev->smu.priv = priv; | ||
801 | adev->smu.fw_flags = 0; | ||
802 | priv->toc_buffer.data_size = 4096; | ||
803 | |||
804 | priv->smu_buffer.data_size = | ||
805 | ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) + | ||
806 | ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) + | ||
807 | ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) + | ||
808 | ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) + | ||
809 | ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32); | ||
810 | |||
811 | /* prepare toc buffer and smu buffer: | ||
812 | * 1. create amdgpu_bo for toc buffer and smu buffer | ||
813 | * 2. pin mc address | ||
814 | * 3. map kernel virtual address | ||
815 | */ | ||
816 | ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE, | ||
817 | true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf); | ||
818 | |||
819 | if (ret) { | ||
820 | dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret); | ||
821 | return ret; | ||
822 | } | ||
823 | |||
824 | ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE, | ||
825 | true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf); | ||
826 | |||
827 | if (ret) { | ||
828 | dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret); | ||
829 | return ret; | ||
830 | } | ||
831 | |||
832 | /* toc buffer reserve/pin/map */ | ||
833 | ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); | ||
834 | if (ret) { | ||
835 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
836 | dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret); | ||
837 | return ret; | ||
838 | } | ||
839 | |||
840 | ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr); | ||
841 | if (ret) { | ||
842 | amdgpu_bo_unreserve(adev->smu.toc_buf); | ||
843 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
844 | dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret); | ||
845 | return ret; | ||
846 | } | ||
847 | |||
848 | ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); | ||
849 | if (ret) | ||
850 | goto smu_init_failed; | ||
851 | |||
852 | amdgpu_bo_unreserve(adev->smu.toc_buf); | ||
853 | |||
854 | priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr); | ||
855 | priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr); | ||
856 | priv->toc_buffer.kaddr = toc_buf_ptr; | ||
857 | |||
858 | /* smu buffer reserve/pin/map */ | ||
859 | ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); | ||
860 | if (ret) { | ||
861 | amdgpu_bo_unref(&adev->smu.smu_buf); | ||
862 | dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret); | ||
863 | return ret; | ||
864 | } | ||
865 | |||
866 | ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr); | ||
867 | if (ret) { | ||
868 | amdgpu_bo_unreserve(adev->smu.smu_buf); | ||
869 | amdgpu_bo_unref(&adev->smu.smu_buf); | ||
870 | dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret); | ||
871 | return ret; | ||
872 | } | ||
873 | |||
874 | ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); | ||
875 | if (ret) | ||
876 | goto smu_init_failed; | ||
877 | |||
878 | amdgpu_bo_unreserve(adev->smu.smu_buf); | ||
879 | |||
880 | priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr); | ||
881 | priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr); | ||
882 | priv->smu_buffer.kaddr = smu_buf_ptr; | ||
883 | |||
884 | if (adev->firmware.smu_load) { | ||
885 | if (cz_smu_populate_single_firmware_entry(adev, | ||
886 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, | ||
887 | &priv->driver_buffer[priv->driver_buffer_length++])) | ||
888 | goto smu_init_failed; | ||
889 | if (cz_smu_populate_single_firmware_entry(adev, | ||
890 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, | ||
891 | &priv->driver_buffer[priv->driver_buffer_length++])) | ||
892 | goto smu_init_failed; | ||
893 | if (cz_smu_populate_single_firmware_entry(adev, | ||
894 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, | ||
895 | &priv->driver_buffer[priv->driver_buffer_length++])) | ||
896 | goto smu_init_failed; | ||
897 | if (cz_smu_populate_single_firmware_entry(adev, | ||
898 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, | ||
899 | &priv->driver_buffer[priv->driver_buffer_length++])) | ||
900 | goto smu_init_failed; | ||
901 | if (cz_smu_populate_single_firmware_entry(adev, | ||
902 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, | ||
903 | &priv->driver_buffer[priv->driver_buffer_length++])) | ||
904 | goto smu_init_failed; | ||
905 | if (cz_smu_populate_single_firmware_entry(adev, | ||
906 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, | ||
907 | &priv->driver_buffer[priv->driver_buffer_length++])) | ||
908 | goto smu_init_failed; | ||
909 | if (cz_smu_populate_single_firmware_entry(adev, | ||
910 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, | ||
911 | &priv->driver_buffer[priv->driver_buffer_length++])) | ||
912 | goto smu_init_failed; | ||
913 | if (cz_smu_populate_single_firmware_entry(adev, | ||
914 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, | ||
915 | &priv->driver_buffer[priv->driver_buffer_length++])) | ||
916 | goto smu_init_failed; | ||
917 | } | ||
918 | |||
919 | if (cz_smu_populate_single_scratch_entry(adev, | ||
920 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | ||
921 | UCODE_ID_RLC_SCRATCH_SIZE_BYTE, | ||
922 | &priv->scratch_buffer[priv->scratch_buffer_length++])) | ||
923 | goto smu_init_failed; | ||
924 | if (cz_smu_populate_single_scratch_entry(adev, | ||
925 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, | ||
926 | UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, | ||
927 | &priv->scratch_buffer[priv->scratch_buffer_length++])) | ||
928 | goto smu_init_failed; | ||
929 | if (cz_smu_populate_single_scratch_entry(adev, | ||
930 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, | ||
931 | UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, | ||
932 | &priv->scratch_buffer[priv->scratch_buffer_length++])) | ||
933 | goto smu_init_failed; | ||
934 | if (cz_smu_populate_single_scratch_entry(adev, | ||
935 | CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, | ||
936 | sizeof(struct SMU8_MultimediaPowerLogData), | ||
937 | &priv->scratch_buffer[priv->scratch_buffer_length++])) | ||
938 | goto smu_init_failed; | ||
939 | if (cz_smu_populate_single_scratch_entry(adev, | ||
940 | CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE, | ||
941 | sizeof(struct SMU8_Fusion_ClkTable), | ||
942 | &priv->scratch_buffer[priv->scratch_buffer_length++])) | ||
943 | goto smu_init_failed; | ||
944 | |||
945 | cz_smu_initialize_toc_empty_job_list(adev); | ||
946 | cz_smu_construct_toc_for_rlc_aram_save(adev); | ||
947 | cz_smu_construct_toc_for_vddgfx_enter(adev); | ||
948 | cz_smu_construct_toc_for_vddgfx_exit(adev); | ||
949 | cz_smu_construct_toc_for_power_profiling(adev); | ||
950 | cz_smu_construct_toc_for_bootup(adev); | ||
951 | cz_smu_construct_toc_for_clock_table(adev); | ||
952 | /* init the smumgr functions */ | ||
953 | adev->smu.smumgr_funcs = &cz_smumgr_funcs; | ||
954 | |||
955 | return 0; | ||
956 | |||
957 | smu_init_failed: | ||
958 | amdgpu_bo_unref(toc_buf); | ||
959 | amdgpu_bo_unref(smu_buf); | ||
960 | |||
961 | return ret; | ||
962 | } | ||
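
Both buffers above follow the same amdgpu buffer-object lifecycle: create, reserve, pin (which yields the MC address handed to the SMU), kmap (for the CPU-side view), then unreserve. A minimal sketch of that pattern follows, with an invented helper name and a full unwind path; the amdgpu_bo_* signatures are assumed to be those used in this patch.

    /* Sketch only: the create/reserve/pin/kmap lifecycle used for the TOC
     * and SMU buffers above, with the unwind each step needs.  The helper
     * name is a placeholder, not part of the patch.
     */
    static int example_create_pinned_bo(struct amdgpu_device *adev,
                                        unsigned size, struct amdgpu_bo **bo,
                                        u64 *mc_addr, void **cpu_addr)
    {
        int r;

        r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
                             AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
        if (r)
            return r;

        r = amdgpu_bo_reserve(*bo, false);
        if (r)
            goto err_unref;

        r = amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_GTT, mc_addr);
        if (r)
            goto err_unreserve;

        r = amdgpu_bo_kmap(*bo, cpu_addr);
        if (r)
            goto err_unpin;

        amdgpu_bo_unreserve(*bo);
        return 0;

    err_unpin:
        amdgpu_bo_unpin(*bo);
    err_unreserve:
        amdgpu_bo_unreserve(*bo);
    err_unref:
        amdgpu_bo_unref(bo);
        return r;
    }
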
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h new file mode 100644 index 000000000000..924d355b4e2c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/cz_smumgr.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #ifndef __CZ_SMC_H__ | ||
24 | #define __CZ_SMC_H__ | ||
25 | |||
26 | #define MAX_NUM_FIRMWARE 8 | ||
27 | #define MAX_NUM_SCRATCH 11 | ||
28 | #define CZ_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024 | ||
29 | #define CZ_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048 | ||
30 | #define CZ_SCRATCH_SIZE_SDMA_METADATA 1024 | ||
31 | #define CZ_SCRATCH_SIZE_IH ((2*256+1)*4) | ||
32 | |||
33 | enum cz_scratch_entry { | ||
34 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0, | ||
35 | CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, | ||
36 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, | ||
37 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, | ||
38 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, | ||
39 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, | ||
40 | CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, | ||
41 | CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG, | ||
42 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, | ||
43 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, | ||
44 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, | ||
45 | CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, | ||
46 | CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM, | ||
47 | CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM, | ||
48 | CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, | ||
49 | CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT, | ||
50 | CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING, | ||
51 | CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS, | ||
52 | CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT, | ||
53 | CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START, | ||
54 | CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS, | ||
55 | CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE | ||
56 | }; | ||
57 | |||
58 | struct cz_buffer_entry { | ||
59 | uint32_t data_size; | ||
60 | uint32_t mc_addr_low; | ||
61 | uint32_t mc_addr_high; | ||
62 | void *kaddr; | ||
63 | enum cz_scratch_entry firmware_ID; | ||
64 | }; | ||
65 | |||
66 | struct cz_register_index_data_pair { | ||
67 | uint32_t offset; | ||
68 | uint32_t value; | ||
69 | }; | ||
70 | |||
71 | struct cz_ih_meta_data { | ||
72 | uint32_t command; | ||
73 | struct cz_register_index_data_pair register_index_value_pair[1]; | ||
74 | }; | ||
75 | |||
76 | struct cz_smu_private_data { | ||
77 | uint8_t driver_buffer_length; | ||
78 | uint8_t scratch_buffer_length; | ||
79 | uint16_t toc_entry_used_count; | ||
80 | uint16_t toc_entry_initialize_index; | ||
81 | uint16_t toc_entry_power_profiling_index; | ||
82 | uint16_t toc_entry_aram; | ||
83 | uint16_t toc_entry_ih_register_restore_task_index; | ||
84 | uint16_t toc_entry_clock_table; | ||
85 | uint16_t ih_register_restore_task_size; | ||
86 | uint16_t smu_buffer_used_bytes; | ||
87 | |||
88 | struct cz_buffer_entry toc_buffer; | ||
89 | struct cz_buffer_entry smu_buffer; | ||
90 | struct cz_buffer_entry driver_buffer[MAX_NUM_FIRMWARE]; | ||
91 | struct cz_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; | ||
92 | }; | ||
93 | |||
94 | #endif | ||
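
A cz_buffer_entry carries its GPU (MC) address split into two 32-bit words, since the SMU firmware interface takes addresses as a low/high pair. A hypothetical fill helper (not in the patch), mirroring the inline assignments in the init code above:

    /* Sketch (not in the patch): filling a cz_buffer_entry from a pinned
     * BO, as cz_smu_init does inline for the TOC and SMU buffers.
     */
    static void example_fill_buffer_entry(struct cz_buffer_entry *entry,
                                          uint32_t size, uint64_t mc_addr,
                                          void *kaddr)
    {
        entry->data_size = size;
        entry->mc_addr_low = lower_32_bits(mc_addr);  /* SMU takes the   */
        entry->mc_addr_high = upper_32_bits(mc_addr); /* address as two  */
        entry->kaddr = kaddr;                         /* 32-bit words    */
    }
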
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c new file mode 100644 index 000000000000..d412291ed70e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -0,0 +1,3871 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include "drmP.h" | ||
24 | #include "amdgpu.h" | ||
25 | #include "amdgpu_pm.h" | ||
26 | #include "amdgpu_i2c.h" | ||
27 | #include "vid.h" | ||
28 | #include "atom.h" | ||
29 | #include "amdgpu_atombios.h" | ||
30 | #include "atombios_crtc.h" | ||
31 | #include "atombios_encoders.h" | ||
32 | #include "amdgpu_pll.h" | ||
33 | #include "amdgpu_connectors.h" | ||
34 | |||
35 | #include "dce/dce_10_0_d.h" | ||
36 | #include "dce/dce_10_0_sh_mask.h" | ||
37 | #include "dce/dce_10_0_enum.h" | ||
38 | #include "oss/oss_3_0_d.h" | ||
39 | #include "oss/oss_3_0_sh_mask.h" | ||
40 | #include "gmc/gmc_8_1_d.h" | ||
41 | #include "gmc/gmc_8_1_sh_mask.h" | ||
42 | |||
43 | static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev); | ||
44 | static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev); | ||
45 | |||
46 | static const u32 crtc_offsets[] = | ||
47 | { | ||
48 | CRTC0_REGISTER_OFFSET, | ||
49 | CRTC1_REGISTER_OFFSET, | ||
50 | CRTC2_REGISTER_OFFSET, | ||
51 | CRTC3_REGISTER_OFFSET, | ||
52 | CRTC4_REGISTER_OFFSET, | ||
53 | CRTC5_REGISTER_OFFSET, | ||
54 | CRTC6_REGISTER_OFFSET | ||
55 | }; | ||
56 | |||
57 | static const u32 hpd_offsets[] = | ||
58 | { | ||
59 | HPD0_REGISTER_OFFSET, | ||
60 | HPD1_REGISTER_OFFSET, | ||
61 | HPD2_REGISTER_OFFSET, | ||
62 | HPD3_REGISTER_OFFSET, | ||
63 | HPD4_REGISTER_OFFSET, | ||
64 | HPD5_REGISTER_OFFSET | ||
65 | }; | ||
66 | |||
67 | static const uint32_t dig_offsets[] = { | ||
68 | DIG0_REGISTER_OFFSET, | ||
69 | DIG1_REGISTER_OFFSET, | ||
70 | DIG2_REGISTER_OFFSET, | ||
71 | DIG3_REGISTER_OFFSET, | ||
72 | DIG4_REGISTER_OFFSET, | ||
73 | DIG5_REGISTER_OFFSET, | ||
74 | DIG6_REGISTER_OFFSET | ||
75 | }; | ||
76 | |||
77 | static const struct { | ||
78 | uint32_t reg; | ||
79 | uint32_t vblank; | ||
80 | uint32_t vline; | ||
81 | uint32_t hpd; | ||
82 | |||
83 | } interrupt_status_offsets[] = { { | ||
84 | .reg = mmDISP_INTERRUPT_STATUS, | ||
85 | .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, | ||
86 | .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, | ||
87 | .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK | ||
88 | }, { | ||
89 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, | ||
90 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, | ||
91 | .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, | ||
92 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK | ||
93 | }, { | ||
94 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, | ||
95 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, | ||
96 | .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, | ||
97 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK | ||
98 | }, { | ||
99 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, | ||
100 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, | ||
101 | .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, | ||
102 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK | ||
103 | }, { | ||
104 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, | ||
105 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, | ||
106 | .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, | ||
107 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK | ||
108 | }, { | ||
109 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, | ||
110 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, | ||
111 | .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, | ||
112 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK | ||
113 | } }; | ||
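
Each entry of interrupt_status_offsets pairs a DISP_INTERRUPT_STATUS* register with the vblank/vline/hpd bit masks that live in it, so interrupt handling can be written per-CRTC rather than per-register. A hedged usage sketch, with a hypothetical helper name:

    /* Sketch (illustrative, hypothetical helper): testing whether CRTC n
     * has a pending vblank interrupt using the table above.
     */
    static bool example_crtc_vblank_pending(struct amdgpu_device *adev,
                                            int crtc)
    {
        u32 status = RREG32(interrupt_status_offsets[crtc].reg);

        return (status & interrupt_status_offsets[crtc].vblank) != 0;
    }
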
114 | |||
115 | static const u32 golden_settings_tonga_a11[] = | ||
116 | { | ||
117 | mmDCI_CLK_CNTL, 0x00000080, 0x00000000, | ||
118 | mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, | ||
119 | mmFBC_MISC, 0x1f311fff, 0x12300000, | ||
120 | mmHDMI_CONTROL, 0x31000111, 0x00000011, | ||
121 | }; | ||
122 | |||
123 | static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev) | ||
124 | { | ||
125 | switch (adev->asic_type) { | ||
126 | case CHIP_TONGA: | ||
127 | amdgpu_program_register_sequence(adev, | ||
128 | golden_settings_tonga_a11, | ||
129 | (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); | ||
130 | break; | ||
131 | default: | ||
132 | break; | ||
133 | } | ||
134 | } | ||
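
The golden-settings table is consumed three u32s at a time; as far as I recall, amdgpu_program_register_sequence treats each triple as (register offset, and_mask, or_mask) and does a read-modify-write. A rough, simplified sketch of those semantics (see amdgpu_device.c for the real implementation):

    /* Rough sketch of the per-triple read-modify-write applied to tables
     * like golden_settings_tonga_a11 above (simplified).
     */
    static void example_program_golden_triples(struct amdgpu_device *adev,
                                               const u32 *registers,
                                               u32 array_size)
    {
        u32 i;

        for (i = 0; i < array_size; i += 3) {
            u32 reg      = registers[i + 0];
            u32 and_mask = registers[i + 1];
            u32 or_mask  = registers[i + 2];
            u32 tmp      = RREG32(reg);

            tmp &= ~and_mask; /* clear the bits covered by the mask */
            tmp |= or_mask;   /* then set the golden values */
            WREG32(reg, tmp);
        }
    }
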
135 | |||
136 | static u32 dce_v10_0_audio_endpt_rreg(struct amdgpu_device *adev, | ||
137 | u32 block_offset, u32 reg) | ||
138 | { | ||
139 | unsigned long flags; | ||
140 | u32 r; | ||
141 | |||
142 | spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); | ||
143 | WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); | ||
144 | r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); | ||
145 | spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); | ||
146 | |||
147 | return r; | ||
148 | } | ||
149 | |||
150 | static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev, | ||
151 | u32 block_offset, u32 reg, u32 v) | ||
152 | { | ||
153 | unsigned long flags; | ||
154 | |||
155 | spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); | ||
156 | WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); | ||
157 | WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); | ||
158 | spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); | ||
159 | } | ||
160 | |||
161 | static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc) | ||
162 | { | ||
163 | if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & | ||
164 | CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK) | ||
165 | return true; | ||
166 | else | ||
167 | return false; | ||
168 | } | ||
169 | |||
170 | static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc) | ||
171 | { | ||
172 | u32 pos1, pos2; | ||
173 | |||
174 | pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); | ||
175 | pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); | ||
176 | |||
177 | if (pos1 != pos2) | ||
178 | return true; | ||
179 | else | ||
180 | return false; | ||
181 | } | ||
182 | |||
183 | /** | ||
184 | * dce_v10_0_vblank_wait - vblank wait asic callback. | ||
185 | * | ||
186 | * @adev: amdgpu_device pointer | ||
187 | * @crtc: crtc to wait for vblank on | ||
188 | * | ||
189 | * Wait for vblank on the requested crtc (evergreen+). | ||
190 | */ | ||
191 | static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc) | ||
192 | { | ||
193 | unsigned i = 0; | ||
194 | |||
195 | if (crtc >= adev->mode_info.num_crtc) | ||
196 | return; | ||
197 | |||
198 | if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) | ||
199 | return; | ||
200 | |||
201 | /* depending on when we hit vblank, we may be close to active; if so, | ||
202 | * wait for another frame. | ||
203 | */ | ||
204 | while (dce_v10_0_is_in_vblank(adev, crtc)) { | ||
205 | if (i++ % 100 == 0) { | ||
206 | if (!dce_v10_0_is_counter_moving(adev, crtc)) | ||
207 | break; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | while (!dce_v10_0_is_in_vblank(adev, crtc)) { | ||
212 | if (i++ % 100 == 0) { | ||
213 | if (!dce_v10_0_is_counter_moving(adev, crtc)) | ||
214 | break; | ||
215 | } | ||
216 | } | ||
217 | } | ||
218 | |||
219 | static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) | ||
220 | { | ||
221 | if (crtc >= adev->mode_info.num_crtc) | ||
222 | return 0; | ||
223 | else | ||
224 | return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); | ||
225 | } | ||
226 | |||
227 | /** | ||
228 | * dce_v10_0_page_flip - pageflip callback. | ||
229 | * | ||
230 | * @adev: amdgpu_device pointer | ||
231 | * @crtc_id: crtc to perform the pageflip on | ||
232 | * @crtc_base: new address of the crtc (GPU MC address) | ||
233 | * | ||
234 | * Does the actual pageflip (evergreen+). | ||
235 | * During vblank we take the crtc lock and wait for the update_pending | ||
236 | * bit to go high, when it does, we release the lock, and allow the | ||
237 | * double buffered update to take place. | ||
238 | * The function returns nothing; the update-pending status is only polled internally. | ||
239 | */ | ||
240 | static void dce_v10_0_page_flip(struct amdgpu_device *adev, | ||
241 | int crtc_id, u64 crtc_base) | ||
242 | { | ||
243 | struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; | ||
244 | u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset); | ||
245 | int i; | ||
246 | |||
247 | /* Lock the graphics update lock */ | ||
248 | tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); | ||
249 | WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); | ||
250 | |||
251 | /* update the scanout addresses */ | ||
252 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
253 | upper_32_bits(crtc_base)); | ||
254 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
255 | lower_32_bits(crtc_base)); | ||
256 | |||
257 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
258 | upper_32_bits(crtc_base)); | ||
259 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
260 | lower_32_bits(crtc_base)); | ||
261 | |||
262 | /* Wait for update_pending to go high. */ | ||
263 | for (i = 0; i < adev->usec_timeout; i++) { | ||
264 | if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) & | ||
265 | GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) | ||
266 | break; | ||
267 | udelay(1); | ||
268 | } | ||
269 | DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); | ||
270 | |||
271 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
272 | tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); | ||
273 | WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); | ||
274 | } | ||
275 | |||
276 | static int dce_v10_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, | ||
277 | u32 *vbl, u32 *position) | ||
278 | { | ||
279 | if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) | ||
280 | return -EINVAL; | ||
281 | |||
282 | *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); | ||
283 | *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * dce_v10_0_hpd_sense - hpd sense callback. | ||
290 | * | ||
291 | * @adev: amdgpu_device pointer | ||
292 | * @hpd: hpd (hotplug detect) pin | ||
293 | * | ||
294 | * Checks if a digital monitor is connected (evergreen+). | ||
295 | * Returns true if connected, false if not connected. | ||
296 | */ | ||
297 | static bool dce_v10_0_hpd_sense(struct amdgpu_device *adev, | ||
298 | enum amdgpu_hpd_id hpd) | ||
299 | { | ||
300 | int idx; | ||
301 | bool connected = false; | ||
302 | |||
303 | switch (hpd) { | ||
304 | case AMDGPU_HPD_1: | ||
305 | idx = 0; | ||
306 | break; | ||
307 | case AMDGPU_HPD_2: | ||
308 | idx = 1; | ||
309 | break; | ||
310 | case AMDGPU_HPD_3: | ||
311 | idx = 2; | ||
312 | break; | ||
313 | case AMDGPU_HPD_4: | ||
314 | idx = 3; | ||
315 | break; | ||
316 | case AMDGPU_HPD_5: | ||
317 | idx = 4; | ||
318 | break; | ||
319 | case AMDGPU_HPD_6: | ||
320 | idx = 5; | ||
321 | break; | ||
322 | default: | ||
323 | return connected; | ||
324 | } | ||
325 | |||
326 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & | ||
327 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) | ||
328 | connected = true; | ||
329 | |||
330 | return connected; | ||
331 | } | ||
332 | |||
333 | /** | ||
334 | * dce_v10_0_hpd_set_polarity - hpd set polarity callback. | ||
335 | * | ||
336 | * @adev: amdgpu_device pointer | ||
337 | * @hpd: hpd (hotplug detect) pin | ||
338 | * | ||
339 | * Set the polarity of the hpd pin (evergreen+). | ||
340 | */ | ||
341 | static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev, | ||
342 | enum amdgpu_hpd_id hpd) | ||
343 | { | ||
344 | u32 tmp; | ||
345 | bool connected = dce_v10_0_hpd_sense(adev, hpd); | ||
346 | int idx; | ||
347 | |||
348 | switch (hpd) { | ||
349 | case AMDGPU_HPD_1: | ||
350 | idx = 0; | ||
351 | break; | ||
352 | case AMDGPU_HPD_2: | ||
353 | idx = 1; | ||
354 | break; | ||
355 | case AMDGPU_HPD_3: | ||
356 | idx = 2; | ||
357 | break; | ||
358 | case AMDGPU_HPD_4: | ||
359 | idx = 3; | ||
360 | break; | ||
361 | case AMDGPU_HPD_5: | ||
362 | idx = 4; | ||
363 | break; | ||
364 | case AMDGPU_HPD_6: | ||
365 | idx = 5; | ||
366 | break; | ||
367 | default: | ||
368 | return; | ||
369 | } | ||
370 | |||
371 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | ||
372 | if (connected) | ||
373 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); | ||
374 | else | ||
375 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); | ||
376 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | ||
377 | } | ||
378 | |||
379 | /** | ||
380 | * dce_v10_0_hpd_init - hpd setup callback. | ||
381 | * | ||
382 | * @adev: amdgpu_device pointer | ||
383 | * | ||
384 | * Setup the hpd pins used by the card (evergreen+). | ||
385 | * Enable the pin, set the polarity, and enable the hpd interrupts. | ||
386 | */ | ||
387 | static void dce_v10_0_hpd_init(struct amdgpu_device *adev) | ||
388 | { | ||
389 | struct drm_device *dev = adev->ddev; | ||
390 | struct drm_connector *connector; | ||
391 | u32 tmp; | ||
392 | int idx; | ||
393 | |||
394 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
395 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
396 | |||
397 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | ||
398 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | ||
399 | /* don't try to enable hpd on eDP or LVDS; this avoids breaking the | ||
400 | * aux dp channel on iMacs and helps (but does not completely fix) | ||
401 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143. | ||
402 | * It also avoids interrupt storms during dpms. | ||
403 | */ | ||
404 | continue; | ||
405 | } | ||
406 | |||
407 | switch (amdgpu_connector->hpd.hpd) { | ||
408 | case AMDGPU_HPD_1: | ||
409 | idx = 0; | ||
410 | break; | ||
411 | case AMDGPU_HPD_2: | ||
412 | idx = 1; | ||
413 | break; | ||
414 | case AMDGPU_HPD_3: | ||
415 | idx = 2; | ||
416 | break; | ||
417 | case AMDGPU_HPD_4: | ||
418 | idx = 3; | ||
419 | break; | ||
420 | case AMDGPU_HPD_5: | ||
421 | idx = 4; | ||
422 | break; | ||
423 | case AMDGPU_HPD_6: | ||
424 | idx = 5; | ||
425 | break; | ||
426 | default: | ||
427 | continue; | ||
428 | } | ||
429 | |||
430 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | ||
431 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); | ||
432 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | ||
433 | |||
434 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); | ||
435 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | ||
436 | DC_HPD_CONNECT_INT_DELAY, | ||
437 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); | ||
438 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | ||
439 | DC_HPD_DISCONNECT_INT_DELAY, | ||
440 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); | ||
441 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); | ||
442 | |||
443 | dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); | ||
444 | amdgpu_irq_get(adev, &adev->hpd_irq, | ||
445 | amdgpu_connector->hpd.hpd); | ||
446 | } | ||
447 | } | ||
448 | |||
449 | /** | ||
450 | * dce_v10_0_hpd_fini - hpd tear down callback. | ||
451 | * | ||
452 | * @adev: amdgpu_device pointer | ||
453 | * | ||
454 | * Tear down the hpd pins used by the card (evergreen+). | ||
455 | * Disable the hpd interrupts. | ||
456 | */ | ||
457 | static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) | ||
458 | { | ||
459 | struct drm_device *dev = adev->ddev; | ||
460 | struct drm_connector *connector; | ||
461 | u32 tmp; | ||
462 | int idx; | ||
463 | |||
464 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
465 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
466 | |||
467 | switch (amdgpu_connector->hpd.hpd) { | ||
468 | case AMDGPU_HPD_1: | ||
469 | idx = 0; | ||
470 | break; | ||
471 | case AMDGPU_HPD_2: | ||
472 | idx = 1; | ||
473 | break; | ||
474 | case AMDGPU_HPD_3: | ||
475 | idx = 2; | ||
476 | break; | ||
477 | case AMDGPU_HPD_4: | ||
478 | idx = 3; | ||
479 | break; | ||
480 | case AMDGPU_HPD_5: | ||
481 | idx = 4; | ||
482 | break; | ||
483 | case AMDGPU_HPD_6: | ||
484 | idx = 5; | ||
485 | break; | ||
486 | default: | ||
487 | continue; | ||
488 | } | ||
489 | |||
490 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | ||
491 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); | ||
492 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | ||
493 | |||
494 | amdgpu_irq_put(adev, &adev->hpd_irq, | ||
495 | amdgpu_connector->hpd.hpd); | ||
496 | } | ||
497 | } | ||
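
Note that the AMDGPU_HPD_n-to-index switch above appears verbatim in dce_v10_0_hpd_sense, dce_v10_0_hpd_set_polarity, dce_v10_0_hpd_init and dce_v10_0_hpd_fini; a hypothetical helper (not part of this patch) would express the mapping once:

    /* Hypothetical helper (not in this patch): the HPD pin -> register
     * offset index mapping that the four functions above each open-code.
     * Returns -1 for anything out of range.
     */
    static int example_hpd_to_idx(enum amdgpu_hpd_id hpd)
    {
        switch (hpd) {
        case AMDGPU_HPD_1: return 0;
        case AMDGPU_HPD_2: return 1;
        case AMDGPU_HPD_3: return 2;
        case AMDGPU_HPD_4: return 3;
        case AMDGPU_HPD_5: return 4;
        case AMDGPU_HPD_6: return 5;
        default: return -1;
        }
    }
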
498 | |||
499 | static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev) | ||
500 | { | ||
501 | return mmDC_GPIO_HPD_A; | ||
502 | } | ||
503 | |||
504 | static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev) | ||
505 | { | ||
506 | u32 crtc_hung = 0; | ||
507 | u32 crtc_status[6]; | ||
508 | u32 i, j, tmp; | ||
509 | |||
510 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
511 | tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); | ||
512 | if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) { | ||
513 | crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); | ||
514 | crtc_hung |= (1 << i); | ||
515 | } | ||
516 | } | ||
517 | |||
518 | for (j = 0; j < 10; j++) { | ||
519 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
520 | if (crtc_hung & (1 << i)) { | ||
521 | tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); | ||
522 | if (tmp != crtc_status[i]) | ||
523 | crtc_hung &= ~(1 << i); | ||
524 | } | ||
525 | } | ||
526 | if (crtc_hung == 0) | ||
527 | return false; | ||
528 | udelay(100); | ||
529 | } | ||
530 | |||
531 | return true; | ||
532 | } | ||
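
For reference, the sampling window here is bounded: each enabled CRTC's HV counter is read once up front and then re-read up to 10 times at 100 us intervals, so a controller is declared hung only if its counter stays frozen for roughly a millisecond.
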
533 | |||
534 | static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev, | ||
535 | struct amdgpu_mode_mc_save *save) | ||
536 | { | ||
537 | u32 crtc_enabled, tmp; | ||
538 | int i; | ||
539 | |||
540 | save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); | ||
541 | save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); | ||
542 | |||
543 | /* disable VGA render */ | ||
544 | tmp = RREG32(mmVGA_RENDER_CONTROL); | ||
545 | tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); | ||
546 | WREG32(mmVGA_RENDER_CONTROL, tmp); | ||
547 | |||
548 | /* blank the display controllers */ | ||
549 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
550 | crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), | ||
551 | CRTC_CONTROL, CRTC_MASTER_EN); | ||
552 | if (crtc_enabled) { | ||
553 | #if 0 | ||
554 | u32 frame_count; | ||
555 | int j; | ||
556 | |||
557 | save->crtc_enabled[i] = true; | ||
558 | tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); | ||
559 | if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { | ||
560 | amdgpu_display_vblank_wait(adev, i); | ||
561 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); | ||
562 | tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); | ||
563 | WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); | ||
564 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); | ||
565 | } | ||
566 | /* wait for the next frame */ | ||
567 | frame_count = amdgpu_display_vblank_get_counter(adev, i); | ||
568 | for (j = 0; j < adev->usec_timeout; j++) { | ||
569 | if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) | ||
570 | break; | ||
571 | udelay(1); | ||
572 | } | ||
573 | tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); | ||
574 | if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { | ||
575 | tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); | ||
576 | WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); | ||
577 | } | ||
578 | tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); | ||
579 | if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { | ||
580 | tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); | ||
581 | WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); | ||
582 | } | ||
583 | #else | ||
584 | /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ | ||
585 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); | ||
586 | tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); | ||
587 | tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); | ||
588 | WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); | ||
589 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); | ||
590 | save->crtc_enabled[i] = false; | ||
592 | #endif | ||
593 | } else { | ||
594 | save->crtc_enabled[i] = false; | ||
595 | } | ||
596 | } | ||
597 | } | ||
598 | |||
599 | static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev, | ||
600 | struct amdgpu_mode_mc_save *save) | ||
601 | { | ||
602 | u32 tmp, frame_count; | ||
603 | int i, j; | ||
604 | |||
605 | /* update crtc base addresses */ | ||
606 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
607 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], | ||
608 | upper_32_bits(adev->mc.vram_start)); | ||
609 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], | ||
610 | upper_32_bits(adev->mc.vram_start)); | ||
611 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], | ||
612 | (u32)adev->mc.vram_start); | ||
613 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], | ||
614 | (u32)adev->mc.vram_start); | ||
615 | |||
616 | if (save->crtc_enabled[i]) { | ||
617 | tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); | ||
618 | if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { | ||
619 | tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); | ||
620 | WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); | ||
621 | } | ||
622 | tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); | ||
623 | if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { | ||
624 | tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); | ||
625 | WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); | ||
626 | } | ||
627 | tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); | ||
628 | if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { | ||
629 | tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); | ||
630 | WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); | ||
631 | } | ||
632 | for (j = 0; j < adev->usec_timeout; j++) { | ||
633 | tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); | ||
634 | if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) | ||
635 | break; | ||
636 | udelay(1); | ||
637 | } | ||
638 | tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); | ||
639 | tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); | ||
640 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); | ||
641 | WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); | ||
642 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); | ||
643 | /* wait for the next frame */ | ||
644 | frame_count = amdgpu_display_vblank_get_counter(adev, i); | ||
645 | for (j = 0; j < adev->usec_timeout; j++) { | ||
646 | if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) | ||
647 | break; | ||
648 | udelay(1); | ||
649 | } | ||
650 | } | ||
651 | } | ||
652 | |||
653 | WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); | ||
654 | WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); | ||
655 | |||
656 | /* Unlock vga access */ | ||
657 | WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); | ||
658 | mdelay(1); | ||
659 | WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); | ||
660 | } | ||
661 | |||
662 | static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev, | ||
663 | bool render) | ||
664 | { | ||
665 | u32 tmp; | ||
666 | |||
667 | /* Lock out access through the VGA aperture */ | ||
668 | tmp = RREG32(mmVGA_HDP_CONTROL); | ||
669 | if (render) | ||
670 | tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); | ||
671 | else | ||
672 | tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); | ||
673 | WREG32(mmVGA_HDP_CONTROL, tmp); | ||
674 | |||
675 | /* disable VGA render */ | ||
676 | tmp = RREG32(mmVGA_RENDER_CONTROL); | ||
677 | if (render) | ||
678 | tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); | ||
679 | else | ||
680 | tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); | ||
681 | WREG32(mmVGA_RENDER_CONTROL, tmp); | ||
682 | } | ||
683 | |||
684 | static void dce_v10_0_program_fmt(struct drm_encoder *encoder) | ||
685 | { | ||
686 | struct drm_device *dev = encoder->dev; | ||
687 | struct amdgpu_device *adev = dev->dev_private; | ||
688 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
689 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
690 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
691 | int bpc = 0; | ||
692 | u32 tmp = 0; | ||
693 | enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; | ||
694 | |||
695 | if (connector) { | ||
696 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
697 | bpc = amdgpu_connector_get_monitor_bpc(connector); | ||
698 | dither = amdgpu_connector->dither; | ||
699 | } | ||
700 | |||
701 | /* LVDS/eDP FMT is set up by atom */ | ||
702 | if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) | ||
703 | return; | ||
704 | |||
705 | /* not needed for analog */ | ||
706 | if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || | ||
707 | (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) | ||
708 | return; | ||
709 | |||
710 | if (bpc == 0) | ||
711 | return; | ||
712 | |||
713 | switch (bpc) { | ||
714 | case 6: | ||
715 | if (dither == AMDGPU_FMT_DITHER_ENABLE) { | ||
716 | /* XXX sort out optimal dither settings */ | ||
717 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); | ||
718 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); | ||
719 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); | ||
720 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0); | ||
721 | } else { | ||
722 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); | ||
723 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0); | ||
724 | } | ||
725 | break; | ||
726 | case 8: | ||
727 | if (dither == AMDGPU_FMT_DITHER_ENABLE) { | ||
728 | /* XXX sort out optimal dither settings */ | ||
729 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); | ||
730 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); | ||
731 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); | ||
732 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); | ||
733 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1); | ||
734 | } else { | ||
735 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); | ||
736 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1); | ||
737 | } | ||
738 | break; | ||
739 | case 10: | ||
740 | if (dither == AMDGPU_FMT_DITHER_ENABLE) { | ||
741 | /* XXX sort out optimal dither settings */ | ||
742 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); | ||
743 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); | ||
744 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); | ||
745 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); | ||
746 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2); | ||
747 | } else { | ||
748 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); | ||
749 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2); | ||
750 | } | ||
751 | break; | ||
752 | default: | ||
753 | /* not needed */ | ||
754 | break; | ||
755 | } | ||
756 | |||
757 | WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
758 | } | ||
759 | |||
760 | |||
761 | /* display watermark setup */ | ||
762 | /** | ||
763 | * dce_v10_0_line_buffer_adjust - Set up the line buffer | ||
764 | * | ||
765 | * @adev: amdgpu_device pointer | ||
766 | * @amdgpu_crtc: the selected display controller | ||
767 | * @mode: the current display mode on the selected display | ||
768 | * controller | ||
769 | * | ||
770 | * Set up the line buffer allocation for | ||
771 | * the selected display controller (CIK). | ||
772 | * Returns the line buffer size in pixels. | ||
773 | */ | ||
774 | static u32 dce_v10_0_line_buffer_adjust(struct amdgpu_device *adev, | ||
775 | struct amdgpu_crtc *amdgpu_crtc, | ||
776 | struct drm_display_mode *mode) | ||
777 | { | ||
778 | u32 tmp, buffer_alloc, i, mem_cfg; | ||
779 | u32 pipe_offset = amdgpu_crtc->crtc_id; | ||
780 | /* | ||
781 | * Line Buffer Setup | ||
782 | * There are 6 line buffers, one for each display controller. | ||
783 | * There are 3 partitions per LB. Select the number of partitions | ||
784 | * to enable based on the display width. For display widths larger | ||
785 | * than 4096, you need to use 2 display controllers and combine | ||
786 | * them using the stereo blender. | ||
787 | */ | ||
788 | if (amdgpu_crtc->base.enabled && mode) { | ||
789 | if (mode->crtc_hdisplay < 1920) { | ||
790 | mem_cfg = 1; | ||
791 | buffer_alloc = 2; | ||
792 | } else if (mode->crtc_hdisplay < 2560) { | ||
793 | mem_cfg = 2; | ||
794 | buffer_alloc = 2; | ||
795 | } else if (mode->crtc_hdisplay < 4096) { | ||
796 | mem_cfg = 0; | ||
797 | buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; | ||
798 | } else { | ||
799 | DRM_DEBUG_KMS("Mode too big for LB!\n"); | ||
800 | mem_cfg = 0; | ||
801 | buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; | ||
802 | } | ||
803 | } else { | ||
804 | mem_cfg = 1; | ||
805 | buffer_alloc = 0; | ||
806 | } | ||
807 | |||
808 | tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset); | ||
809 | tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg); | ||
810 | WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp); | ||
811 | |||
812 | tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); | ||
813 | tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc); | ||
814 | WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp); | ||
815 | |||
816 | for (i = 0; i < adev->usec_timeout; i++) { | ||
817 | tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); | ||
818 | if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED)) | ||
819 | break; | ||
820 | udelay(1); | ||
821 | } | ||
822 | |||
823 | if (amdgpu_crtc->base.enabled && mode) { | ||
824 | switch (mem_cfg) { | ||
825 | case 0: | ||
826 | default: | ||
827 | return 4096 * 2; | ||
828 | case 1: | ||
829 | return 1920 * 2; | ||
830 | case 2: | ||
831 | return 2560 * 2; | ||
832 | } | ||
833 | } | ||
834 | |||
835 | /* controller not enabled, so no lb used */ | ||
836 | return 0; | ||
837 | } | ||
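
A worked example of the branches above (numbers straight from the code): a 1920x1080 mode has crtc_hdisplay == 1920, which is not < 1920 but is < 2560, so mem_cfg = 2 and buffer_alloc = 2, and the function reports 2560 * 2 = 5120 pixels of line buffer; a 1366-wide panel takes the first branch (mem_cfg = 1) and gets 1920 * 2 = 3840 pixels.
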
838 | |||
839 | /** | ||
840 | * cik_get_number_of_dram_channels - get the number of dram channels | ||
841 | * | ||
842 | * @adev: amdgpu_device pointer | ||
843 | * | ||
844 | * Look up the number of video ram channels (CIK). | ||
845 | * Used for display watermark bandwidth calculations | ||
846 | * Returns the number of dram channels | ||
847 | */ | ||
848 | static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) | ||
849 | { | ||
850 | u32 tmp = RREG32(mmMC_SHARED_CHMAP); | ||
851 | |||
852 | switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { | ||
853 | case 0: | ||
854 | default: | ||
855 | return 1; | ||
856 | case 1: | ||
857 | return 2; | ||
858 | case 2: | ||
859 | return 4; | ||
860 | case 3: | ||
861 | return 8; | ||
862 | case 4: | ||
863 | return 3; | ||
864 | case 5: | ||
865 | return 6; | ||
866 | case 6: | ||
867 | return 10; | ||
868 | case 7: | ||
869 | return 12; | ||
870 | case 8: | ||
871 | return 16; | ||
872 | } | ||
873 | } | ||
874 | |||
875 | struct dce10_wm_params { | ||
876 | u32 dram_channels; /* number of dram channels */ | ||
877 | u32 yclk; /* bandwidth per dram data pin in kHz */ | ||
878 | u32 sclk; /* engine clock in kHz */ | ||
879 | u32 disp_clk; /* display clock in kHz */ | ||
880 | u32 src_width; /* viewport width */ | ||
881 | u32 active_time; /* active display time in ns */ | ||
882 | u32 blank_time; /* blank time in ns */ | ||
883 | bool interlaced; /* mode is interlaced */ | ||
884 | fixed20_12 vsc; /* vertical scale ratio */ | ||
885 | u32 num_heads; /* number of active crtcs */ | ||
886 | u32 bytes_per_pixel; /* bytes per pixel display + overlay */ | ||
887 | u32 lb_size; /* line buffer allocated to pipe */ | ||
888 | u32 vtaps; /* vertical scaler taps */ | ||
889 | }; | ||
890 | |||
891 | /** | ||
892 | * dce_v10_0_dram_bandwidth - get the dram bandwidth | ||
893 | * | ||
894 | * @wm: watermark calculation data | ||
895 | * | ||
896 | * Calculate the raw dram bandwidth (CIK). | ||
897 | * Used for display watermark bandwidth calculations | ||
898 | * Returns the dram bandwidth in MBytes/s | ||
899 | */ | ||
900 | static u32 dce_v10_0_dram_bandwidth(struct dce10_wm_params *wm) | ||
901 | { | ||
902 | /* Calculate raw DRAM Bandwidth */ | ||
903 | fixed20_12 dram_efficiency; /* 0.7 */ | ||
904 | fixed20_12 yclk, dram_channels, bandwidth; | ||
905 | fixed20_12 a; | ||
906 | |||
907 | a.full = dfixed_const(1000); | ||
908 | yclk.full = dfixed_const(wm->yclk); | ||
909 | yclk.full = dfixed_div(yclk, a); | ||
910 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | ||
911 | a.full = dfixed_const(10); | ||
912 | dram_efficiency.full = dfixed_const(7); | ||
913 | dram_efficiency.full = dfixed_div(dram_efficiency, a); | ||
914 | bandwidth.full = dfixed_mul(dram_channels, yclk); | ||
915 | bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); | ||
916 | |||
917 | return dfixed_trunc(bandwidth); | ||
918 | } | ||
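
The fixed20_12 sequence above evaluates bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7. For illustration (values assumed, not from the patch): yclk = 800000 kHz with 2 channels gives 800 * 8 * 0.7 = 4480 MB/s. A plain-integer equivalent as a sketch:

    /* Sketch: plain-integer equivalent of the fixed20_12 math above.
     * With wm->yclk = 800000 and wm->dram_channels = 2 this yields 4480.
     */
    static u32 example_dram_bandwidth_mb_s(struct dce10_wm_params *wm)
    {
        return (wm->yclk / 1000) * (wm->dram_channels * 4) * 7 / 10;
    }
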
919 | |||
920 | /** | ||
921 | * dce_v10_0_dram_bandwidth_for_display - get the dram bandwidth for display | ||
922 | * | ||
923 | * @wm: watermark calculation data | ||
924 | * | ||
925 | * Calculate the dram bandwidth used for display (CIK). | ||
926 | * Used for display watermark bandwidth calculations | ||
927 | * Returns the dram bandwidth for display in MBytes/s | ||
928 | */ | ||
929 | static u32 dce_v10_0_dram_bandwidth_for_display(struct dce10_wm_params *wm) | ||
930 | { | ||
931 | /* Calculate DRAM Bandwidth and the part allocated to display. */ | ||
932 | fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ | ||
933 | fixed20_12 yclk, dram_channels, bandwidth; | ||
934 | fixed20_12 a; | ||
935 | |||
936 | a.full = dfixed_const(1000); | ||
937 | yclk.full = dfixed_const(wm->yclk); | ||
938 | yclk.full = dfixed_div(yclk, a); | ||
939 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | ||
940 | a.full = dfixed_const(10); | ||
941 | disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */ | ||
942 | disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); | ||
943 | bandwidth.full = dfixed_mul(dram_channels, yclk); | ||
944 | bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); | ||
945 | |||
946 | return dfixed_trunc(bandwidth); | ||
947 | } | ||
948 | |||
949 | /** | ||
950 | * dce_v10_0_data_return_bandwidth - get the data return bandwidth | ||
951 | * | ||
952 | * @wm: watermark calculation data | ||
953 | * | ||
954 | * Calculate the data return bandwidth used for display (CIK). | ||
955 | * Used for display watermark bandwidth calculations | ||
956 | * Returns the data return bandwidth in MBytes/s | ||
957 | */ | ||
958 | static u32 dce_v10_0_data_return_bandwidth(struct dce10_wm_params *wm) | ||
959 | { | ||
960 | /* Calculate the display Data return Bandwidth */ | ||
961 | fixed20_12 return_efficiency; /* 0.8 */ | ||
962 | fixed20_12 sclk, bandwidth; | ||
963 | fixed20_12 a; | ||
964 | |||
965 | a.full = dfixed_const(1000); | ||
966 | sclk.full = dfixed_const(wm->sclk); | ||
967 | sclk.full = dfixed_div(sclk, a); | ||
968 | a.full = dfixed_const(10); | ||
969 | return_efficiency.full = dfixed_const(8); | ||
970 | return_efficiency.full = dfixed_div(return_efficiency, a); | ||
971 | a.full = dfixed_const(32); | ||
972 | bandwidth.full = dfixed_mul(a, sclk); | ||
973 | bandwidth.full = dfixed_mul(bandwidth, return_efficiency); | ||
974 | |||
975 | return dfixed_trunc(bandwidth); | ||
976 | } | ||
977 | |||
978 | /** | ||
979 | * dce_v10_0_dmif_request_bandwidth - get the dmif bandwidth | ||
980 | * | ||
981 | * @wm: watermark calculation data | ||
982 | * | ||
983 | * Calculate the dmif bandwidth used for display (CIK). | ||
984 | * Used for display watermark bandwidth calculations | ||
985 | * Returns the dmif bandwidth in MBytes/s | ||
986 | */ | ||
987 | static u32 dce_v10_0_dmif_request_bandwidth(struct dce10_wm_params *wm) | ||
988 | { | ||
989 | /* Calculate the DMIF Request Bandwidth */ | ||
990 | fixed20_12 disp_clk_request_efficiency; /* 0.8 */ | ||
991 | fixed20_12 disp_clk, bandwidth; | ||
992 | fixed20_12 a, b; | ||
993 | |||
994 | a.full = dfixed_const(1000); | ||
995 | disp_clk.full = dfixed_const(wm->disp_clk); | ||
996 | disp_clk.full = dfixed_div(disp_clk, a); | ||
997 | a.full = dfixed_const(32); | ||
998 | b.full = dfixed_mul(a, disp_clk); | ||
999 | |||
1000 | a.full = dfixed_const(10); | ||
1001 | disp_clk_request_efficiency.full = dfixed_const(8); | ||
1002 | disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); | ||
1003 | |||
1004 | bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); | ||
1005 | |||
1006 | return dfixed_trunc(bandwidth); | ||
1007 | } | ||
1008 | |||
1009 | /** | ||
1010 | * dce_v10_0_available_bandwidth - get the min available bandwidth | ||
1011 | * | ||
1012 | * @wm: watermark calculation data | ||
1013 | * | ||
1014 | * Calculate the min available bandwidth used for display (CIK). | ||
1015 | * Used for display watermark bandwidth calculations | ||
1016 | * Returns the min available bandwidth in MBytes/s | ||
1017 | */ | ||
1018 | static u32 dce_v10_0_available_bandwidth(struct dce10_wm_params *wm) | ||
1019 | { | ||
1020 | /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */ | ||
1021 | u32 dram_bandwidth = dce_v10_0_dram_bandwidth(wm); | ||
1022 | u32 data_return_bandwidth = dce_v10_0_data_return_bandwidth(wm); | ||
1023 | u32 dmif_req_bandwidth = dce_v10_0_dmif_request_bandwidth(wm); | ||
1024 | |||
1025 | return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); | ||
1026 | } | ||
1027 | |||
1028 | /** | ||
1029 | * dce_v10_0_average_bandwidth - get the average available bandwidth | ||
1030 | * | ||
1031 | * @wm: watermark calculation data | ||
1032 | * | ||
1033 | * Calculate the average available bandwidth used for display (CIK). | ||
1034 | * Used for display watermark bandwidth calculations | ||
1035 | * Returns the average available bandwidth in MBytes/s | ||
1036 | */ | ||
1037 | static u32 dce_v10_0_average_bandwidth(struct dce10_wm_params *wm) | ||
1038 | { | ||
1039 | /* Calculate the display mode Average Bandwidth. | ||
1040 | * DisplayMode should contain the source and destination dimensions, | ||
1041 | * timing, etc. | ||
1042 | */ | ||
1043 | fixed20_12 bpp; | ||
1044 | fixed20_12 line_time; | ||
1045 | fixed20_12 src_width; | ||
1046 | fixed20_12 bandwidth; | ||
1047 | fixed20_12 a; | ||
1048 | |||
1049 | a.full = dfixed_const(1000); | ||
1050 | line_time.full = dfixed_const(wm->active_time + wm->blank_time); | ||
1051 | line_time.full = dfixed_div(line_time, a); | ||
1052 | bpp.full = dfixed_const(wm->bytes_per_pixel); | ||
1053 | src_width.full = dfixed_const(wm->src_width); | ||
1054 | bandwidth.full = dfixed_mul(src_width, bpp); | ||
1055 | bandwidth.full = dfixed_mul(bandwidth, wm->vsc); | ||
1056 | bandwidth.full = dfixed_div(bandwidth, line_time); | ||
1057 | |||
1058 | return dfixed_trunc(bandwidth); | ||
1059 | } | ||
1060 | |||
1061 | /** | ||
1062 | * dce_v10_0_latency_watermark - get the latency watermark | ||
1063 | * | ||
1064 | * @wm: watermark calculation data | ||
1065 | * | ||
1066 | * Calculate the latency watermark (CIK). | ||
1067 | * Used for display watermark bandwidth calculations | ||
1068 | * Returns the latency watermark in ns | ||
1069 | */ | ||
1070 | static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm) | ||
1071 | { | ||
1072 | /* First calculate the latency in ns */ | ||
1073 | u32 mc_latency = 2000; /* 2000 ns. */ | ||
1074 | u32 available_bandwidth = dce_v10_0_available_bandwidth(wm); | ||
1075 | u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; | ||
1076 | u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; | ||
1077 | u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ | ||
1078 | u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + | ||
1079 | (wm->num_heads * cursor_line_pair_return_time); | ||
1080 | u32 latency = mc_latency + other_heads_data_return_time + dc_latency; | ||
1081 | u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; | ||
1082 | u32 tmp, dmif_size = 12288; | ||
1083 | fixed20_12 a, b, c; | ||
1084 | |||
1085 | if (wm->num_heads == 0) | ||
1086 | return 0; | ||
1087 | |||
1088 | a.full = dfixed_const(2); | ||
1089 | b.full = dfixed_const(1); | ||
1090 | if ((wm->vsc.full > a.full) || | ||
1091 | ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || | ||
1092 | (wm->vtaps >= 5) || | ||
1093 | ((wm->vsc.full >= a.full) && wm->interlaced)) | ||
1094 | max_src_lines_per_dst_line = 4; | ||
1095 | else | ||
1096 | max_src_lines_per_dst_line = 2; | ||
1097 | |||
1098 | a.full = dfixed_const(available_bandwidth); | ||
1099 | b.full = dfixed_const(wm->num_heads); | ||
1100 | a.full = dfixed_div(a, b); | ||
1101 | |||
1102 | b.full = dfixed_const(mc_latency + 512); | ||
1103 | c.full = dfixed_const(wm->disp_clk); | ||
1104 | b.full = dfixed_div(b, c); | ||
1105 | |||
1106 | c.full = dfixed_const(dmif_size); | ||
1107 | b.full = dfixed_div(c, b); | ||
1108 | |||
1109 | tmp = min(dfixed_trunc(a), dfixed_trunc(b)); | ||
1110 | |||
1111 | b.full = dfixed_const(1000); | ||
1112 | c.full = dfixed_const(wm->disp_clk); | ||
1113 | b.full = dfixed_div(c, b); | ||
1114 | c.full = dfixed_const(wm->bytes_per_pixel); | ||
1115 | b.full = dfixed_mul(b, c); | ||
1116 | |||
1117 | lb_fill_bw = min(tmp, dfixed_trunc(b)); | ||
1118 | |||
1119 | a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); | ||
1120 | b.full = dfixed_const(1000); | ||
1121 | c.full = dfixed_const(lb_fill_bw); | ||
1122 | b.full = dfixed_div(c, b); | ||
1123 | a.full = dfixed_div(a, b); | ||
1124 | line_fill_time = dfixed_trunc(a); | ||
1125 | |||
1126 | if (line_fill_time < wm->active_time) | ||
1127 | return latency; | ||
1128 | else | ||
1129 | return latency + (line_fill_time - wm->active_time); | ||
1130 | |||
1131 | } | ||
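
To put numbers on the first half of this calculation (illustrative values, not from the patch): with an available bandwidth of 4480 MB/s, worst_chunk_return_time = 512 * 8 * 1000 / 4480 = 914 ns and cursor_line_pair_return_time = 128 * 4 * 1000 / 4480 = 114 ns; a 148500 kHz display clock gives dc_latency = 40000000 / 148500 = 269 ns. For a single head, latency = 2000 + ((1 + 1) * 914 + 1 * 114) + 269 = 4211 ns, before the line-fill correction applied at the end of the function.
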
1132 | |||
1133 | /** | ||
1134 | * dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display - check | ||
1135 | * average and available dram bandwidth | ||
1136 | * | ||
1137 | * @wm: watermark calculation data | ||
1138 | * | ||
1139 | * Check if the display average bandwidth fits in the display | ||
1140 | * dram bandwidth (CIK). | ||
1141 | * Used for display watermark bandwidth calculations | ||
1142 | * Returns true if the display fits, false if not. | ||
1143 | */ | ||
1144 | static bool dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm) | ||
1145 | { | ||
1146 | if (dce_v10_0_average_bandwidth(wm) <= | ||
1147 | (dce_v10_0_dram_bandwidth_for_display(wm) / wm->num_heads)) | ||
1148 | return true; | ||
1149 | else | ||
1150 | return false; | ||
1151 | } | ||
1152 | |||
1153 | /** | ||
1154 | * dce_v10_0_average_bandwidth_vs_available_bandwidth - check | ||
1155 | * average and available bandwidth | ||
1156 | * | ||
1157 | * @wm: watermark calculation data | ||
1158 | * | ||
1159 | * Check if the display average bandwidth fits in the display | ||
1160 | * available bandwidth (CIK). | ||
1161 | * Used for display watermark bandwidth calculations | ||
1162 | * Returns true if the display fits, false if not. | ||
1163 | */ | ||
1164 | static bool dce_v10_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm) | ||
1165 | { | ||
1166 | if (dce_v10_0_average_bandwidth(wm) <= | ||
1167 | (dce_v10_0_available_bandwidth(wm) / wm->num_heads)) | ||
1168 | return true; | ||
1169 | else | ||
1170 | return false; | ||
1171 | } | ||
1172 | |||
1173 | /** | ||
1174 | * dce_v10_0_check_latency_hiding - check latency hiding | ||
1175 | * | ||
1176 | * @wm: watermark calculation data | ||
1177 | * | ||
1178 | * Check latency hiding (CIK). | ||
1179 | * Used for display watermark bandwidth calculations | ||
1180 | * Returns true if the display fits, false if not. | ||
1181 | */ | ||
1182 | static bool dce_v10_0_check_latency_hiding(struct dce10_wm_params *wm) | ||
1183 | { | ||
1184 | u32 lb_partitions = wm->lb_size / wm->src_width; | ||
1185 | u32 line_time = wm->active_time + wm->blank_time; | ||
1186 | u32 latency_tolerant_lines; | ||
1187 | u32 latency_hiding; | ||
1188 | fixed20_12 a; | ||
1189 | |||
1190 | a.full = dfixed_const(1); | ||
1191 | if (wm->vsc.full > a.full) | ||
1192 | latency_tolerant_lines = 1; | ||
1193 | else { | ||
1194 | if (lb_partitions <= (wm->vtaps + 1)) | ||
1195 | latency_tolerant_lines = 1; | ||
1196 | else | ||
1197 | latency_tolerant_lines = 2; | ||
1198 | } | ||
1199 | |||
1200 | latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); | ||
1201 | |||
1202 | if (dce_v10_0_latency_watermark(wm) <= latency_hiding) | ||
1203 | return true; | ||
1204 | else | ||
1205 | return false; | ||
1206 | } | ||
1207 | |||
1208 | /** | ||
1209 | * dce_v10_0_program_watermarks - program display watermarks | ||
1210 | * | ||
1211 | * @adev: amdgpu_device pointer | ||
1212 | * @amdgpu_crtc: the selected display controller | ||
1213 | * @lb_size: line buffer size | ||
1214 | * @num_heads: number of display controllers in use | ||
1215 | * | ||
1216 | * Calculate and program the display watermarks for the | ||
1217 | * selected display controller (CIK). | ||
1218 | */ | ||
1219 | static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | ||
1220 | struct amdgpu_crtc *amdgpu_crtc, | ||
1221 | u32 lb_size, u32 num_heads) | ||
1222 | { | ||
1223 | struct drm_display_mode *mode = &amdgpu_crtc->base.mode; | ||
1224 | struct dce10_wm_params wm_low, wm_high; | ||
1225 | u32 pixel_period; | ||
1226 | u32 line_time = 0; | ||
1227 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | ||
1228 | u32 tmp, wm_mask; | ||
1229 | |||
1230 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | ||
1231 | pixel_period = 1000000 / (u32)mode->clock; | ||
1232 | line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); | ||
1233 | |||
1234 | /* watermark for high clocks */ | ||
1235 | if (adev->pm.dpm_enabled) { | ||
1236 | wm_high.yclk = | ||
1237 | amdgpu_dpm_get_mclk(adev, false) * 10; | ||
1238 | wm_high.sclk = | ||
1239 | amdgpu_dpm_get_sclk(adev, false) * 10; | ||
1240 | } else { | ||
1241 | wm_high.yclk = adev->pm.current_mclk * 10; | ||
1242 | wm_high.sclk = adev->pm.current_sclk * 10; | ||
1243 | } | ||
1244 | |||
1245 | wm_high.disp_clk = mode->clock; | ||
1246 | wm_high.src_width = mode->crtc_hdisplay; | ||
1247 | wm_high.active_time = mode->crtc_hdisplay * pixel_period; | ||
1248 | wm_high.blank_time = line_time - wm_high.active_time; | ||
1249 | wm_high.interlaced = false; | ||
1250 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1251 | wm_high.interlaced = true; | ||
1252 | wm_high.vsc = amdgpu_crtc->vsc; | ||
1253 | wm_high.vtaps = 1; | ||
1254 | if (amdgpu_crtc->rmx_type != RMX_OFF) | ||
1255 | wm_high.vtaps = 2; | ||
1256 | wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ | ||
1257 | wm_high.lb_size = lb_size; | ||
1258 | wm_high.dram_channels = cik_get_number_of_dram_channels(adev); | ||
1259 | wm_high.num_heads = num_heads; | ||
1260 | |||
1261 | /* set for high clocks */ | ||
1262 | latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535); | ||
1263 | |||
1264 | /* possibly force display priority to high */ | ||
1265 | /* should really do this at mode validation time... */ | ||
1266 | if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || | ||
1267 | !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_high) || | ||
1268 | !dce_v10_0_check_latency_hiding(&wm_high) || | ||
1269 | (adev->mode_info.disp_priority == 2)) { | ||
1270 | DRM_DEBUG_KMS("force priority to high\n"); | ||
1271 | } | ||
1272 | |||
1273 | /* watermark for low clocks */ | ||
1274 | if (adev->pm.dpm_enabled) { | ||
1275 | wm_low.yclk = | ||
1276 | amdgpu_dpm_get_mclk(adev, true) * 10; | ||
1277 | wm_low.sclk = | ||
1278 | amdgpu_dpm_get_sclk(adev, true) * 10; | ||
1279 | } else { | ||
1280 | wm_low.yclk = adev->pm.current_mclk * 10; | ||
1281 | wm_low.sclk = adev->pm.current_sclk * 10; | ||
1282 | } | ||
1283 | |||
1284 | wm_low.disp_clk = mode->clock; | ||
1285 | wm_low.src_width = mode->crtc_hdisplay; | ||
1286 | wm_low.active_time = mode->crtc_hdisplay * pixel_period; | ||
1287 | wm_low.blank_time = line_time - wm_low.active_time; | ||
1288 | wm_low.interlaced = false; | ||
1289 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1290 | wm_low.interlaced = true; | ||
1291 | wm_low.vsc = amdgpu_crtc->vsc; | ||
1292 | wm_low.vtaps = 1; | ||
1293 | if (amdgpu_crtc->rmx_type != RMX_OFF) | ||
1294 | wm_low.vtaps = 2; | ||
1295 | wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ | ||
1296 | wm_low.lb_size = lb_size; | ||
1297 | wm_low.dram_channels = cik_get_number_of_dram_channels(adev); | ||
1298 | wm_low.num_heads = num_heads; | ||
1299 | |||
1300 | /* set for low clocks */ | ||
1301 | latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535); | ||
1302 | |||
1303 | /* possibly force display priority to high */ | ||
1304 | /* should really do this at mode validation time... */ | ||
1305 | if (!dce_v10_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || | ||
1306 | !dce_v10_0_average_bandwidth_vs_available_bandwidth(&wm_low) || | ||
1307 | !dce_v10_0_check_latency_hiding(&wm_low) || | ||
1308 | (adev->mode_info.disp_priority == 2)) { | ||
1309 | DRM_DEBUG_KMS("force priority to high\n"); | ||
1310 | } | ||
1311 | } | ||
1312 | |||
1313 | /* select wm A */ | ||
1314 | wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); | ||
1315 | tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); | ||
1316 | WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
1317 | tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); | ||
1318 | tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); | ||
1319 | tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); | ||
1320 | WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
1321 | /* select wm B */ | ||
1322 | tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); | ||
1323 | WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
1324 | tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); | ||
1325 | tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b); | ||
1326 | tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); | ||
1327 | WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
1328 | /* restore original selection */ | ||
1329 | WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); | ||
1330 | |||
1331 | /* save values for DPM */ | ||
1332 | amdgpu_crtc->line_time = line_time; | ||
1333 | amdgpu_crtc->wm_high = latency_watermark_a; | ||
1334 | amdgpu_crtc->wm_low = latency_watermark_b; | ||
1335 | } | ||
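
A note on units in dce_v10_0_program_watermarks() above: mode->clock is in kHz, so 1000000 / clock gives the pixel period in nanoseconds, and line_time and the latency watermarks are clamped to 65535 to fit their register fields (the 16-bit field width is an assumption here). A quick sanity check with illustrative numbers:

    /* Illustrative: the watermark timing math for a 1080p@60 mode. */
    u32 clock = 148500;                  /* mode->clock in kHz */
    u32 pixel_period = 1000000 / clock;  /* = 6 ns (integer truncation) */
    u32 htotal = 2200, hdisplay = 1920;
    u32 line_time = min(htotal * pixel_period, (u32)65535); /* 13200 ns */
    u32 active_time = hdisplay * pixel_period;              /* 11520 ns */
    u32 blank_time = line_time - active_time;               /*  1680 ns */
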
1336 | |||
1337 | /** | ||
1338 | * dce_v10_0_bandwidth_update - program display watermarks | ||
1339 | * | ||
1340 | * @adev: amdgpu_device pointer | ||
1341 | * | ||
1342 | * Calculate and program the display watermarks and line | ||
1343 | * buffer allocation (DCE10). | ||
1344 | */ | ||
1345 | static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev) | ||
1346 | { | ||
1347 | struct drm_display_mode *mode = NULL; | ||
1348 | u32 num_heads = 0, lb_size; | ||
1349 | int i; | ||
1350 | |||
1351 | amdgpu_update_display_priority(adev); | ||
1352 | |||
1353 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
1354 | if (adev->mode_info.crtcs[i]->base.enabled) | ||
1355 | num_heads++; | ||
1356 | } | ||
1357 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
1358 | mode = &adev->mode_info.crtcs[i]->base.mode; | ||
1359 | lb_size = dce_v10_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); | ||
1360 | dce_v10_0_program_watermarks(adev, adev->mode_info.crtcs[i], | ||
1361 | lb_size, num_heads); | ||
1362 | } | ||
1363 | } | ||
1364 | |||
1365 | static void dce_v10_0_audio_get_connected_pins(struct amdgpu_device *adev) | ||
1366 | { | ||
1367 | int i; | ||
1368 | u32 offset, tmp; | ||
1369 | |||
1370 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
1371 | offset = adev->mode_info.audio.pin[i].offset; | ||
1372 | tmp = RREG32_AUDIO_ENDPT(offset, | ||
1373 | ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); | ||
1374 | if (((tmp & | ||
1375 | AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> | ||
1376 | AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) | ||
1377 | adev->mode_info.audio.pin[i].connected = false; | ||
1378 | else | ||
1379 | adev->mode_info.audio.pin[i].connected = true; | ||
1380 | } | ||
1381 | } | ||
1382 | |||
1383 | static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *adev) | ||
1384 | { | ||
1385 | int i; | ||
1386 | |||
1387 | dce_v10_0_audio_get_connected_pins(adev); | ||
1388 | |||
1389 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
1390 | if (adev->mode_info.audio.pin[i].connected) | ||
1391 | return &adev->mode_info.audio.pin[i]; | ||
1392 | } | ||
1393 | DRM_ERROR("No connected audio pins found!\n"); | ||
1394 | return NULL; | ||
1395 | } | ||
1396 | |||
1397 | static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder) | ||
1398 | { | ||
1399 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
1400 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1401 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1402 | u32 tmp; | ||
1403 | |||
1404 | if (!dig || !dig->afmt || !dig->afmt->pin) | ||
1405 | return; | ||
1406 | |||
1407 | tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); | ||
1408 | tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); | ||
1409 | WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); | ||
1410 | } | ||
1411 | |||
1412 | static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, | ||
1413 | struct drm_display_mode *mode) | ||
1414 | { | ||
1415 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
1416 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1417 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1418 | struct drm_connector *connector; | ||
1419 | struct amdgpu_connector *amdgpu_connector = NULL; | ||
1420 | u32 tmp; | ||
1421 | int interlace = 0; | ||
1422 | |||
1423 | if (!dig || !dig->afmt || !dig->afmt->pin) | ||
1424 | return; | ||
1425 | |||
1426 | list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { | ||
1427 | if (connector->encoder == encoder) { | ||
1428 | amdgpu_connector = to_amdgpu_connector(connector); | ||
1429 | break; | ||
1430 | } | ||
1431 | } | ||
1432 | |||
1433 | if (!amdgpu_connector) { | ||
1434 | DRM_ERROR("Couldn't find encoder's connector\n"); | ||
1435 | return; | ||
1436 | } | ||
1437 | |||
1438 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1439 | interlace = 1; | ||
1440 | if (connector->latency_present[interlace]) { | ||
1441 | tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, | ||
1442 | VIDEO_LIPSYNC, connector->video_latency[interlace]); | ||
1443 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, | ||
1444 | AUDIO_LIPSYNC, connector->audio_latency[interlace]); | ||
1445 | } else { | ||
1446 | tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, | ||
1447 | VIDEO_LIPSYNC, 0); | ||
1448 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, | ||
1449 | AUDIO_LIPSYNC, 0); | ||
1450 | } | ||
1451 | WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, | ||
1452 | ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); | ||
1453 | } | ||
1454 | |||
1455 | static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder) | ||
1456 | { | ||
1457 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
1458 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1459 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1460 | struct drm_connector *connector; | ||
1461 | struct amdgpu_connector *amdgpu_connector = NULL; | ||
1462 | u32 tmp; | ||
1463 | u8 *sadb = NULL; | ||
1464 | int sad_count; | ||
1465 | |||
1466 | if (!dig || !dig->afmt || !dig->afmt->pin) | ||
1467 | return; | ||
1468 | |||
1469 | list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { | ||
1470 | if (connector->encoder == encoder) { | ||
1471 | amdgpu_connector = to_amdgpu_connector(connector); | ||
1472 | break; | ||
1473 | } | ||
1474 | } | ||
1475 | |||
1476 | if (!amdgpu_connector) { | ||
1477 | DRM_ERROR("Couldn't find encoder's connector\n"); | ||
1478 | return; | ||
1479 | } | ||
1480 | |||
1481 | sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb); | ||
1482 | if (sad_count < 0) { | ||
1483 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); | ||
1484 | sad_count = 0; | ||
1485 | } | ||
1486 | |||
1487 | /* program the speaker allocation */ | ||
1488 | tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, | ||
1489 | ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); | ||
1490 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, | ||
1491 | DP_CONNECTION, 0); | ||
1492 | /* set HDMI mode */ | ||
1493 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, | ||
1494 | HDMI_CONNECTION, 1); | ||
1495 | if (sad_count) | ||
1496 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, | ||
1497 | SPEAKER_ALLOCATION, sadb[0]); | ||
1498 | else | ||
1499 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, | ||
1500 | SPEAKER_ALLOCATION, 5); /* stereo */ | ||
1501 | WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, | ||
1502 | ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); | ||
1503 | |||
1504 | kfree(sadb); | ||
1505 | } | ||
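
For reference, sadb[0] above is the payload byte of the EDID Speaker Allocation Data Block. A minimal decode sketch, assuming the CEA-861 bit layout (bit 0 = FL/FR, bit 1 = LFE, bit 2 = FC, bit 3 = RL/RR); under that layout the fallback value 5 written above corresponds to FL/FR plus FC:

    /* Sketch: decoding the speaker allocation byte (CEA-861 layout assumed). */
    static void decode_speaker_allocation(u8 spk)
    {
        if (spk & 0x01)
            pr_debug("FL/FR ");
        if (spk & 0x02)
            pr_debug("LFE ");
        if (spk & 0x04)
            pr_debug("FC ");
        if (spk & 0x08)
            pr_debug("RL/RR ");
    }
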
1506 | |||
1507 | static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) | ||
1508 | { | ||
1509 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
1510 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1511 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1512 | struct drm_connector *connector; | ||
1513 | struct amdgpu_connector *amdgpu_connector = NULL; | ||
1514 | struct cea_sad *sads; | ||
1515 | int i, sad_count; | ||
1516 | |||
1517 | static const u16 eld_reg_to_type[][2] = { | ||
1518 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, | ||
1519 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, | ||
1520 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, | ||
1521 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, | ||
1522 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, | ||
1523 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, | ||
1524 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, | ||
1525 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, | ||
1526 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, | ||
1527 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, | ||
1528 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, | ||
1529 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, | ||
1530 | }; | ||
1531 | |||
1532 | if (!dig || !dig->afmt || !dig->afmt->pin) | ||
1533 | return; | ||
1534 | |||
1535 | list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { | ||
1536 | if (connector->encoder == encoder) { | ||
1537 | amdgpu_connector = to_amdgpu_connector(connector); | ||
1538 | break; | ||
1539 | } | ||
1540 | } | ||
1541 | |||
1542 | if (!amdgpu_connector) { | ||
1543 | DRM_ERROR("Couldn't find encoder's connector\n"); | ||
1544 | return; | ||
1545 | } | ||
1546 | |||
1547 | sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); | ||
1548 | if (sad_count <= 0) { | ||
1549 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); | ||
1550 | return; | ||
1551 | } | ||
1552 | BUG_ON(!sads); | ||
1553 | |||
1554 | for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { | ||
1555 | u32 tmp = 0; | ||
1556 | u8 stereo_freqs = 0; | ||
1557 | int max_channels = -1; | ||
1558 | int j; | ||
1559 | |||
1560 | for (j = 0; j < sad_count; j++) { | ||
1561 | struct cea_sad *sad = &sads[j]; | ||
1562 | |||
1563 | if (sad->format == eld_reg_to_type[i][1]) { | ||
1564 | if (sad->channels > max_channels) { | ||
1565 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, | ||
1566 | MAX_CHANNELS, sad->channels); | ||
1567 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, | ||
1568 | DESCRIPTOR_BYTE_2, sad->byte2); | ||
1569 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, | ||
1570 | SUPPORTED_FREQUENCIES, sad->freq); | ||
1571 | max_channels = sad->channels; | ||
1572 | } | ||
1573 | |||
1574 | if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) | ||
1575 | stereo_freqs |= sad->freq; | ||
1576 | else | ||
1577 | break; | ||
1578 | } | ||
1579 | } | ||
1580 | |||
1581 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, | ||
1582 | SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); | ||
1583 | WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); | ||
1584 | } | ||
1585 | |||
1586 | kfree(sads); | ||
1587 | } | ||
1588 | |||
1589 | static void dce_v10_0_audio_enable(struct amdgpu_device *adev, | ||
1590 | struct amdgpu_audio_pin *pin, | ||
1591 | bool enable) | ||
1592 | { | ||
1593 | if (!pin) | ||
1594 | return; | ||
1595 | |||
1596 | WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, | ||
1597 | enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); | ||
1598 | } | ||
1599 | |||
1600 | static const u32 pin_offsets[] = | ||
1601 | { | ||
1602 | AUD0_REGISTER_OFFSET, | ||
1603 | AUD1_REGISTER_OFFSET, | ||
1604 | AUD2_REGISTER_OFFSET, | ||
1605 | AUD3_REGISTER_OFFSET, | ||
1606 | AUD4_REGISTER_OFFSET, | ||
1607 | AUD5_REGISTER_OFFSET, | ||
1608 | AUD6_REGISTER_OFFSET, | ||
1609 | }; | ||
1610 | |||
1611 | static int dce_v10_0_audio_init(struct amdgpu_device *adev) | ||
1612 | { | ||
1613 | int i; | ||
1614 | |||
1615 | if (!amdgpu_audio) | ||
1616 | return 0; | ||
1617 | |||
1618 | adev->mode_info.audio.enabled = true; | ||
1619 | |||
1620 | adev->mode_info.audio.num_pins = 7; | ||
1621 | |||
1622 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
1623 | adev->mode_info.audio.pin[i].channels = -1; | ||
1624 | adev->mode_info.audio.pin[i].rate = -1; | ||
1625 | adev->mode_info.audio.pin[i].bits_per_sample = -1; | ||
1626 | adev->mode_info.audio.pin[i].status_bits = 0; | ||
1627 | adev->mode_info.audio.pin[i].category_code = 0; | ||
1628 | adev->mode_info.audio.pin[i].connected = false; | ||
1629 | adev->mode_info.audio.pin[i].offset = pin_offsets[i]; | ||
1630 | adev->mode_info.audio.pin[i].id = i; | ||
1631 | /* disable audio. it will be set up later */ | ||
1632 | /* XXX remove once we switch to ip funcs */ | ||
1633 | dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | ||
1634 | } | ||
1635 | |||
1636 | return 0; | ||
1637 | } | ||
1638 | |||
1639 | static void dce_v10_0_audio_fini(struct amdgpu_device *adev) | ||
1640 | { | ||
1641 | int i; | ||
1642 | |||
1643 | if (!adev->mode_info.audio.enabled) | ||
1644 | return; | ||
1645 | |||
1646 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) | ||
1647 | dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | ||
1648 | |||
1649 | adev->mode_info.audio.enabled = false; | ||
1650 | } | ||
1651 | |||
1652 | /* | ||
1653 | * update the N and CTS parameters for a given pixel clock rate | ||
1654 | */ | ||
1655 | static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) | ||
1656 | { | ||
1657 | struct drm_device *dev = encoder->dev; | ||
1658 | struct amdgpu_device *adev = dev->dev_private; | ||
1659 | struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); | ||
1660 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1661 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1662 | u32 tmp; | ||
1663 | |||
1664 | tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); | ||
1665 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); | ||
1666 | WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); | ||
1667 | tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); | ||
1668 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); | ||
1669 | WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); | ||
1670 | |||
1671 | tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); | ||
1672 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); | ||
1673 | WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); | ||
1674 | tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); | ||
1675 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); | ||
1676 | WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); | ||
1677 | |||
1678 | tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); | ||
1679 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); | ||
1680 | WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); | ||
1681 | tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); | ||
1682 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); | ||
1683 | WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); | ||
1684 | |||
1685 | } | ||
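
The N/CTS pairs written above follow the HDMI Audio Clock Regeneration relation fs = f_TMDS * N / (128 * CTS); amdgpu_afmt_acr() selects them per sample rate. A worked instance, assuming the HDMI-recommended N of 6144 for 48 kHz:

    /* Worked example of the ACR relation fs = f_TMDS * N / (128 * CTS). */
    u32 tmds_khz = 148500;  /* 148.5 MHz TMDS/pixel clock */
    u32 n_48khz = 6144;     /* assumed HDMI-recommended N for 48 kHz */
    u32 cts_48khz = tmds_khz * n_48khz / (128 * 48);  /* = 148500 */
    /* check: 148500 * 6144 / (128 * 148500) = 48 kHz exactly */
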
1686 | |||
1687 | /* | ||
1688 | * build an HDMI Video InfoFrame | ||
1689 | */ | ||
1690 | static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, | ||
1691 | void *buffer, size_t size) | ||
1692 | { | ||
1693 | struct drm_device *dev = encoder->dev; | ||
1694 | struct amdgpu_device *adev = dev->dev_private; | ||
1695 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1696 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1697 | uint8_t *frame = buffer + 3; | ||
1698 | uint8_t *header = buffer; | ||
1699 | |||
1700 | WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset, | ||
1701 | frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); | ||
1702 | WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset, | ||
1703 | frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); | ||
1704 | WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset, | ||
1705 | frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); | ||
1706 | WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset, | ||
1707 | frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); | ||
1708 | } | ||
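
The writes above pack the AVI payload little-endian, four bytes per 32-bit register, with the infoframe version byte (header[1]) landing in the top byte of AFMT_AVI_INFO3. The same packing as a generic helper:

    /* Sketch: the little-endian 4-byte packing used by the writes above. */
    static u32 pack_le32(const u8 *p)
    {
        return p[0] | (p[1] << 8) | (p[2] << 16) | ((u32)p[3] << 24);
    }
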
1709 | |||
1710 | static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) | ||
1711 | { | ||
1712 | struct drm_device *dev = encoder->dev; | ||
1713 | struct amdgpu_device *adev = dev->dev_private; | ||
1714 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1715 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1716 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
1717 | u32 dto_phase = 24 * 1000; | ||
1718 | u32 dto_modulo = clock; | ||
1719 | u32 tmp; | ||
1720 | |||
1721 | if (!dig || !dig->afmt) | ||
1722 | return; | ||
1723 | |||
1724 | /* XXX two dtos; generally use dto0 for hdmi */ | ||
1725 | /* Express [24MHz / target pixel clock] as an exact rational | ||
1726 | * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE | ||
1727 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator. | ||
1728 | */ | ||
1729 | tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); | ||
1730 | tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, | ||
1731 | amdgpu_crtc->crtc_id); | ||
1732 | WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); | ||
1733 | WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
1734 | WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
1735 | } | ||
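
Since mode->clock is in kHz, the phase/modulo pair above encodes 24 MHz / pixel clock exactly; illustrative values for a 148.5 MHz pixel clock:

    /* Illustrative: the exact rational programmed into the DTO above. */
    u32 clock = 148500;        /* mode->clock, kHz */
    u32 dto_phase = 24 * 1000; /* numerator: 24 MHz in kHz */
    u32 dto_modulo = clock;    /* denominator: the pixel clock */
    /* ratio = 24000 / 148500 = 24 MHz / 148.5 MHz, with no rounding */
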
1736 | |||
1737 | /* | ||
1738 | * update the info frames with the data from the current display mode | ||
1739 | */ | ||
1740 | static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder, | ||
1741 | struct drm_display_mode *mode) | ||
1742 | { | ||
1743 | struct drm_device *dev = encoder->dev; | ||
1744 | struct amdgpu_device *adev = dev->dev_private; | ||
1745 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1746 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1747 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
1748 | u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; | ||
1749 | struct hdmi_avi_infoframe frame; | ||
1750 | ssize_t err; | ||
1751 | u32 tmp; | ||
1752 | int bpc = 8; | ||
1753 | |||
1754 | if (!dig || !dig->afmt) | ||
1755 | return; | ||
1756 | |||
1757 | /* silently return if the AFMT block has not been enabled yet */ | ||
1758 | if (!dig->afmt->enabled) | ||
1759 | return; | ||
1760 | |||
1761 | /* hdmi deep color mode general control packets setup, if bpc > 8 */ | ||
1762 | if (encoder->crtc) { | ||
1763 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
1764 | bpc = amdgpu_crtc->bpc; | ||
1765 | } | ||
1766 | |||
1767 | /* disable audio prior to setting up hw */ | ||
1768 | dig->afmt->pin = dce_v10_0_audio_get_pin(adev); | ||
1769 | dce_v10_0_audio_enable(adev, dig->afmt->pin, false); | ||
1770 | |||
1771 | dce_v10_0_audio_set_dto(encoder, mode->clock); | ||
1772 | |||
1773 | tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); | ||
1774 | tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); | ||
1775 | WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ | ||
1776 | |||
1777 | WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); | ||
1778 | |||
1779 | tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); | ||
1780 | switch (bpc) { | ||
1781 | case 0: | ||
1782 | case 6: | ||
1783 | case 8: | ||
1784 | case 16: | ||
1785 | default: | ||
1786 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); | ||
1787 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); | ||
1788 | DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", | ||
1789 | connector->name, bpc); | ||
1790 | break; | ||
1791 | case 10: | ||
1792 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); | ||
1793 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); | ||
1794 | DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", | ||
1795 | connector->name); | ||
1796 | break; | ||
1797 | case 12: | ||
1798 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); | ||
1799 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); | ||
1800 | DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", | ||
1801 | connector->name); | ||
1802 | break; | ||
1803 | } | ||
1804 | WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); | ||
1805 | |||
1806 | tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); | ||
1807 | tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ | ||
1808 | tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */ | ||
1809 | tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ | ||
1810 | WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1811 | |||
1812 | tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); | ||
1813 | /* enable audio info frames (frames won't be set until audio is enabled) */ | ||
1814 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); | ||
1815 | /* required for audio info values to be updated */ | ||
1816 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); | ||
1817 | WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); | ||
1818 | |||
1819 | tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); | ||
1820 | /* required for audio info values to be updated */ | ||
1821 | tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); | ||
1822 | WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); | ||
1823 | |||
1824 | tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); | ||
1825 | /* anything other than 0 */ | ||
1826 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); | ||
1827 | WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); | ||
1828 | |||
1829 | WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ | ||
1830 | |||
1831 | tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); | ||
1832 | /* set the default audio delay */ | ||
1833 | tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); | ||
1834 | /* should be sufficient for all audio modes and small enough for all hblanks */ | ||
1835 | tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); | ||
1836 | WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1837 | |||
1838 | tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); | ||
1839 | /* allow 60958 channel status fields to be updated */ | ||
1840 | tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); | ||
1841 | WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1842 | |||
1843 | tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); | ||
1844 | if (bpc > 8) | ||
1845 | /* clear SW CTS value */ | ||
1846 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); | ||
1847 | else | ||
1848 | /* select SW CTS value */ | ||
1849 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); | ||
1850 | /* allow hw to send ACR packets when required */ | ||
1851 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); | ||
1852 | WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1853 | |||
1854 | dce_v10_0_afmt_update_ACR(encoder, mode->clock); | ||
1855 | |||
1856 | tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); | ||
1857 | tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); | ||
1858 | WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); | ||
1859 | |||
1860 | tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); | ||
1861 | tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); | ||
1862 | WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); | ||
1863 | |||
1864 | tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); | ||
1865 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); | ||
1866 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); | ||
1867 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); | ||
1868 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); | ||
1869 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); | ||
1870 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); | ||
1871 | WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); | ||
1872 | |||
1873 | dce_v10_0_audio_write_speaker_allocation(encoder); | ||
1874 | |||
1875 | WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, | ||
1876 | (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); | ||
1877 | |||
1878 | dce_v10_0_afmt_audio_select_pin(encoder); | ||
1879 | dce_v10_0_audio_write_sad_regs(encoder); | ||
1880 | dce_v10_0_audio_write_latency_fields(encoder, mode); | ||
1881 | |||
1882 | err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); | ||
1883 | if (err < 0) { | ||
1884 | DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); | ||
1885 | return; | ||
1886 | } | ||
1887 | |||
1888 | err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); | ||
1889 | if (err < 0) { | ||
1890 | DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); | ||
1891 | return; | ||
1892 | } | ||
1893 | |||
1894 | dce_v10_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); | ||
1895 | |||
1896 | tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); | ||
1897 | /* enable AVI info frames */ | ||
1898 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); | ||
1899 | /* send AVI info frames every frame/field */ | ||
1900 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); | ||
1901 | WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); | ||
1902 | |||
1903 | tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); | ||
1904 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); | ||
1905 | WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); | ||
1906 | |||
1907 | tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); | ||
1908 | /* send audio packets */ | ||
1909 | tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); | ||
1910 | WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1911 | |||
1912 | WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); | ||
1913 | WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); | ||
1914 | WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); | ||
1915 | WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); | ||
1916 | |||
1917 | /* enable audio after setting up hw */ | ||
1918 | dce_v10_0_audio_enable(adev, dig->afmt->pin, true); | ||
1919 | } | ||
1920 | |||
1921 | static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable) | ||
1922 | { | ||
1923 | struct drm_device *dev = encoder->dev; | ||
1924 | struct amdgpu_device *adev = dev->dev_private; | ||
1925 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1926 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1927 | |||
1928 | if (!dig || !dig->afmt) | ||
1929 | return; | ||
1930 | |||
1931 | /* silently return if the requested state is already set */ | ||
1932 | if (enable && dig->afmt->enabled) | ||
1933 | return; | ||
1934 | if (!enable && !dig->afmt->enabled) | ||
1935 | return; | ||
1936 | |||
1937 | if (!enable && dig->afmt->pin) { | ||
1938 | dce_v10_0_audio_enable(adev, dig->afmt->pin, false); | ||
1939 | dig->afmt->pin = NULL; | ||
1940 | } | ||
1941 | |||
1942 | dig->afmt->enabled = enable; | ||
1943 | |||
1944 | DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", | ||
1945 | enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); | ||
1946 | } | ||
1947 | |||
1948 | static void dce_v10_0_afmt_init(struct amdgpu_device *adev) | ||
1949 | { | ||
1950 | int i; | ||
1951 | |||
1952 | for (i = 0; i < adev->mode_info.num_dig; i++) | ||
1953 | adev->mode_info.afmt[i] = NULL; | ||
1954 | |||
1955 | /* DCE10 has audio blocks tied to DIG encoders */ | ||
1956 | for (i = 0; i < adev->mode_info.num_dig; i++) { | ||
1957 | adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); | ||
1958 | if (adev->mode_info.afmt[i]) { | ||
1959 | adev->mode_info.afmt[i]->offset = dig_offsets[i]; | ||
1960 | adev->mode_info.afmt[i]->id = i; | ||
1961 | } | ||
1962 | } | ||
1963 | } | ||
1964 | |||
1965 | static void dce_v10_0_afmt_fini(struct amdgpu_device *adev) | ||
1966 | { | ||
1967 | int i; | ||
1968 | |||
1969 | for (i = 0; i < adev->mode_info.num_dig; i++) { | ||
1970 | kfree(adev->mode_info.afmt[i]); | ||
1971 | adev->mode_info.afmt[i] = NULL; | ||
1972 | } | ||
1973 | } | ||
1974 | |||
1975 | static const u32 vga_control_regs[6] = | ||
1976 | { | ||
1977 | mmD1VGA_CONTROL, | ||
1978 | mmD2VGA_CONTROL, | ||
1979 | mmD3VGA_CONTROL, | ||
1980 | mmD4VGA_CONTROL, | ||
1981 | mmD5VGA_CONTROL, | ||
1982 | mmD6VGA_CONTROL, | ||
1983 | }; | ||
1984 | |||
1985 | static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable) | ||
1986 | { | ||
1987 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
1988 | struct drm_device *dev = crtc->dev; | ||
1989 | struct amdgpu_device *adev = dev->dev_private; | ||
1990 | u32 vga_control; | ||
1991 | |||
1992 | vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; | ||
1993 | if (enable) | ||
1994 | WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); | ||
1995 | else | ||
1996 | WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); | ||
1997 | } | ||
1998 | |||
1999 | static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable) | ||
2000 | { | ||
2001 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2002 | struct drm_device *dev = crtc->dev; | ||
2003 | struct amdgpu_device *adev = dev->dev_private; | ||
2004 | |||
2005 | if (enable) | ||
2006 | WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); | ||
2007 | else | ||
2008 | WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); | ||
2009 | } | ||
2010 | |||
2011 | static void dce_v10_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw, | ||
2012 | unsigned *bankh, unsigned *mtaspect, | ||
2013 | unsigned *tile_split) | ||
2014 | { | ||
2015 | *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; | ||
2016 | *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; | ||
2017 | *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; | ||
2018 | *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; | ||
2019 | switch (*bankw) { | ||
2020 | default: | ||
2021 | case 1: | ||
2022 | *bankw = ADDR_SURF_BANK_WIDTH_1; | ||
2023 | break; | ||
2024 | case 2: | ||
2025 | *bankw = ADDR_SURF_BANK_WIDTH_2; | ||
2026 | break; | ||
2027 | case 4: | ||
2028 | *bankw = ADDR_SURF_BANK_WIDTH_4; | ||
2029 | break; | ||
2030 | case 8: | ||
2031 | *bankw = ADDR_SURF_BANK_WIDTH_8; | ||
2032 | break; | ||
2033 | } | ||
2034 | switch (*bankh) { | ||
2035 | default: | ||
2036 | case 1: | ||
2037 | *bankh = ADDR_SURF_BANK_HEIGHT_1; | ||
2038 | break; | ||
2039 | case 2: | ||
2040 | *bankh = ADDR_SURF_BANK_HEIGHT_2; | ||
2041 | break; | ||
2042 | case 4: | ||
2043 | *bankh = ADDR_SURF_BANK_HEIGHT_4; | ||
2044 | break; | ||
2045 | case 8: | ||
2046 | *bankh = ADDR_SURF_BANK_HEIGHT_8; | ||
2047 | break; | ||
2048 | } | ||
2049 | switch (*mtaspect) { | ||
2050 | default: | ||
2051 | case 1: | ||
2052 | *mtaspect = ADDR_SURF_MACRO_ASPECT_1; | ||
2053 | break; | ||
2054 | case 2: | ||
2055 | *mtaspect = ADDR_SURF_MACRO_ASPECT_2; | ||
2056 | break; | ||
2057 | case 4: | ||
2058 | *mtaspect = ADDR_SURF_MACRO_ASPECT_4; | ||
2059 | break; | ||
2060 | case 8: | ||
2061 | *mtaspect = ADDR_SURF_MACRO_ASPECT_8; | ||
2062 | break; | ||
2063 | } | ||
2064 | } | ||
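
dce_v10_0_tiling_fields() above is plain shift-and-mask extraction of fields packed into the 64-bit tiling_flags, followed by mapping the raw power-of-two values onto the ADDR_SURF_* enums. The extraction step in isolation:

    /* Sketch: the generic field extraction pattern used above. */
    static unsigned tiling_field(uint64_t flags, unsigned shift, unsigned mask)
    {
        return (flags >> shift) & mask;
    }
    /* e.g. bankw = tiling_field(tiling_flags, AMDGPU_TILING_EG_BANKW_SHIFT,
     *                           AMDGPU_TILING_EG_BANKW_MASK); */
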
2065 | |||
2066 | static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, | ||
2067 | struct drm_framebuffer *fb, | ||
2068 | int x, int y, int atomic) | ||
2069 | { | ||
2070 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2071 | struct drm_device *dev = crtc->dev; | ||
2072 | struct amdgpu_device *adev = dev->dev_private; | ||
2073 | struct amdgpu_framebuffer *amdgpu_fb; | ||
2074 | struct drm_framebuffer *target_fb; | ||
2075 | struct drm_gem_object *obj; | ||
2076 | struct amdgpu_bo *rbo; | ||
2077 | uint64_t fb_location, tiling_flags; | ||
2078 | uint32_t fb_format, fb_pitch_pixels; | ||
2079 | unsigned bankw, bankh, mtaspect, tile_split; | ||
2080 | u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); | ||
2081 | /* XXX change to VI */ | ||
2082 | u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f; | ||
2083 | u32 tmp, viewport_w, viewport_h; | ||
2084 | int r; | ||
2085 | bool bypass_lut = false; | ||
2086 | |||
2087 | /* no fb bound */ | ||
2088 | if (!atomic && !crtc->primary->fb) { | ||
2089 | DRM_DEBUG_KMS("No FB bound\n"); | ||
2090 | return 0; | ||
2091 | } | ||
2092 | |||
2093 | if (atomic) { | ||
2094 | amdgpu_fb = to_amdgpu_framebuffer(fb); | ||
2095 | target_fb = fb; | ||
2096 | } | ||
2097 | else { | ||
2098 | amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); | ||
2099 | target_fb = crtc->primary->fb; | ||
2100 | } | ||
2101 | |||
2102 | /* If atomic, assume fb object is pinned & idle & fenced and | ||
2103 | * just update base pointers | ||
2104 | */ | ||
2105 | obj = amdgpu_fb->obj; | ||
2106 | rbo = gem_to_amdgpu_bo(obj); | ||
2107 | r = amdgpu_bo_reserve(rbo, false); | ||
2108 | if (unlikely(r != 0)) | ||
2109 | return r; | ||
2110 | |||
2111 | if (atomic) | ||
2112 | fb_location = amdgpu_bo_gpu_offset(rbo); | ||
2113 | else { | ||
2114 | r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); | ||
2115 | if (unlikely(r != 0)) { | ||
2116 | amdgpu_bo_unreserve(rbo); | ||
2117 | return -EINVAL; | ||
2118 | } | ||
2119 | } | ||
2120 | |||
2121 | amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); | ||
2122 | amdgpu_bo_unreserve(rbo); | ||
2123 | |||
2124 | switch (target_fb->pixel_format) { | ||
2125 | case DRM_FORMAT_C8: | ||
2126 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); | ||
2127 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); | ||
2128 | break; | ||
2129 | case DRM_FORMAT_XRGB4444: | ||
2130 | case DRM_FORMAT_ARGB4444: | ||
2131 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); | ||
2132 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); | ||
2133 | #ifdef __BIG_ENDIAN | ||
2134 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2135 | ENDIAN_8IN16); | ||
2136 | #endif | ||
2137 | break; | ||
2138 | case DRM_FORMAT_XRGB1555: | ||
2139 | case DRM_FORMAT_ARGB1555: | ||
2140 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); | ||
2141 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); | ||
2142 | #ifdef __BIG_ENDIAN | ||
2143 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2144 | ENDIAN_8IN16); | ||
2145 | #endif | ||
2146 | break; | ||
2147 | case DRM_FORMAT_BGRX5551: | ||
2148 | case DRM_FORMAT_BGRA5551: | ||
2149 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); | ||
2150 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); | ||
2151 | #ifdef __BIG_ENDIAN | ||
2152 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2153 | ENDIAN_8IN16); | ||
2154 | #endif | ||
2155 | break; | ||
2156 | case DRM_FORMAT_RGB565: | ||
2157 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); | ||
2158 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); | ||
2159 | #ifdef __BIG_ENDIAN | ||
2160 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2161 | ENDIAN_8IN16); | ||
2162 | #endif | ||
2163 | break; | ||
2164 | case DRM_FORMAT_XRGB8888: | ||
2165 | case DRM_FORMAT_ARGB8888: | ||
2166 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); | ||
2167 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); | ||
2168 | #ifdef __BIG_ENDIAN | ||
2169 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2170 | ENDIAN_8IN32); | ||
2171 | #endif | ||
2172 | break; | ||
2173 | case DRM_FORMAT_XRGB2101010: | ||
2174 | case DRM_FORMAT_ARGB2101010: | ||
2175 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); | ||
2176 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); | ||
2177 | #ifdef __BIG_ENDIAN | ||
2178 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2179 | ENDIAN_8IN32); | ||
2180 | #endif | ||
2181 | /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */ | ||
2182 | bypass_lut = true; | ||
2183 | break; | ||
2184 | case DRM_FORMAT_BGRX1010102: | ||
2185 | case DRM_FORMAT_BGRA1010102: | ||
2186 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); | ||
2187 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); | ||
2188 | #ifdef __BIG_ENDIAN | ||
2189 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2190 | ENDIAN_8IN32); | ||
2191 | #endif | ||
2192 | /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */ | ||
2193 | bypass_lut = true; | ||
2194 | break; | ||
2195 | default: | ||
2196 | DRM_ERROR("Unsupported screen format %s\n", | ||
2197 | drm_get_format_name(target_fb->pixel_format)); | ||
2198 | return -EINVAL; | ||
2199 | } | ||
2200 | |||
2201 | if (tiling_flags & AMDGPU_TILING_MACRO) { | ||
2202 | unsigned tileb, index, num_banks, tile_split_bytes; | ||
2203 | |||
2204 | dce_v10_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); | ||
2205 | /* Set NUM_BANKS. */ | ||
2206 | /* Calculate the macrotile mode index. */ | ||
2207 | tile_split_bytes = 64 << tile_split; | ||
2208 | tileb = 8 * 8 * target_fb->bits_per_pixel / 8; | ||
2209 | tileb = min(tile_split_bytes, tileb); | ||
2210 | |||
2211 | for (index = 0; tileb > 64; index++) { | ||
2212 | tileb >>= 1; | ||
2213 | } | ||
2214 | |||
2215 | if (index >= 16) { | ||
2216 | DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", | ||
2217 | target_fb->bits_per_pixel, tile_split); | ||
2218 | return -EINVAL; | ||
2219 | } | ||
2220 | |||
2221 | num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3; | ||
2222 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); | ||
2223 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, | ||
2224 | ARRAY_2D_TILED_THIN1); | ||
2225 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, | ||
2226 | tile_split); | ||
2227 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); | ||
2228 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); | ||
2229 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, | ||
2230 | mtaspect); | ||
2231 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, | ||
2232 | ADDR_SURF_MICRO_TILING_DISPLAY); | ||
2233 | } else if (tiling_flags & AMDGPU_TILING_MICRO) { | ||
2234 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, | ||
2235 | ARRAY_1D_TILED_THIN1); | ||
2236 | } | ||
2237 | |||
2238 | /* Read the pipe config from the 2D TILED SCANOUT mode. | ||
2239 | * It should be the same for the other modes too, but not all | ||
2240 | * modes set the pipe config field. */ | ||
2241 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, | ||
2242 | pipe_config); | ||
2243 | |||
2244 | dce_v10_0_vga_enable(crtc, false); | ||
2245 | |||
2246 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
2247 | upper_32_bits(fb_location)); | ||
2248 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
2249 | upper_32_bits(fb_location)); | ||
2250 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
2251 | (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); | ||
2252 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
2253 | (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); | ||
2254 | WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); | ||
2255 | WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); | ||
2256 | |||
2257 | /* | ||
2258 | * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT | ||
2259 | * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to | ||
2260 | * retain the full precision throughout the pipeline. | ||
2261 | */ | ||
2262 | tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); | ||
2263 | if (bypass_lut) | ||
2264 | tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); | ||
2265 | else | ||
2266 | tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); | ||
2267 | WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); | ||
2268 | |||
2269 | if (bypass_lut) | ||
2270 | DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); | ||
2271 | |||
2272 | WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); | ||
2273 | WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); | ||
2274 | WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); | ||
2275 | WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); | ||
2276 | WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); | ||
2277 | WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); | ||
2278 | |||
2279 | fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); | ||
2280 | WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); | ||
2281 | |||
2282 | dce_v10_0_grph_enable(crtc, true); | ||
2283 | |||
2284 | WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, | ||
2285 | target_fb->height); | ||
2286 | |||
2287 | x &= ~3; | ||
2288 | y &= ~1; | ||
2289 | WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, | ||
2290 | (x << 16) | y); | ||
2291 | viewport_w = crtc->mode.hdisplay; | ||
2292 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; | ||
2293 | WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, | ||
2294 | (viewport_w << 16) | viewport_h); | ||
2295 | |||
2296 | /* pageflip setup */ | ||
2297 | /* make sure the flip happens during vblank rather than hblank */ | ||
2298 | tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); | ||
2299 | tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, | ||
2300 | GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); | ||
2301 | WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2302 | |||
2303 | /* set pageflip to happen only at start of vblank interval (front porch) */ | ||
2304 | WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); | ||
2305 | |||
2306 | if (!atomic && fb && fb != crtc->primary->fb) { | ||
2307 | amdgpu_fb = to_amdgpu_framebuffer(fb); | ||
2308 | rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); | ||
2309 | r = amdgpu_bo_reserve(rbo, false); | ||
2310 | if (unlikely(r != 0)) | ||
2311 | return r; | ||
2312 | amdgpu_bo_unpin(rbo); | ||
2313 | amdgpu_bo_unreserve(rbo); | ||
2314 | } | ||
2315 | |||
2316 | /* Bytes per pixel may have changed */ | ||
2317 | dce_v10_0_bandwidth_update(adev); | ||
2318 | |||
2319 | return 0; | ||
2320 | } | ||
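
One conversion in dce_v10_0_crtc_do_set_base() worth noting: GRPH_PITCH is programmed in pixels, while drm framebuffers carry the pitch in bytes, hence the divide by bytes per pixel:

    /* Illustrative: pitch conversion for a 1920-wide XRGB8888 fb. */
    u32 pitch_bytes = 7680;  /* target_fb->pitches[0] */
    u32 bpp = 32;            /* target_fb->bits_per_pixel */
    u32 fb_pitch_pixels = pitch_bytes / (bpp / 8);  /* = 1920 */
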
2321 | |||
2322 | static void dce_v10_0_set_interleave(struct drm_crtc *crtc, | ||
2323 | struct drm_display_mode *mode) | ||
2324 | { | ||
2325 | struct drm_device *dev = crtc->dev; | ||
2326 | struct amdgpu_device *adev = dev->dev_private; | ||
2327 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2328 | u32 tmp; | ||
2329 | |||
2330 | tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); | ||
2331 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
2332 | tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); | ||
2333 | else | ||
2334 | tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); | ||
2335 | WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); | ||
2336 | } | ||
2337 | |||
2338 | static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc) | ||
2339 | { | ||
2340 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2341 | struct drm_device *dev = crtc->dev; | ||
2342 | struct amdgpu_device *adev = dev->dev_private; | ||
2343 | int i; | ||
2344 | u32 tmp; | ||
2345 | |||
2346 | DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); | ||
2347 | |||
2348 | tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); | ||
2349 | tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); | ||
2350 | tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_OVL_MODE, 0); | ||
2351 | WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2352 | |||
2353 | tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); | ||
2354 | tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); | ||
2355 | WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2356 | |||
2357 | tmp = RREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset); | ||
2358 | tmp = REG_SET_FIELD(tmp, PRESCALE_OVL_CONTROL, OVL_PRESCALE_BYPASS, 1); | ||
2359 | WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2360 | |||
2361 | tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); | ||
2362 | tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); | ||
2363 | tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, OVL_INPUT_GAMMA_MODE, 0); | ||
2364 | WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2365 | |||
2366 | WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); | ||
2367 | |||
2368 | WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); | ||
2369 | WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); | ||
2370 | WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); | ||
2371 | |||
2372 | WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); | ||
2373 | WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); | ||
2374 | WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); | ||
2375 | |||
2376 | WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); | ||
2377 | WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); | ||
2378 | |||
2379 | WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); | ||
2380 | for (i = 0; i < 256; i++) { | ||
2381 | WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, | ||
2382 | (amdgpu_crtc->lut_r[i] << 20) | | ||
2383 | (amdgpu_crtc->lut_g[i] << 10) | | ||
2384 | (amdgpu_crtc->lut_b[i] << 0)); | ||
2385 | } | ||
2386 | |||
2387 | tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); | ||
2388 | tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); | ||
2389 | tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, OVL_DEGAMMA_MODE, 0); | ||
2390 | tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); | ||
2391 | WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2392 | |||
2393 | tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); | ||
2394 | tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); | ||
2395 | tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, OVL_GAMUT_REMAP_MODE, 0); | ||
2396 | WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2397 | |||
2398 | tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); | ||
2399 | tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); | ||
2400 | tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, OVL_REGAMMA_MODE, 0); | ||
2401 | WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2402 | |||
2403 | tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); | ||
2404 | tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); | ||
2405 | tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_OVL_MODE, 0); | ||
2406 | WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2407 | |||
2408 | /* XXX match this to the depth of the crtc fmt block, move to modeset? */ | ||
2409 | WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); | ||
2410 | /* XXX this only needs to be programmed once per crtc at startup, | ||
2411 | * not sure where the best place for it is | ||
2412 | */ | ||
2413 | tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); | ||
2414 | tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); | ||
2415 | WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2416 | } | ||
2417 | |||
2418 | static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder) | ||
2419 | { | ||
2420 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
2421 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
2422 | |||
2423 | switch (amdgpu_encoder->encoder_id) { | ||
2424 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
2425 | if (dig->linkb) | ||
2426 | return 1; | ||
2427 | else | ||
2428 | return 0; | ||
2429 | break; | ||
2430 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
2431 | if (dig->linkb) | ||
2432 | return 3; | ||
2433 | else | ||
2434 | return 2; | ||
2435 | break; | ||
2436 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
2437 | if (dig->linkb) | ||
2438 | return 5; | ||
2439 | else | ||
2440 | return 4; | ||
2441 | break; | ||
2442 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
2443 | return 6; | ||
2444 | break; | ||
2445 | default: | ||
2446 | DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); | ||
2447 | return 0; | ||
2448 | } | ||
2449 | } | ||
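
The switch above reduces to a closed form: DIG index = 2 * UNIPHY instance + (1 for link B), with UNIPHY3 exposing a single link. A compact equivalent, as a sketch rather than the driver's actual helper:

    /* Sketch: closed-form equivalent of the encoder mapping above. */
    static int dig_index(int uniphy_instance, bool linkb)
    {
        if (uniphy_instance == 3)
            return 6;
        return 2 * uniphy_instance + (linkb ? 1 : 0);
    }
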
2450 | |||
2451 | /** | ||
2452 | * dce_v10_0_pick_pll - Allocate a PPLL for use by the crtc. | ||
2453 | * | ||
2454 | * @crtc: drm crtc | ||
2455 | * | ||
2456 | * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors | ||
2457 | * a single PPLL can be used for all DP crtcs/encoders. For non-DP | ||
2458 | * monitors a dedicated PPLL must be used. If a particular board has | ||
2459 | * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming | ||
2460 | * as there is no need to program the PLL itself. If we are not able to | ||
2461 | * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to | ||
2462 | * avoid messing up an existing monitor. | ||
2463 | * | ||
2464 | * Asic specific PLL information | ||
2465 | * | ||
2466 | * DCE 10.x | ||
2467 | * Tonga | ||
2468 | * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) | ||
2469 | * CI | ||
2470 | * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC | ||
2471 | * | ||
2472 | */ | ||
2473 | static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc) | ||
2474 | { | ||
2475 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2476 | struct drm_device *dev = crtc->dev; | ||
2477 | struct amdgpu_device *adev = dev->dev_private; | ||
2478 | u32 pll_in_use; | ||
2479 | int pll; | ||
2480 | |||
2481 | if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { | ||
2482 | if (adev->clock.dp_extclk) | ||
2483 | /* skip PPLL programming if using ext clock */ | ||
2484 | return ATOM_PPLL_INVALID; | ||
2485 | else { | ||
2486 | /* use the same PPLL for all DP monitors */ | ||
2487 | pll = amdgpu_pll_get_shared_dp_ppll(crtc); | ||
2488 | if (pll != ATOM_PPLL_INVALID) | ||
2489 | return pll; | ||
2490 | } | ||
2491 | } else { | ||
2492 | /* use the same PPLL for all monitors with the same clock */ | ||
2493 | pll = amdgpu_pll_get_shared_nondp_ppll(crtc); | ||
2494 | if (pll != ATOM_PPLL_INVALID) | ||
2495 | return pll; | ||
2496 | } | ||
2497 | |||
2498 | /* DCE10 has PPLL0, PPLL1, and PPLL2 */ | ||
2499 | pll_in_use = amdgpu_pll_get_use_mask(crtc); | ||
2500 | if (!(pll_in_use & (1 << ATOM_PPLL2))) | ||
2501 | return ATOM_PPLL2; | ||
2502 | if (!(pll_in_use & (1 << ATOM_PPLL1))) | ||
2503 | return ATOM_PPLL1; | ||
2504 | if (!(pll_in_use & (1 << ATOM_PPLL0))) | ||
2505 | return ATOM_PPLL0; | ||
2506 | DRM_ERROR("unable to allocate a PPLL\n"); | ||
2507 | return ATOM_PPLL_INVALID; | ||
2508 | } | ||
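pick_pll() leans on amdgpu_pll_get_use_mask() to learn which PPLLs the other crtcs have already claimed, then simply takes the first free one, scanning PPLL2, PPLL1, PPLL0 in that order. A hedged sketch of what such a use-mask helper amounts to; the real implementation lives in amdgpu_pll.c and may differ in detail:

/* Sketch only: gather the PLL ids claimed by the other crtcs into a
 * bitmask so the allocator can scan for a free PPLL.
 */
static u32 example_pll_use_mask(struct drm_crtc *crtc)
{
	struct drm_crtc *other;
	u32 mask = 0;

	list_for_each_entry(other, &crtc->dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *ac = to_amdgpu_crtc(other);

		if (other != crtc && ac->pll_id != ATOM_PPLL_INVALID)
			mask |= 1 << ac->pll_id;
	}
	return mask;
}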
2509 | |||
2510 | static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock) | ||
2511 | { | ||
2512 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2513 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2514 | uint32_t cur_lock; | ||
2515 | |||
2516 | cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); | ||
2517 | if (lock) | ||
2518 | cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); | ||
2519 | else | ||
2520 | cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); | ||
2521 | WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); | ||
2522 | } | ||
2523 | |||
2524 | static void dce_v10_0_hide_cursor(struct drm_crtc *crtc) | ||
2525 | { | ||
2526 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2527 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2528 | u32 tmp; | ||
2529 | |||
2530 | tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); | ||
2531 | tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); | ||
2532 | WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2533 | } | ||
2534 | |||
2535 | static void dce_v10_0_show_cursor(struct drm_crtc *crtc) | ||
2536 | { | ||
2537 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2538 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2539 | u32 tmp; | ||
2540 | |||
2541 | tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); | ||
2542 | tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); | ||
2543 | tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); | ||
2544 | WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2545 | } | ||
2546 | |||
2547 | static void dce_v10_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, | ||
2548 | uint64_t gpu_addr) | ||
2549 | { | ||
2550 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2551 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2552 | |||
2553 | WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
2554 | upper_32_bits(gpu_addr)); | ||
2555 | WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
2556 | lower_32_bits(gpu_addr)); | ||
2557 | } | ||
2558 | |||
2559 | static int dce_v10_0_crtc_cursor_move(struct drm_crtc *crtc, | ||
2560 | int x, int y) | ||
2561 | { | ||
2562 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2563 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2564 | int xorigin = 0, yorigin = 0; | ||
2565 | |||
2566 | /* avivo cursors are offset into the total surface */ | ||
2567 | x += crtc->x; | ||
2568 | y += crtc->y; | ||
2569 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | ||
2570 | |||
2571 | if (x < 0) { | ||
2572 | xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); | ||
2573 | x = 0; | ||
2574 | } | ||
2575 | if (y < 0) { | ||
2576 | yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); | ||
2577 | y = 0; | ||
2578 | } | ||
2579 | |||
2580 | dce_v10_0_lock_cursor(crtc, true); | ||
2581 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); | ||
2582 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); | ||
2583 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2584 | ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); | ||
2585 | dce_v10_0_lock_cursor(crtc, false); | ||
2586 | |||
2587 | return 0; | ||
2588 | } | ||
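When the cursor is dragged past the top or left screen edge, the position registers cannot take negative values; the code above clamps the position to zero and pushes the overhang into the hot-spot registers instead, so the cursor image appears to slide off screen. A self-contained illustration of that clamping, with hypothetical values, in plain C:

#include <assert.h>

/* Illustrative: a cursor requested at x = -10 ends up at position 0
 * with a hot spot of 10, i.e. the first 10 columns hang off screen.
 */
static void cursor_clamp_example(void)
{
	int x = -10, xorigin = 0;
	const int max_cursor_width = 128;

	if (x < 0) {
		xorigin = -x < max_cursor_width - 1 ? -x : max_cursor_width - 1;
		x = 0;
	}
	assert(x == 0 && xorigin == 10);
}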
2589 | |||
2590 | static int dce_v10_0_crtc_cursor_set(struct drm_crtc *crtc, | ||
2591 | struct drm_file *file_priv, | ||
2592 | uint32_t handle, | ||
2593 | uint32_t width, | ||
2594 | uint32_t height) | ||
2595 | { | ||
2596 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2597 | struct drm_gem_object *obj; | ||
2598 | struct amdgpu_bo *robj; | ||
2599 | uint64_t gpu_addr; | ||
2600 | int ret; | ||
2601 | |||
2602 | if (!handle) { | ||
2603 | /* turn off cursor */ | ||
2604 | dce_v10_0_hide_cursor(crtc); | ||
2605 | obj = NULL; | ||
2606 | goto unpin; | ||
2607 | } | ||
2608 | |||
2609 | if ((width > amdgpu_crtc->max_cursor_width) || | ||
2610 | (height > amdgpu_crtc->max_cursor_height)) { | ||
2611 | DRM_ERROR("bad cursor width or height %d x %d\n", width, height); | ||
2612 | return -EINVAL; | ||
2613 | } | ||
2614 | |||
2615 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); | ||
2616 | if (!obj) { | ||
2617 | DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); | ||
2618 | return -ENOENT; | ||
2619 | } | ||
2620 | |||
2621 | robj = gem_to_amdgpu_bo(obj); | ||
2622 | ret = amdgpu_bo_reserve(robj, false); | ||
2623 | if (unlikely(ret != 0)) | ||
2624 | goto fail; | ||
2625 | ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, | ||
2626 | 0, &gpu_addr); | ||
2627 | amdgpu_bo_unreserve(robj); | ||
2628 | if (ret) | ||
2629 | goto fail; | ||
2630 | |||
2631 | amdgpu_crtc->cursor_width = width; | ||
2632 | amdgpu_crtc->cursor_height = height; | ||
2633 | |||
2634 | dce_v10_0_lock_cursor(crtc, true); | ||
2635 | dce_v10_0_set_cursor(crtc, obj, gpu_addr); | ||
2636 | dce_v10_0_show_cursor(crtc); | ||
2637 | dce_v10_0_lock_cursor(crtc, false); | ||
2638 | |||
2639 | unpin: | ||
2640 | if (amdgpu_crtc->cursor_bo) { | ||
2641 | robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); | ||
2642 | ret = amdgpu_bo_reserve(robj, false); | ||
2643 | if (likely(ret == 0)) { | ||
2644 | amdgpu_bo_unpin(robj); | ||
2645 | amdgpu_bo_unreserve(robj); | ||
2646 | } | ||
2647 | drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); | ||
2648 | } | ||
2649 | |||
2650 | amdgpu_crtc->cursor_bo = obj; | ||
2651 | return 0; | ||
2652 | fail: | ||
2653 | drm_gem_object_unreference_unlocked(obj); | ||
2654 | |||
2655 | return ret; | ||
2656 | } | ||
2657 | |||
2658 | static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
2659 | u16 *blue, uint32_t start, uint32_t size) | ||
2660 | { | ||
2661 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2662 | int end = (start + size > 256) ? 256 : start + size, i; | ||
2663 | |||
2664 | /* userspace palettes are always correct as is */ | ||
2665 | for (i = start; i < end; i++) { | ||
2666 | amdgpu_crtc->lut_r[i] = red[i] >> 6; | ||
2667 | amdgpu_crtc->lut_g[i] = green[i] >> 6; | ||
2668 | amdgpu_crtc->lut_b[i] = blue[i] >> 6; | ||
2669 | } | ||
2670 | dce_v10_0_crtc_load_lut(crtc); | ||
2671 | } | ||
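The right shift by 6 narrows the 16-bit per-channel gamma values DRM userspace hands in down to the 10-bit entries the DCE LUT stores; the identity ramp built in dce_v10_0_crtc_init() below (i << 2) is the matching 10-bit linear default. A one-line sanity check of the depth conversion:

#include <assert.h>
#include <stdint.h>

/* Illustrative: 16-bit userspace gamma -> 10-bit hardware LUT entry. */
static void lut_depth_example(void)
{
	uint16_t full_scale = 0xffff;

	assert((full_scale >> 6) == 0x3ff);	/* 65535 -> 1023 */
	assert((255 << 2) == 1020);		/* top of the init-time ramp */
}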
2672 | |||
2673 | static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) | ||
2674 | { | ||
2675 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2676 | |||
2677 | drm_crtc_cleanup(crtc); | ||
2678 | destroy_workqueue(amdgpu_crtc->pflip_queue); | ||
2679 | kfree(amdgpu_crtc); | ||
2680 | } | ||
2681 | |||
2682 | static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = { | ||
2683 | .cursor_set = dce_v10_0_crtc_cursor_set, | ||
2684 | .cursor_move = dce_v10_0_crtc_cursor_move, | ||
2685 | .gamma_set = dce_v10_0_crtc_gamma_set, | ||
2686 | .set_config = amdgpu_crtc_set_config, | ||
2687 | .destroy = dce_v10_0_crtc_destroy, | ||
2688 | .page_flip = amdgpu_crtc_page_flip, | ||
2689 | }; | ||
2690 | |||
2691 | static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
2692 | { | ||
2693 | struct drm_device *dev = crtc->dev; | ||
2694 | struct amdgpu_device *adev = dev->dev_private; | ||
2695 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2696 | |||
2697 | switch (mode) { | ||
2698 | case DRM_MODE_DPMS_ON: | ||
2699 | amdgpu_crtc->enabled = true; | ||
2700 | amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); | ||
2701 | dce_v10_0_vga_enable(crtc, true); | ||
2702 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); | ||
2703 | dce_v10_0_vga_enable(crtc, false); | ||
2704 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); | ||
2705 | dce_v10_0_crtc_load_lut(crtc); | ||
2706 | break; | ||
2707 | case DRM_MODE_DPMS_STANDBY: | ||
2708 | case DRM_MODE_DPMS_SUSPEND: | ||
2709 | case DRM_MODE_DPMS_OFF: | ||
2710 | drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); | ||
2711 | if (amdgpu_crtc->enabled) { | ||
2712 | dce_v10_0_vga_enable(crtc, true); | ||
2713 | amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); | ||
2714 | dce_v10_0_vga_enable(crtc, false); | ||
2715 | } | ||
2716 | amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); | ||
2717 | amdgpu_crtc->enabled = false; | ||
2718 | break; | ||
2719 | } | ||
2720 | /* adjust pm to dpms */ | ||
2721 | amdgpu_pm_compute_clocks(adev); | ||
2722 | } | ||
2723 | |||
2724 | static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc) | ||
2725 | { | ||
2726 | /* disable crtc pair power gating before programming */ | ||
2727 | amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); | ||
2728 | amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); | ||
2729 | dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
2730 | } | ||
2731 | |||
2732 | static void dce_v10_0_crtc_commit(struct drm_crtc *crtc) | ||
2733 | { | ||
2734 | dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); | ||
2735 | amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); | ||
2736 | } | ||
2737 | |||
2738 | static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) | ||
2739 | { | ||
2740 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2741 | struct drm_device *dev = crtc->dev; | ||
2742 | struct amdgpu_device *adev = dev->dev_private; | ||
2743 | struct amdgpu_atom_ss ss; | ||
2744 | int i; | ||
2745 | |||
2746 | dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
2747 | if (crtc->primary->fb) { | ||
2748 | int r; | ||
2749 | struct amdgpu_framebuffer *amdgpu_fb; | ||
2750 | struct amdgpu_bo *rbo; | ||
2751 | |||
2752 | amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); | ||
2753 | rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); | ||
2754 | r = amdgpu_bo_reserve(rbo, false); | ||
2755 | if (unlikely(r)) | ||
2756 | DRM_ERROR("failed to reserve rbo before unpin\n"); | ||
2757 | else { | ||
2758 | amdgpu_bo_unpin(rbo); | ||
2759 | amdgpu_bo_unreserve(rbo); | ||
2760 | } | ||
2761 | } | ||
2762 | /* disable the GRPH */ | ||
2763 | dce_v10_0_grph_enable(crtc, false); | ||
2764 | |||
2765 | amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); | ||
2766 | |||
2767 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
2768 | if (adev->mode_info.crtcs[i] && | ||
2769 | adev->mode_info.crtcs[i]->enabled && | ||
2770 | i != amdgpu_crtc->crtc_id && | ||
2771 | amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { | ||
2772 | /* another crtc is using this pll; don't turn | ||
2773 | * off the pll | ||
2774 | */ | ||
2775 | goto done; | ||
2776 | } | ||
2777 | } | ||
2778 | |||
2779 | switch (amdgpu_crtc->pll_id) { | ||
2780 | case ATOM_PPLL0: | ||
2781 | case ATOM_PPLL1: | ||
2782 | case ATOM_PPLL2: | ||
2783 | /* disable the ppll */ | ||
2784 | amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, | ||
2785 | 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); | ||
2786 | break; | ||
2787 | default: | ||
2788 | break; | ||
2789 | } | ||
2790 | done: | ||
2791 | amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; | ||
2792 | amdgpu_crtc->adjusted_clock = 0; | ||
2793 | amdgpu_crtc->encoder = NULL; | ||
2794 | amdgpu_crtc->connector = NULL; | ||
2795 | } | ||
2796 | |||
2797 | static int dce_v10_0_crtc_mode_set(struct drm_crtc *crtc, | ||
2798 | struct drm_display_mode *mode, | ||
2799 | struct drm_display_mode *adjusted_mode, | ||
2800 | int x, int y, struct drm_framebuffer *old_fb) | ||
2801 | { | ||
2802 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2803 | |||
2804 | if (!amdgpu_crtc->adjusted_clock) | ||
2805 | return -EINVAL; | ||
2806 | |||
2807 | amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); | ||
2808 | amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); | ||
2809 | dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); | ||
2810 | amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); | ||
2811 | amdgpu_atombios_crtc_scaler_setup(crtc); | ||
2812 | /* update the hw version for dpm */ | ||
2813 | amdgpu_crtc->hw_mode = *adjusted_mode; | ||
2814 | |||
2815 | return 0; | ||
2816 | } | ||
2817 | |||
2818 | static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc, | ||
2819 | const struct drm_display_mode *mode, | ||
2820 | struct drm_display_mode *adjusted_mode) | ||
2821 | { | ||
2822 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2823 | struct drm_device *dev = crtc->dev; | ||
2824 | struct drm_encoder *encoder; | ||
2825 | |||
2826 | /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ | ||
2827 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
2828 | if (encoder->crtc == crtc) { | ||
2829 | amdgpu_crtc->encoder = encoder; | ||
2830 | amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); | ||
2831 | break; | ||
2832 | } | ||
2833 | } | ||
2834 | if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { | ||
2835 | amdgpu_crtc->encoder = NULL; | ||
2836 | amdgpu_crtc->connector = NULL; | ||
2837 | return false; | ||
2838 | } | ||
2839 | if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | ||
2840 | return false; | ||
2841 | if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) | ||
2842 | return false; | ||
2843 | /* pick pll */ | ||
2844 | amdgpu_crtc->pll_id = dce_v10_0_pick_pll(crtc); | ||
2845 | /* if we can't get a PPLL for a non-DP encoder, fail */ | ||
2846 | if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && | ||
2847 | !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) | ||
2848 | return false; | ||
2849 | |||
2850 | return true; | ||
2851 | } | ||
2852 | |||
2853 | static int dce_v10_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, | ||
2854 | struct drm_framebuffer *old_fb) | ||
2855 | { | ||
2856 | return dce_v10_0_crtc_do_set_base(crtc, old_fb, x, y, 0); | ||
2857 | } | ||
2858 | |||
2859 | static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc, | ||
2860 | struct drm_framebuffer *fb, | ||
2861 | int x, int y, enum mode_set_atomic state) | ||
2862 | { | ||
2863 | return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1); | ||
2864 | } | ||
2865 | |||
2866 | static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = { | ||
2867 | .dpms = dce_v10_0_crtc_dpms, | ||
2868 | .mode_fixup = dce_v10_0_crtc_mode_fixup, | ||
2869 | .mode_set = dce_v10_0_crtc_mode_set, | ||
2870 | .mode_set_base = dce_v10_0_crtc_set_base, | ||
2871 | .mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic, | ||
2872 | .prepare = dce_v10_0_crtc_prepare, | ||
2873 | .commit = dce_v10_0_crtc_commit, | ||
2874 | .load_lut = dce_v10_0_crtc_load_lut, | ||
2875 | .disable = dce_v10_0_crtc_disable, | ||
2876 | }; | ||
2877 | |||
2878 | static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index) | ||
2879 | { | ||
2880 | struct amdgpu_crtc *amdgpu_crtc; | ||
2881 | int i; | ||
2882 | |||
2883 | amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + | ||
2884 | (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); | ||
2885 | if (amdgpu_crtc == NULL) | ||
2886 | return -ENOMEM; | ||
2887 | |||
2888 | drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs); | ||
2889 | |||
2890 | drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); | ||
2891 | amdgpu_crtc->crtc_id = index; | ||
2892 | amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); | ||
2893 | adev->mode_info.crtcs[index] = amdgpu_crtc; | ||
2894 | |||
2895 | amdgpu_crtc->max_cursor_width = 128; | ||
2896 | amdgpu_crtc->max_cursor_height = 128; | ||
2897 | adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; | ||
2898 | adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; | ||
2899 | |||
2900 | for (i = 0; i < 256; i++) { | ||
2901 | amdgpu_crtc->lut_r[i] = i << 2; | ||
2902 | amdgpu_crtc->lut_g[i] = i << 2; | ||
2903 | amdgpu_crtc->lut_b[i] = i << 2; | ||
2904 | } | ||
2905 | |||
2906 | switch (amdgpu_crtc->crtc_id) { | ||
2907 | case 0: | ||
2908 | default: | ||
2909 | amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; | ||
2910 | break; | ||
2911 | case 1: | ||
2912 | amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; | ||
2913 | break; | ||
2914 | case 2: | ||
2915 | amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; | ||
2916 | break; | ||
2917 | case 3: | ||
2918 | amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; | ||
2919 | break; | ||
2920 | case 4: | ||
2921 | amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; | ||
2922 | break; | ||
2923 | case 5: | ||
2924 | amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; | ||
2925 | break; | ||
2926 | } | ||
2927 | |||
2928 | amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; | ||
2929 | amdgpu_crtc->adjusted_clock = 0; | ||
2930 | amdgpu_crtc->encoder = NULL; | ||
2931 | amdgpu_crtc->connector = NULL; | ||
2932 | drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs); | ||
2933 | |||
2934 | return 0; | ||
2935 | } | ||
2936 | |||
2937 | static int dce_v10_0_early_init(struct amdgpu_device *adev) | ||
2938 | { | ||
2939 | adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg; | ||
2940 | adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg; | ||
2941 | |||
2942 | dce_v10_0_set_display_funcs(adev); | ||
2943 | dce_v10_0_set_irq_funcs(adev); | ||
2944 | |||
2945 | switch (adev->asic_type) { | ||
2946 | case CHIP_TONGA: | ||
2947 | adev->mode_info.num_crtc = 6; /* XXX 7??? */ | ||
2948 | adev->mode_info.num_hpd = 6; | ||
2949 | adev->mode_info.num_dig = 7; | ||
2950 | break; | ||
2951 | default: | ||
2952 | /* FIXME: not supported yet */ | ||
2953 | return -EINVAL; | ||
2954 | } | ||
2955 | |||
2956 | return 0; | ||
2957 | } | ||
2958 | |||
2959 | static int dce_v10_0_sw_init(struct amdgpu_device *adev) | ||
2960 | { | ||
2961 | int r, i; | ||
2962 | |||
2963 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
2964 | r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); | ||
2965 | if (r) | ||
2966 | return r; | ||
2967 | } | ||
2968 | |||
2969 | for (i = 8; i < 20; i += 2) { | ||
2970 | r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); | ||
2971 | if (r) | ||
2972 | return r; | ||
2973 | } | ||
2974 | |||
2975 | /* HPD hotplug */ | ||
2976 | r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); | ||
2977 | if (r) | ||
2978 | return r; | ||
2979 | |||
2980 | adev->mode_info.mode_config_initialized = true; | ||
2981 | |||
2982 | adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; | ||
2983 | |||
2984 | adev->ddev->mode_config.max_width = 16384; | ||
2985 | adev->ddev->mode_config.max_height = 16384; | ||
2986 | |||
2987 | adev->ddev->mode_config.preferred_depth = 24; | ||
2988 | adev->ddev->mode_config.prefer_shadow = 1; | ||
2989 | |||
2990 | adev->ddev->mode_config.fb_base = adev->mc.aper_base; | ||
2991 | |||
2992 | r = amdgpu_modeset_create_props(adev); | ||
2993 | if (r) | ||
2994 | return r; | ||
2995 | |||
2996 | adev->ddev->mode_config.max_width = 16384; | ||
2997 | adev->ddev->mode_config.max_height = 16384; | ||
2998 | |||
2999 | /* allocate crtcs */ | ||
3000 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
3001 | r = dce_v10_0_crtc_init(adev, i); | ||
3002 | if (r) | ||
3003 | return r; | ||
3004 | } | ||
3005 | |||
3006 | if (amdgpu_atombios_get_connector_info_from_object_table(adev)) | ||
3007 | amdgpu_print_display_setup(adev->ddev); | ||
3008 | else | ||
3009 | return -EINVAL; | ||
3010 | |||
3011 | /* setup afmt */ | ||
3012 | dce_v10_0_afmt_init(adev); | ||
3013 | |||
3014 | r = dce_v10_0_audio_init(adev); | ||
3015 | if (r) | ||
3016 | return r; | ||
3017 | |||
3018 | drm_kms_helper_poll_init(adev->ddev); | ||
3019 | |||
3020 | return r; | ||
3021 | } | ||
3022 | |||
3023 | static int dce_v10_0_sw_fini(struct amdgpu_device *adev) | ||
3024 | { | ||
3025 | kfree(adev->mode_info.bios_hardcoded_edid); | ||
3026 | |||
3027 | drm_kms_helper_poll_fini(adev->ddev); | ||
3028 | |||
3029 | dce_v10_0_audio_fini(adev); | ||
3030 | |||
3031 | dce_v10_0_afmt_fini(adev); | ||
3032 | |||
3033 | drm_mode_config_cleanup(adev->ddev); | ||
3034 | adev->mode_info.mode_config_initialized = false; | ||
3035 | |||
3036 | return 0; | ||
3037 | } | ||
3038 | |||
3039 | static int dce_v10_0_hw_init(struct amdgpu_device *adev) | ||
3040 | { | ||
3041 | int i; | ||
3042 | |||
3043 | dce_v10_0_init_golden_registers(adev); | ||
3044 | |||
3045 | /* init dig PHYs, disp eng pll */ | ||
3046 | amdgpu_atombios_encoder_init_dig(adev); | ||
3047 | amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); | ||
3048 | |||
3049 | /* initialize hpd */ | ||
3050 | dce_v10_0_hpd_init(adev); | ||
3051 | |||
3052 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
3053 | dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | ||
3054 | } | ||
3055 | |||
3056 | return 0; | ||
3057 | } | ||
3058 | |||
3059 | static int dce_v10_0_hw_fini(struct amdgpu_device *adev) | ||
3060 | { | ||
3061 | int i; | ||
3062 | |||
3063 | dce_v10_0_hpd_fini(adev); | ||
3064 | |||
3065 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
3066 | dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | ||
3067 | } | ||
3068 | |||
3069 | return 0; | ||
3070 | } | ||
3071 | |||
3072 | static int dce_v10_0_suspend(struct amdgpu_device *adev) | ||
3073 | { | ||
3074 | struct drm_connector *connector; | ||
3075 | |||
3076 | drm_kms_helper_poll_disable(adev->ddev); | ||
3077 | |||
3078 | /* turn off display hw */ | ||
3079 | list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { | ||
3080 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
3081 | } | ||
3082 | |||
3083 | amdgpu_atombios_scratch_regs_save(adev); | ||
3084 | |||
3085 | dce_v10_0_hpd_fini(adev); | ||
3086 | |||
3087 | return 0; | ||
3088 | } | ||
3089 | |||
3090 | static int dce_v10_0_resume(struct amdgpu_device *adev) | ||
3091 | { | ||
3092 | struct drm_connector *connector; | ||
3093 | |||
3094 | dce_v10_0_init_golden_registers(adev); | ||
3095 | |||
3096 | amdgpu_atombios_scratch_regs_restore(adev); | ||
3097 | |||
3098 | /* init dig PHYs, disp eng pll */ | ||
3099 | amdgpu_atombios_encoder_init_dig(adev); | ||
3100 | amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); | ||
3101 | /* turn on the BL */ | ||
3102 | if (adev->mode_info.bl_encoder) { | ||
3103 | u8 bl_level = amdgpu_display_backlight_get_level(adev, | ||
3104 | adev->mode_info.bl_encoder); | ||
3105 | amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, | ||
3106 | bl_level); | ||
3107 | } | ||
3108 | |||
3109 | /* initialize hpd */ | ||
3110 | dce_v10_0_hpd_init(adev); | ||
3111 | |||
3112 | /* blat the mode back in */ | ||
3113 | drm_helper_resume_force_mode(adev->ddev); | ||
3114 | /* turn on display hw */ | ||
3115 | list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { | ||
3116 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
3117 | } | ||
3118 | |||
3119 | drm_kms_helper_poll_enable(adev->ddev); | ||
3120 | |||
3121 | return 0; | ||
3122 | } | ||
3123 | |||
3124 | static bool dce_v10_0_is_idle(struct amdgpu_device *adev) | ||
3125 | { | ||
3126 | /* XXX todo */ | ||
3127 | return true; | ||
3128 | } | ||
3129 | |||
3130 | static int dce_v10_0_wait_for_idle(struct amdgpu_device *adev) | ||
3131 | { | ||
3132 | /* XXX todo */ | ||
3133 | return 0; | ||
3134 | } | ||
3135 | |||
3136 | static void dce_v10_0_print_status(struct amdgpu_device *adev) | ||
3137 | { | ||
3138 | dev_info(adev->dev, "DCE 10.x registers\n"); | ||
3139 | /* XXX todo */ | ||
3140 | } | ||
3141 | |||
3142 | static int dce_v10_0_soft_reset(struct amdgpu_device *adev) | ||
3143 | { | ||
3144 | u32 srbm_soft_reset = 0, tmp; | ||
3145 | |||
3146 | if (dce_v10_0_is_display_hung(adev)) | ||
3147 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; | ||
3148 | |||
3149 | if (srbm_soft_reset) { | ||
3150 | dce_v10_0_print_status(adev); | ||
3151 | |||
3152 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3153 | tmp |= srbm_soft_reset; | ||
3154 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
3155 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
3156 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3157 | |||
3158 | udelay(50); | ||
3159 | |||
3160 | tmp &= ~srbm_soft_reset; | ||
3161 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
3162 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3163 | |||
3164 | /* Wait a little for things to settle down */ | ||
3165 | udelay(50); | ||
3166 | dce_v10_0_print_status(adev); | ||
3167 | } | ||
3168 | return 0; | ||
3169 | } | ||
3170 | |||
3171 | static void dce_v10_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, | ||
3172 | int crtc, | ||
3173 | enum amdgpu_interrupt_state state) | ||
3174 | { | ||
3175 | u32 lb_interrupt_mask; | ||
3176 | |||
3177 | if (crtc >= adev->mode_info.num_crtc) { | ||
3178 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
3179 | return; | ||
3180 | } | ||
3181 | |||
3182 | switch (state) { | ||
3183 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3184 | lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); | ||
3185 | lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, | ||
3186 | VBLANK_INTERRUPT_MASK, 0); | ||
3187 | WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); | ||
3188 | break; | ||
3189 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3190 | lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); | ||
3191 | lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, | ||
3192 | VBLANK_INTERRUPT_MASK, 1); | ||
3193 | WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); | ||
3194 | break; | ||
3195 | default: | ||
3196 | break; | ||
3197 | } | ||
3198 | } | ||
3199 | |||
3200 | static void dce_v10_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, | ||
3201 | int crtc, | ||
3202 | enum amdgpu_interrupt_state state) | ||
3203 | { | ||
3204 | u32 lb_interrupt_mask; | ||
3205 | |||
3206 | if (crtc >= adev->mode_info.num_crtc) { | ||
3207 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
3208 | return; | ||
3209 | } | ||
3210 | |||
3211 | switch (state) { | ||
3212 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3213 | lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); | ||
3214 | lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, | ||
3215 | VLINE_INTERRUPT_MASK, 0); | ||
3216 | WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); | ||
3217 | break; | ||
3218 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3219 | lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); | ||
3220 | lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, | ||
3221 | VLINE_INTERRUPT_MASK, 1); | ||
3222 | WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); | ||
3223 | break; | ||
3224 | default: | ||
3225 | break; | ||
3226 | } | ||
3227 | } | ||
3228 | |||
3229 | static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev, | ||
3230 | struct amdgpu_irq_src *source, | ||
3231 | unsigned hpd, | ||
3232 | enum amdgpu_interrupt_state state) | ||
3233 | { | ||
3234 | u32 tmp; | ||
3235 | |||
3236 | if (hpd >= adev->mode_info.num_hpd) { | ||
3237 | DRM_DEBUG("invalid hpd %d\n", hpd); | ||
3238 | return 0; | ||
3239 | } | ||
3240 | |||
3241 | switch (state) { | ||
3242 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3243 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); | ||
3244 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); | ||
3245 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); | ||
3246 | break; | ||
3247 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3248 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); | ||
3249 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); | ||
3250 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); | ||
3251 | break; | ||
3252 | default: | ||
3253 | break; | ||
3254 | } | ||
3255 | |||
3256 | return 0; | ||
3257 | } | ||
3258 | |||
3259 | static int dce_v10_0_set_crtc_irq_state(struct amdgpu_device *adev, | ||
3260 | struct amdgpu_irq_src *source, | ||
3261 | unsigned type, | ||
3262 | enum amdgpu_interrupt_state state) | ||
3263 | { | ||
3264 | switch (type) { | ||
3265 | case AMDGPU_CRTC_IRQ_VBLANK1: | ||
3266 | dce_v10_0_set_crtc_vblank_interrupt_state(adev, 0, state); | ||
3267 | break; | ||
3268 | case AMDGPU_CRTC_IRQ_VBLANK2: | ||
3269 | dce_v10_0_set_crtc_vblank_interrupt_state(adev, 1, state); | ||
3270 | break; | ||
3271 | case AMDGPU_CRTC_IRQ_VBLANK3: | ||
3272 | dce_v10_0_set_crtc_vblank_interrupt_state(adev, 2, state); | ||
3273 | break; | ||
3274 | case AMDGPU_CRTC_IRQ_VBLANK4: | ||
3275 | dce_v10_0_set_crtc_vblank_interrupt_state(adev, 3, state); | ||
3276 | break; | ||
3277 | case AMDGPU_CRTC_IRQ_VBLANK5: | ||
3278 | dce_v10_0_set_crtc_vblank_interrupt_state(adev, 4, state); | ||
3279 | break; | ||
3280 | case AMDGPU_CRTC_IRQ_VBLANK6: | ||
3281 | dce_v10_0_set_crtc_vblank_interrupt_state(adev, 5, state); | ||
3282 | break; | ||
3283 | case AMDGPU_CRTC_IRQ_VLINE1: | ||
3284 | dce_v10_0_set_crtc_vline_interrupt_state(adev, 0, state); | ||
3285 | break; | ||
3286 | case AMDGPU_CRTC_IRQ_VLINE2: | ||
3287 | dce_v10_0_set_crtc_vline_interrupt_state(adev, 1, state); | ||
3288 | break; | ||
3289 | case AMDGPU_CRTC_IRQ_VLINE3: | ||
3290 | dce_v10_0_set_crtc_vline_interrupt_state(adev, 2, state); | ||
3291 | break; | ||
3292 | case AMDGPU_CRTC_IRQ_VLINE4: | ||
3293 | dce_v10_0_set_crtc_vline_interrupt_state(adev, 3, state); | ||
3294 | break; | ||
3295 | case AMDGPU_CRTC_IRQ_VLINE5: | ||
3296 | dce_v10_0_set_crtc_vline_interrupt_state(adev, 4, state); | ||
3297 | break; | ||
3298 | case AMDGPU_CRTC_IRQ_VLINE6: | ||
3299 | dce_v10_0_set_crtc_vline_interrupt_state(adev, 5, state); | ||
3300 | break; | ||
3301 | default: | ||
3302 | break; | ||
3303 | } | ||
3304 | return 0; | ||
3305 | } | ||
3306 | |||
3307 | static int dce_v10_0_set_pageflip_irq_state(struct amdgpu_device *adev, | ||
3308 | struct amdgpu_irq_src *src, | ||
3309 | unsigned type, | ||
3310 | enum amdgpu_interrupt_state state) | ||
3311 | { | ||
3312 | u32 reg, reg_block; | ||
3313 | /* now deal with page flip IRQ */ | ||
3314 | switch (type) { | ||
3315 | case AMDGPU_PAGEFLIP_IRQ_D1: | ||
3316 | reg_block = CRTC0_REGISTER_OFFSET; | ||
3317 | break; | ||
3318 | case AMDGPU_PAGEFLIP_IRQ_D2: | ||
3319 | reg_block = CRTC1_REGISTER_OFFSET; | ||
3320 | break; | ||
3321 | case AMDGPU_PAGEFLIP_IRQ_D3: | ||
3322 | reg_block = CRTC2_REGISTER_OFFSET; | ||
3323 | break; | ||
3324 | case AMDGPU_PAGEFLIP_IRQ_D4: | ||
3325 | reg_block = CRTC3_REGISTER_OFFSET; | ||
3326 | break; | ||
3327 | case AMDGPU_PAGEFLIP_IRQ_D5: | ||
3328 | reg_block = CRTC4_REGISTER_OFFSET; | ||
3329 | break; | ||
3330 | case AMDGPU_PAGEFLIP_IRQ_D6: | ||
3331 | reg_block = CRTC5_REGISTER_OFFSET; | ||
3332 | break; | ||
3333 | default: | ||
3334 | DRM_ERROR("invalid pageflip crtc %d\n", type); | ||
3335 | return -EINVAL; | ||
3336 | } | ||
3337 | |||
3338 | reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); | ||
3339 | if (state == AMDGPU_IRQ_STATE_DISABLE) | ||
3340 | WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); | ||
3341 | else | ||
3342 | WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); | ||
3343 | |||
3344 | return 0; | ||
3345 | } | ||
3346 | |||
3347 | static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev, | ||
3348 | struct amdgpu_irq_src *source, | ||
3349 | struct amdgpu_iv_entry *entry) | ||
3350 | { | ||
3351 | int reg_block; | ||
3352 | unsigned long flags; | ||
3353 | unsigned crtc_id; | ||
3354 | struct amdgpu_crtc *amdgpu_crtc; | ||
3355 | struct amdgpu_flip_work *works; | ||
3356 | |||
3357 | crtc_id = (entry->src_id - 8) >> 1; | ||
3358 | amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; | ||
3359 | |||
3360 | /* ack the interrupt */ | ||
3361 | switch (crtc_id) { | ||
3362 | case AMDGPU_PAGEFLIP_IRQ_D1: | ||
3363 | reg_block = CRTC0_REGISTER_OFFSET; | ||
3364 | break; | ||
3365 | case AMDGPU_PAGEFLIP_IRQ_D2: | ||
3366 | reg_block = CRTC1_REGISTER_OFFSET; | ||
3367 | break; | ||
3368 | case AMDGPU_PAGEFLIP_IRQ_D3: | ||
3369 | reg_block = CRTC2_REGISTER_OFFSET; | ||
3370 | break; | ||
3371 | case AMDGPU_PAGEFLIP_IRQ_D4: | ||
3372 | reg_block = CRTC3_REGISTER_OFFSET; | ||
3373 | break; | ||
3374 | case AMDGPU_PAGEFLIP_IRQ_D5: | ||
3375 | reg_block = CRTC4_REGISTER_OFFSET; | ||
3376 | break; | ||
3377 | case AMDGPU_PAGEFLIP_IRQ_D6: | ||
3378 | reg_block = CRTC5_REGISTER_OFFSET; | ||
3379 | break; | ||
3380 | default: | ||
3381 | DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); | ||
3382 | return -EINVAL; | ||
3383 | } | ||
3384 | |||
3385 | if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) | ||
3386 | WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); | ||
3387 | |||
3388 | /* the IRQ can fire during early bring-up, before the crtc exists */ | ||
3389 | if (amdgpu_crtc == NULL) | ||
3390 | return 0; | ||
3391 | |||
3392 | spin_lock_irqsave(&adev->ddev->event_lock, flags); | ||
3393 | works = amdgpu_crtc->pflip_works; | ||
3394 | if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { | ||
3395 | DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " | ||
3396 | "AMDGPU_FLIP_SUBMITTED(%d)\n", | ||
3397 | amdgpu_crtc->pflip_status, | ||
3398 | AMDGPU_FLIP_SUBMITTED); | ||
3399 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | ||
3400 | return 0; | ||
3401 | } | ||
3402 | |||
3403 | /* page flip completed. clean up */ | ||
3404 | amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; | ||
3405 | amdgpu_crtc->pflip_works = NULL; | ||
3406 | |||
3407 | /* wake up userspace */ | ||
3408 | if (works->event) | ||
3409 | drm_send_vblank_event(adev->ddev, crtc_id, works->event); | ||
3410 | |||
3411 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | ||
3412 | |||
3413 | drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); | ||
3414 | amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); | ||
3415 | queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); | ||
3416 | |||
3417 | return 0; | ||
3418 | } | ||
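The pageflip sources registered in dce_v10_0_sw_init() use interrupt src_ids 8, 10, ..., 18, one per crtc and two apart, which is why the handler recovers the crtc index as (src_id - 8) >> 1. The ack switch then reuses the AMDGPU_PAGEFLIP_IRQ_D* enumerators as case labels for that plain index, which only lines up because default C enum numbering starts them at 0. A small illustration of the round trip, assuming six crtcs as on Tonga:

#include <assert.h>

/* Illustrative: registration-side src_id vs. handler-side crtc index. */
static void pageflip_srcid_example(void)
{
	unsigned int crtc, src_id;

	for (crtc = 0; crtc < 6; crtc++) {
		src_id = 8 + 2 * crtc;			/* sw_init() loop  */
		assert(((src_id - 8) >> 1) == crtc);	/* pageflip_irq()  */
	}
}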
3419 | |||
3420 | static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, | ||
3421 | int hpd) | ||
3422 | { | ||
3423 | u32 tmp; | ||
3424 | |||
3425 | if (hpd >= adev->mode_info.num_hpd) { | ||
3426 | DRM_DEBUG("invalid hpd %d\n", hpd); | ||
3427 | return; | ||
3428 | } | ||
3429 | |||
3430 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); | ||
3431 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); | ||
3432 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); | ||
3433 | } | ||
3434 | |||
3435 | static void dce_v10_0_crtc_vblank_int_ack(struct amdgpu_device *adev, | ||
3436 | int crtc) | ||
3437 | { | ||
3438 | u32 tmp; | ||
3439 | |||
3440 | if (crtc >= adev->mode_info.num_crtc) { | ||
3441 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
3442 | return; | ||
3443 | } | ||
3444 | |||
3445 | tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); | ||
3446 | tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); | ||
3447 | WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); | ||
3448 | } | ||
3449 | |||
3450 | static void dce_v10_0_crtc_vline_int_ack(struct amdgpu_device *adev, | ||
3451 | int crtc) | ||
3452 | { | ||
3453 | u32 tmp; | ||
3454 | |||
3455 | if (crtc >= adev->mode_info.num_crtc) { | ||
3456 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
3457 | return; | ||
3458 | } | ||
3459 | |||
3460 | tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); | ||
3461 | tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); | ||
3462 | WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); | ||
3463 | } | ||
3464 | |||
3465 | static int dce_v10_0_crtc_irq(struct amdgpu_device *adev, | ||
3466 | struct amdgpu_irq_src *source, | ||
3467 | struct amdgpu_iv_entry *entry) | ||
3468 | { | ||
3469 | unsigned crtc = entry->src_id - 1; | ||
3470 | uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); | ||
3471 | unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); | ||
3472 | |||
3473 | switch (entry->src_data) { | ||
3474 | case 0: /* vblank */ | ||
3475 | if (disp_int & interrupt_status_offsets[crtc].vblank) { | ||
3476 | dce_v10_0_crtc_vblank_int_ack(adev, crtc); | ||
3477 | if (amdgpu_irq_enabled(adev, source, irq_type)) { | ||
3478 | drm_handle_vblank(adev->ddev, crtc); | ||
3479 | } | ||
3480 | DRM_DEBUG("IH: D%d vblank\n", crtc + 1); | ||
3481 | } | ||
3482 | break; | ||
3483 | case 1: /* vline */ | ||
3484 | if (disp_int & interrupt_status_offsets[crtc].vline) { | ||
3485 | dce_v10_0_crtc_vline_int_ack(adev, crtc); | ||
3486 | DRM_DEBUG("IH: D%d vline\n", crtc + 1); | ||
3487 | } | ||
3488 | break; | ||
3489 | default: | ||
3490 | DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); | ||
3491 | break; | ||
3492 | } | ||
3493 | |||
3494 | return 0; | ||
3495 | } | ||
3496 | |||
3497 | static int dce_v10_0_hpd_irq(struct amdgpu_device *adev, | ||
3498 | struct amdgpu_irq_src *source, | ||
3499 | struct amdgpu_iv_entry *entry) | ||
3500 | { | ||
3501 | uint32_t disp_int, mask; | ||
3502 | unsigned hpd; | ||
3503 | |||
3504 | if (entry->src_data >= adev->mode_info.num_hpd) { | ||
3505 | DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); | ||
3506 | return 0; | ||
3507 | } | ||
3508 | |||
3509 | hpd = entry->src_data; | ||
3510 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); | ||
3511 | mask = interrupt_status_offsets[hpd].hpd; | ||
3512 | |||
3513 | if (disp_int & mask) { | ||
3514 | dce_v10_0_hpd_int_ack(adev, hpd); | ||
3515 | schedule_work(&adev->hotplug_work); | ||
3516 | DRM_DEBUG("IH: HPD%d\n", hpd + 1); | ||
3517 | } | ||
3518 | |||
3519 | return 0; | ||
3520 | } | ||
3521 | |||
3522 | static int dce_v10_0_set_clockgating_state(struct amdgpu_device *adev, | ||
3523 | enum amdgpu_clockgating_state state) | ||
3524 | { | ||
3525 | return 0; | ||
3526 | } | ||
3527 | |||
3528 | static int dce_v10_0_set_powergating_state(struct amdgpu_device *adev, | ||
3529 | enum amdgpu_powergating_state state) | ||
3530 | { | ||
3531 | return 0; | ||
3532 | } | ||
3533 | |||
3534 | const struct amdgpu_ip_funcs dce_v10_0_ip_funcs = { | ||
3535 | .early_init = dce_v10_0_early_init, | ||
3536 | .late_init = NULL, | ||
3537 | .sw_init = dce_v10_0_sw_init, | ||
3538 | .sw_fini = dce_v10_0_sw_fini, | ||
3539 | .hw_init = dce_v10_0_hw_init, | ||
3540 | .hw_fini = dce_v10_0_hw_fini, | ||
3541 | .suspend = dce_v10_0_suspend, | ||
3542 | .resume = dce_v10_0_resume, | ||
3543 | .is_idle = dce_v10_0_is_idle, | ||
3544 | .wait_for_idle = dce_v10_0_wait_for_idle, | ||
3545 | .soft_reset = dce_v10_0_soft_reset, | ||
3546 | .print_status = dce_v10_0_print_status, | ||
3547 | .set_clockgating_state = dce_v10_0_set_clockgating_state, | ||
3548 | .set_powergating_state = dce_v10_0_set_powergating_state, | ||
3549 | }; | ||
3550 | |||
3551 | static void | ||
3552 | dce_v10_0_encoder_mode_set(struct drm_encoder *encoder, | ||
3553 | struct drm_display_mode *mode, | ||
3554 | struct drm_display_mode *adjusted_mode) | ||
3555 | { | ||
3556 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3557 | |||
3558 | amdgpu_encoder->pixel_clock = adjusted_mode->clock; | ||
3559 | |||
3560 | /* need to call this here rather than in prepare() since we need some crtc info */ | ||
3561 | amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
3562 | |||
3563 | /* set scaler clears this on some chips */ | ||
3564 | dce_v10_0_set_interleave(encoder->crtc, mode); | ||
3565 | |||
3566 | if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { | ||
3567 | dce_v10_0_afmt_enable(encoder, true); | ||
3568 | dce_v10_0_afmt_setmode(encoder, adjusted_mode); | ||
3569 | } | ||
3570 | } | ||
3571 | |||
3572 | static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder) | ||
3573 | { | ||
3574 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
3575 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3576 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
3577 | |||
3578 | if ((amdgpu_encoder->active_device & | ||
3579 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || | ||
3580 | (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != | ||
3581 | ENCODER_OBJECT_ID_NONE)) { | ||
3582 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
3583 | if (dig) { | ||
3584 | dig->dig_encoder = dce_v10_0_pick_dig_encoder(encoder); | ||
3585 | if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) | ||
3586 | dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; | ||
3587 | } | ||
3588 | } | ||
3589 | |||
3590 | amdgpu_atombios_scratch_regs_lock(adev, true); | ||
3591 | |||
3592 | if (connector) { | ||
3593 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
3594 | |||
3595 | /* select the clock/data port if it uses a router */ | ||
3596 | if (amdgpu_connector->router.cd_valid) | ||
3597 | amdgpu_i2c_router_select_cd_port(amdgpu_connector); | ||
3598 | |||
3599 | /* turn eDP panel on for mode set */ | ||
3600 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
3601 | amdgpu_atombios_encoder_set_edp_panel_power(connector, | ||
3602 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
3603 | } | ||
3604 | |||
3605 | /* this is needed for the pll/ss setup to work correctly in some cases */ | ||
3606 | amdgpu_atombios_encoder_set_crtc_source(encoder); | ||
3607 | /* set up the FMT blocks */ | ||
3608 | dce_v10_0_program_fmt(encoder); | ||
3609 | } | ||
3610 | |||
3611 | static void dce_v10_0_encoder_commit(struct drm_encoder *encoder) | ||
3612 | { | ||
3613 | struct drm_device *dev = encoder->dev; | ||
3614 | struct amdgpu_device *adev = dev->dev_private; | ||
3615 | |||
3616 | /* need to call this here as we need the crtc set up */ | ||
3617 | amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); | ||
3618 | amdgpu_atombios_scratch_regs_lock(adev, false); | ||
3619 | } | ||
3620 | |||
3621 | static void dce_v10_0_encoder_disable(struct drm_encoder *encoder) | ||
3622 | { | ||
3623 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3624 | struct amdgpu_encoder_atom_dig *dig; | ||
3625 | |||
3626 | amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
3627 | |||
3628 | if (amdgpu_atombios_encoder_is_digital(encoder)) { | ||
3629 | if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | ||
3630 | dce_v10_0_afmt_enable(encoder, false); | ||
3631 | dig = amdgpu_encoder->enc_priv; | ||
3632 | dig->dig_encoder = -1; | ||
3633 | } | ||
3634 | amdgpu_encoder->active_device = 0; | ||
3635 | } | ||
3636 | |||
3637 | /* these are handled by the primary encoders */ | ||
3638 | static void dce_v10_0_ext_prepare(struct drm_encoder *encoder) | ||
3639 | { | ||
3640 | |||
3641 | } | ||
3642 | |||
3643 | static void dce_v10_0_ext_commit(struct drm_encoder *encoder) | ||
3644 | { | ||
3645 | |||
3646 | } | ||
3647 | |||
3648 | static void | ||
3649 | dce_v10_0_ext_mode_set(struct drm_encoder *encoder, | ||
3650 | struct drm_display_mode *mode, | ||
3651 | struct drm_display_mode *adjusted_mode) | ||
3652 | { | ||
3653 | |||
3654 | } | ||
3655 | |||
3656 | static void dce_v10_0_ext_disable(struct drm_encoder *encoder) | ||
3657 | { | ||
3658 | |||
3659 | } | ||
3660 | |||
3661 | static void | ||
3662 | dce_v10_0_ext_dpms(struct drm_encoder *encoder, int mode) | ||
3663 | { | ||
3664 | |||
3665 | } | ||
3666 | |||
3667 | static bool dce_v10_0_ext_mode_fixup(struct drm_encoder *encoder, | ||
3668 | const struct drm_display_mode *mode, | ||
3669 | struct drm_display_mode *adjusted_mode) | ||
3670 | { | ||
3671 | return true; | ||
3672 | } | ||
3673 | |||
3674 | static const struct drm_encoder_helper_funcs dce_v10_0_ext_helper_funcs = { | ||
3675 | .dpms = dce_v10_0_ext_dpms, | ||
3676 | .mode_fixup = dce_v10_0_ext_mode_fixup, | ||
3677 | .prepare = dce_v10_0_ext_prepare, | ||
3678 | .mode_set = dce_v10_0_ext_mode_set, | ||
3679 | .commit = dce_v10_0_ext_commit, | ||
3680 | .disable = dce_v10_0_ext_disable, | ||
3681 | /* no detect for TMDS/LVDS yet */ | ||
3682 | }; | ||
3683 | |||
3684 | static const struct drm_encoder_helper_funcs dce_v10_0_dig_helper_funcs = { | ||
3685 | .dpms = amdgpu_atombios_encoder_dpms, | ||
3686 | .mode_fixup = amdgpu_atombios_encoder_mode_fixup, | ||
3687 | .prepare = dce_v10_0_encoder_prepare, | ||
3688 | .mode_set = dce_v10_0_encoder_mode_set, | ||
3689 | .commit = dce_v10_0_encoder_commit, | ||
3690 | .disable = dce_v10_0_encoder_disable, | ||
3691 | .detect = amdgpu_atombios_encoder_dig_detect, | ||
3692 | }; | ||
3693 | |||
3694 | static const struct drm_encoder_helper_funcs dce_v10_0_dac_helper_funcs = { | ||
3695 | .dpms = amdgpu_atombios_encoder_dpms, | ||
3696 | .mode_fixup = amdgpu_atombios_encoder_mode_fixup, | ||
3697 | .prepare = dce_v10_0_encoder_prepare, | ||
3698 | .mode_set = dce_v10_0_encoder_mode_set, | ||
3699 | .commit = dce_v10_0_encoder_commit, | ||
3700 | .detect = amdgpu_atombios_encoder_dac_detect, | ||
3701 | }; | ||
3702 | |||
3703 | static void dce_v10_0_encoder_destroy(struct drm_encoder *encoder) | ||
3704 | { | ||
3705 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3706 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
3707 | amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); | ||
3708 | kfree(amdgpu_encoder->enc_priv); | ||
3709 | drm_encoder_cleanup(encoder); | ||
3710 | kfree(amdgpu_encoder); | ||
3711 | } | ||
3712 | |||
3713 | static const struct drm_encoder_funcs dce_v10_0_encoder_funcs = { | ||
3714 | .destroy = dce_v10_0_encoder_destroy, | ||
3715 | }; | ||
3716 | |||
3717 | static void dce_v10_0_encoder_add(struct amdgpu_device *adev, | ||
3718 | uint32_t encoder_enum, | ||
3719 | uint32_t supported_device, | ||
3720 | u16 caps) | ||
3721 | { | ||
3722 | struct drm_device *dev = adev->ddev; | ||
3723 | struct drm_encoder *encoder; | ||
3724 | struct amdgpu_encoder *amdgpu_encoder; | ||
3725 | |||
3726 | /* see if we already added it */ | ||
3727 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
3728 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3729 | if (amdgpu_encoder->encoder_enum == encoder_enum) { | ||
3730 | amdgpu_encoder->devices |= supported_device; | ||
3731 | return; | ||
3732 | } | ||
3733 | |||
3734 | } | ||
3735 | |||
3736 | /* add a new one */ | ||
3737 | amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); | ||
3738 | if (!amdgpu_encoder) | ||
3739 | return; | ||
3740 | |||
3741 | encoder = &amdgpu_encoder->base; | ||
3742 | switch (adev->mode_info.num_crtc) { | ||
3743 | case 1: | ||
3744 | encoder->possible_crtcs = 0x1; | ||
3745 | break; | ||
3746 | case 2: | ||
3747 | default: | ||
3748 | encoder->possible_crtcs = 0x3; | ||
3749 | break; | ||
3750 | case 4: | ||
3751 | encoder->possible_crtcs = 0xf; | ||
3752 | break; | ||
3753 | case 6: | ||
3754 | encoder->possible_crtcs = 0x3f; | ||
3755 | break; | ||
3756 | } | ||
3757 | |||
3758 | amdgpu_encoder->enc_priv = NULL; | ||
3759 | |||
3760 | amdgpu_encoder->encoder_enum = encoder_enum; | ||
3761 | amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
3762 | amdgpu_encoder->devices = supported_device; | ||
3763 | amdgpu_encoder->rmx_type = RMX_OFF; | ||
3764 | amdgpu_encoder->underscan_type = UNDERSCAN_OFF; | ||
3765 | amdgpu_encoder->is_ext_encoder = false; | ||
3766 | amdgpu_encoder->caps = caps; | ||
3767 | |||
3768 | switch (amdgpu_encoder->encoder_id) { | ||
3769 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
3770 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
3771 | drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, | ||
3772 | DRM_MODE_ENCODER_DAC); | ||
3773 | drm_encoder_helper_add(encoder, &dce_v10_0_dac_helper_funcs); | ||
3774 | break; | ||
3775 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
3776 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
3777 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
3778 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
3779 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
3780 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
3781 | amdgpu_encoder->rmx_type = RMX_FULL; | ||
3782 | drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, | ||
3783 | DRM_MODE_ENCODER_LVDS); | ||
3784 | amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); | ||
3785 | } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { | ||
3786 | drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, | ||
3787 | DRM_MODE_ENCODER_DAC); | ||
3788 | amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); | ||
3789 | } else { | ||
3790 | drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, | ||
3791 | DRM_MODE_ENCODER_TMDS); | ||
3792 | amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); | ||
3793 | } | ||
3794 | drm_encoder_helper_add(encoder, &dce_v10_0_dig_helper_funcs); | ||
3795 | break; | ||
3796 | case ENCODER_OBJECT_ID_SI170B: | ||
3797 | case ENCODER_OBJECT_ID_CH7303: | ||
3798 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: | ||
3799 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: | ||
3800 | case ENCODER_OBJECT_ID_TITFP513: | ||
3801 | case ENCODER_OBJECT_ID_VT1623: | ||
3802 | case ENCODER_OBJECT_ID_HDMI_SI1930: | ||
3803 | case ENCODER_OBJECT_ID_TRAVIS: | ||
3804 | case ENCODER_OBJECT_ID_NUTMEG: | ||
3805 | /* these are handled by the primary encoders */ | ||
3806 | amdgpu_encoder->is_ext_encoder = true; | ||
3807 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
3808 | drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, | ||
3809 | DRM_MODE_ENCODER_LVDS); | ||
3810 | else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) | ||
3811 | drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, | ||
3812 | DRM_MODE_ENCODER_DAC); | ||
3813 | else | ||
3814 | drm_encoder_init(dev, encoder, &dce_v10_0_encoder_funcs, | ||
3815 | DRM_MODE_ENCODER_TMDS); | ||
3816 | drm_encoder_helper_add(encoder, &dce_v10_0_ext_helper_funcs); | ||
3817 | break; | ||
3818 | } | ||
3819 | } | ||
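Each possible_crtcs value in the switch above is simply a mask with one bit per crtc the encoder may drive, i.e. (1 << num_crtc) - 1 for every supported crtc count. An equivalent computed form, shown for illustration only; the driver keeps the explicit switch, presumably because num_crtc takes only a few known values:

/* Illustrative: possible_crtcs as a computed mask instead of a switch. */
static unsigned int any_crtc_mask(int num_crtc)
{
	return (1u << num_crtc) - 1;	/* 1->0x1, 2->0x3, 4->0xf, 6->0x3f */
}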
3820 | |||
3821 | static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { | ||
3822 | .set_vga_render_state = &dce_v10_0_set_vga_render_state, | ||
3823 | .bandwidth_update = &dce_v10_0_bandwidth_update, | ||
3824 | .vblank_get_counter = &dce_v10_0_vblank_get_counter, | ||
3825 | .vblank_wait = &dce_v10_0_vblank_wait, | ||
3826 | .is_display_hung = &dce_v10_0_is_display_hung, | ||
3827 | .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, | ||
3828 | .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, | ||
3829 | .hpd_sense = &dce_v10_0_hpd_sense, | ||
3830 | .hpd_set_polarity = &dce_v10_0_hpd_set_polarity, | ||
3831 | .hpd_get_gpio_reg = &dce_v10_0_hpd_get_gpio_reg, | ||
3832 | .page_flip = &dce_v10_0_page_flip, | ||
3833 | .page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos, | ||
3834 | .add_encoder = &dce_v10_0_encoder_add, | ||
3835 | .add_connector = &amdgpu_connector_add, | ||
3836 | .stop_mc_access = &dce_v10_0_stop_mc_access, | ||
3837 | .resume_mc_access = &dce_v10_0_resume_mc_access, | ||
3838 | }; | ||
3839 | |||
3840 | static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev) | ||
3841 | { | ||
3842 | if (adev->mode_info.funcs == NULL) | ||
3843 | adev->mode_info.funcs = &dce_v10_0_display_funcs; | ||
3844 | } | ||
3845 | |||
3846 | static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = { | ||
3847 | .set = dce_v10_0_set_crtc_irq_state, | ||
3848 | .process = dce_v10_0_crtc_irq, | ||
3849 | }; | ||
3850 | |||
3851 | static const struct amdgpu_irq_src_funcs dce_v10_0_pageflip_irq_funcs = { | ||
3852 | .set = dce_v10_0_set_pageflip_irq_state, | ||
3853 | .process = dce_v10_0_pageflip_irq, | ||
3854 | }; | ||
3855 | |||
3856 | static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = { | ||
3857 | .set = dce_v10_0_set_hpd_irq_state, | ||
3858 | .process = dce_v10_0_hpd_irq, | ||
3859 | }; | ||
3860 | |||
3861 | static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev) | ||
3862 | { | ||
3863 | adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; | ||
3864 | adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs; | ||
3865 | |||
3866 | adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; | ||
3867 | adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs; | ||
3868 | |||
3869 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | ||
3870 | adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs; | ||
3871 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h new file mode 100644 index 000000000000..72ca20d1793c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __DCE_V10_0_H__ | ||
25 | #define __DCE_V10_0_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs dce_v10_0_ip_funcs; | ||
28 | |||
29 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c new file mode 100644 index 000000000000..55fef15a4fcf --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -0,0 +1,3871 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include "drmP.h" | ||
24 | #include "amdgpu.h" | ||
25 | #include "amdgpu_pm.h" | ||
26 | #include "amdgpu_i2c.h" | ||
27 | #include "vid.h" | ||
28 | #include "atom.h" | ||
29 | #include "amdgpu_atombios.h" | ||
30 | #include "atombios_crtc.h" | ||
31 | #include "atombios_encoders.h" | ||
32 | #include "amdgpu_pll.h" | ||
33 | #include "amdgpu_connectors.h" | ||
34 | |||
35 | #include "dce/dce_11_0_d.h" | ||
36 | #include "dce/dce_11_0_sh_mask.h" | ||
37 | #include "dce/dce_11_0_enum.h" | ||
38 | #include "oss/oss_3_0_d.h" | ||
39 | #include "oss/oss_3_0_sh_mask.h" | ||
40 | #include "gmc/gmc_8_1_d.h" | ||
41 | #include "gmc/gmc_8_1_sh_mask.h" | ||
42 | |||
43 | static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); | ||
44 | static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); | ||
45 | |||
46 | static const u32 crtc_offsets[] = | ||
47 | { | ||
48 | CRTC0_REGISTER_OFFSET, | ||
49 | CRTC1_REGISTER_OFFSET, | ||
50 | CRTC2_REGISTER_OFFSET, | ||
51 | CRTC3_REGISTER_OFFSET, | ||
52 | CRTC4_REGISTER_OFFSET, | ||
53 | CRTC5_REGISTER_OFFSET, | ||
54 | CRTC6_REGISTER_OFFSET | ||
55 | }; | ||
56 | |||
57 | static const u32 hpd_offsets[] = | ||
58 | { | ||
59 | HPD0_REGISTER_OFFSET, | ||
60 | HPD1_REGISTER_OFFSET, | ||
61 | HPD2_REGISTER_OFFSET, | ||
62 | HPD3_REGISTER_OFFSET, | ||
63 | HPD4_REGISTER_OFFSET, | ||
64 | HPD5_REGISTER_OFFSET | ||
65 | }; | ||
66 | |||
67 | static const uint32_t dig_offsets[] = { | ||
68 | DIG0_REGISTER_OFFSET, | ||
69 | DIG1_REGISTER_OFFSET, | ||
70 | DIG2_REGISTER_OFFSET, | ||
71 | DIG3_REGISTER_OFFSET, | ||
72 | DIG4_REGISTER_OFFSET, | ||
73 | DIG5_REGISTER_OFFSET, | ||
74 | DIG6_REGISTER_OFFSET, | ||
75 | DIG7_REGISTER_OFFSET, | ||
76 | DIG8_REGISTER_OFFSET | ||
77 | }; | ||
78 | |||
79 | static const struct { | ||
80 | uint32_t reg; | ||
81 | uint32_t vblank; | ||
82 | uint32_t vline; | ||
83 | uint32_t hpd; | ||
84 | |||
85 | } interrupt_status_offsets[] = { { | ||
86 | .reg = mmDISP_INTERRUPT_STATUS, | ||
87 | .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, | ||
88 | .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, | ||
89 | .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK | ||
90 | }, { | ||
91 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, | ||
92 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, | ||
93 | .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, | ||
94 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK | ||
95 | }, { | ||
96 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, | ||
97 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, | ||
98 | .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, | ||
99 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK | ||
100 | }, { | ||
101 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, | ||
102 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, | ||
103 | .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, | ||
104 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK | ||
105 | }, { | ||
106 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, | ||
107 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, | ||
108 | .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, | ||
109 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK | ||
110 | }, { | ||
111 | .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, | ||
112 | .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, | ||
113 | .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, | ||
114 | .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK | ||
115 | } }; | ||
116 | |||
117 | static const u32 cz_golden_settings_a11[] = | ||
118 | { | ||
119 | mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, | ||
120 | mmFBC_MISC, 0x1f311fff, 0x14300000, | ||
121 | }; | ||
122 | |||
123 | static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) | ||
124 | { | ||
125 | switch (adev->asic_type) { | ||
126 | case CHIP_CARRIZO: | ||
127 | amdgpu_program_register_sequence(adev, | ||
128 | cz_golden_settings_a11, | ||
129 | (const u32)ARRAY_SIZE(cz_golden_settings_a11)); | ||
130 | break; | ||
131 | default: | ||
132 | break; | ||
133 | } | ||
134 | } | ||
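[Editor's note] The golden-register table above is consumed as (reg, and_mask, or_mask) triplets. A minimal sketch of the read-modify-write loop that amdgpu_program_register_sequence() is expected to perform per triplet (modeled on the common helper elsewhere in the driver; treat the exact details as an assumption, this loop is not part of the hunk):

    /* illustrative sketch only */
    for (i = 0; i + 2 < array_size; i += 3) {
            reg      = registers[i + 0];
            and_mask = registers[i + 1];    /* bits to clear */
            or_mask  = registers[i + 2];    /* bits to set */
            if (and_mask == 0xffffffff) {
                    tmp = or_mask;          /* whole-register overwrite */
            } else {
                    tmp = RREG32(reg);      /* read-modify-write */
                    tmp &= ~and_mask;
                    tmp |= or_mask;
            }
            WREG32(reg, tmp);
    }

Under that reading, the mmFBC_MISC entry above clears the bits in 0x1f311fff and then sets the 0x14300000 pattern.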
135 | |||
136 | static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev, | ||
137 | u32 block_offset, u32 reg) | ||
138 | { | ||
139 | unsigned long flags; | ||
140 | u32 r; | ||
141 | |||
142 | spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); | ||
143 | WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); | ||
144 | r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); | ||
145 | spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); | ||
146 | |||
147 | return r; | ||
148 | } | ||
149 | |||
150 | static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev, | ||
151 | u32 block_offset, u32 reg, u32 v) | ||
152 | { | ||
153 | unsigned long flags; | ||
154 | |||
155 | spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); | ||
156 | WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); | ||
157 | WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); | ||
158 | spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); | ||
159 | } | ||
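[Editor's note] Both endpoint helpers implement the classic index/data indirect-register pattern: the INDEX register selects an internal endpoint register, the DATA register carries the payload, and the spinlock keeps the two-step sequence atomic against concurrent users. Assuming the usual RREG32_AUDIO_ENDPT()/WREG32_AUDIO_ENDPT() macros simply dispatch to these callbacks (plumbing not shown in this hunk), a caller looks roughly like:

    /* hypothetical usage sketch */
    u32 cfg;
    cfg = RREG32_AUDIO_ENDPT(pin_offset,
                             ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
    /* ... modify cfg ... */
    WREG32_AUDIO_ENDPT(pin_offset,
                       ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, cfg);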
160 | |||
161 | static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc) | ||
162 | { | ||
163 | if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & | ||
164 | CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK) | ||
165 | return true; | ||
166 | else | ||
167 | return false; | ||
168 | } | ||
169 | |||
170 | static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc) | ||
171 | { | ||
172 | u32 pos1, pos2; | ||
173 | |||
174 | pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); | ||
175 | pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); | ||
176 | |||
177 | if (pos1 != pos2) | ||
178 | return true; | ||
179 | else | ||
180 | return false; | ||
181 | } | ||
182 | |||
183 | /** | ||
184 | * dce_v11_0_vblank_wait - vblank wait asic callback. | ||
185 | * | ||
186 | * @adev: amdgpu_device pointer | ||
187 | * @crtc: crtc to wait for vblank on | ||
188 | * | ||
189 | * Wait for vblank on the requested crtc (evergreen+). | ||
190 | */ | ||
191 | static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc) | ||
192 | { | ||
193 | unsigned i = 0; | ||
194 | |||
195 | if (crtc >= adev->mode_info.num_crtc) | ||
196 | return; | ||
197 | |||
198 | if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK)) | ||
199 | return; | ||
200 | |||
201 | /* depending on when we hit vblank, we may be close to active; if so, | ||
202 | * wait for another frame. | ||
203 | */ | ||
204 | while (dce_v11_0_is_in_vblank(adev, crtc)) { | ||
205 | if (i++ % 100 == 0) { | ||
206 | if (!dce_v11_0_is_counter_moving(adev, crtc)) | ||
207 | break; | ||
208 | } | ||
209 | } | ||
210 | |||
211 | while (!dce_v11_0_is_in_vblank(adev, crtc)) { | ||
212 | if (i++ % 100 == 0) { | ||
213 | if (!dce_v11_0_is_counter_moving(adev, crtc)) | ||
214 | break; | ||
215 | } | ||
216 | } | ||
217 | } | ||
218 | |||
219 | static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) | ||
220 | { | ||
221 | if (crtc >= adev->mode_info.num_crtc) | ||
222 | return 0; | ||
223 | else | ||
224 | return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); | ||
225 | } | ||
226 | |||
227 | /** | ||
228 | * dce_v11_0_page_flip - pageflip callback. | ||
229 | * | ||
230 | * @adev: amdgpu_device pointer | ||
231 | * @crtc_id: crtc to cleanup pageflip on | ||
232 | * @crtc_base: new address of the crtc (GPU MC address) | ||
233 | * | ||
234 | * Does the actual pageflip (evergreen+). | ||
235 | * During vblank we take the crtc lock and wait for the update_pending | ||
236 | * bit to go high; when it does, we release the lock and allow the | ||
237 | * double buffered update to take place. | ||
238 | * The flip itself then completes on a following vblank. | ||
239 | */ | ||
240 | static void dce_v11_0_page_flip(struct amdgpu_device *adev, | ||
241 | int crtc_id, u64 crtc_base) | ||
242 | { | ||
243 | struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; | ||
244 | u32 tmp = RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset); | ||
245 | int i; | ||
246 | |||
247 | /* Lock the graphics update lock */ | ||
248 | tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); | ||
249 | WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); | ||
250 | |||
251 | /* update the scanout addresses */ | ||
252 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
253 | upper_32_bits(crtc_base)); | ||
254 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
255 | lower_32_bits(crtc_base)); | ||
256 | |||
257 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
258 | upper_32_bits(crtc_base)); | ||
259 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
260 | lower_32_bits(crtc_base)); | ||
261 | |||
262 | /* Wait for update_pending to go high. */ | ||
263 | for (i = 0; i < adev->usec_timeout; i++) { | ||
264 | if (RREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset) & | ||
265 | GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) | ||
266 | break; | ||
267 | udelay(1); | ||
268 | } | ||
269 | DRM_DEBUG("Update pending now high. Releasing GRPH_UPDATE_LOCK.\n"); | ||
270 | |||
271 | /* Unlock the lock, so double-buffering can take place inside vblank */ | ||
272 | tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); | ||
273 | WREG32(mmGRPH_UPDATE + amdgpu_crtc->crtc_offset, tmp); | ||
274 | } | ||
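[Editor's note] The flip path leans on REG_SET_FIELD(), which rewrites one named bit-field of a cached register value without disturbing the other bits. Conceptually it behaves like the helper below; the real macro builds the *_MASK/*__SHIFT identifiers by token pasting, so take the exact spellings here as an illustration, not the driver's definition:

    static inline u32 set_field(u32 orig, u32 mask, u32 shift, u32 val)
    {
            return (orig & ~mask) | ((val << shift) & mask);
    }
    /* REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1) then reads as
     * set_field(tmp, GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK,
     *           GRPH_UPDATE__GRPH_UPDATE_LOCK__SHIFT, 1);
     */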
275 | |||
276 | static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, | ||
277 | u32 *vbl, u32 *position) | ||
278 | { | ||
279 | if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) | ||
280 | return -EINVAL; | ||
281 | |||
282 | *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); | ||
283 | *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | /** | ||
289 | * dce_v11_0_hpd_sense - hpd sense callback. | ||
290 | * | ||
291 | * @adev: amdgpu_device pointer | ||
292 | * @hpd: hpd (hotplug detect) pin | ||
293 | * | ||
294 | * Checks if a digital monitor is connected (evergreen+). | ||
295 | * Returns true if connected, false if not connected. | ||
296 | */ | ||
297 | static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, | ||
298 | enum amdgpu_hpd_id hpd) | ||
299 | { | ||
300 | int idx; | ||
301 | bool connected = false; | ||
302 | |||
303 | switch (hpd) { | ||
304 | case AMDGPU_HPD_1: | ||
305 | idx = 0; | ||
306 | break; | ||
307 | case AMDGPU_HPD_2: | ||
308 | idx = 1; | ||
309 | break; | ||
310 | case AMDGPU_HPD_3: | ||
311 | idx = 2; | ||
312 | break; | ||
313 | case AMDGPU_HPD_4: | ||
314 | idx = 3; | ||
315 | break; | ||
316 | case AMDGPU_HPD_5: | ||
317 | idx = 4; | ||
318 | break; | ||
319 | case AMDGPU_HPD_6: | ||
320 | idx = 5; | ||
321 | break; | ||
322 | default: | ||
323 | return connected; | ||
324 | } | ||
325 | |||
326 | if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[idx]) & | ||
327 | DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) | ||
328 | connected = true; | ||
329 | |||
330 | return connected; | ||
331 | } | ||
332 | |||
333 | /** | ||
334 | * dce_v11_0_hpd_set_polarity - hpd set polarity callback. | ||
335 | * | ||
336 | * @adev: amdgpu_device pointer | ||
337 | * @hpd: hpd (hotplug detect) pin | ||
338 | * | ||
339 | * Set the polarity of the hpd pin (evergreen+). | ||
340 | */ | ||
341 | static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, | ||
342 | enum amdgpu_hpd_id hpd) | ||
343 | { | ||
344 | u32 tmp; | ||
345 | bool connected = dce_v11_0_hpd_sense(adev, hpd); | ||
346 | int idx; | ||
347 | |||
348 | switch (hpd) { | ||
349 | case AMDGPU_HPD_1: | ||
350 | idx = 0; | ||
351 | break; | ||
352 | case AMDGPU_HPD_2: | ||
353 | idx = 1; | ||
354 | break; | ||
355 | case AMDGPU_HPD_3: | ||
356 | idx = 2; | ||
357 | break; | ||
358 | case AMDGPU_HPD_4: | ||
359 | idx = 3; | ||
360 | break; | ||
361 | case AMDGPU_HPD_5: | ||
362 | idx = 4; | ||
363 | break; | ||
364 | case AMDGPU_HPD_6: | ||
365 | idx = 5; | ||
366 | break; | ||
367 | default: | ||
368 | return; | ||
369 | } | ||
370 | |||
371 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx]); | ||
372 | if (connected) | ||
373 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); | ||
374 | else | ||
375 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); | ||
376 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[idx], tmp); | ||
377 | } | ||
378 | |||
379 | /** | ||
380 | * dce_v11_0_hpd_init - hpd setup callback. | ||
381 | * | ||
382 | * @adev: amdgpu_device pointer | ||
383 | * | ||
384 | * Setup the hpd pins used by the card (evergreen+). | ||
385 | * Enable the pin, set the polarity, and enable the hpd interrupts. | ||
386 | */ | ||
387 | static void dce_v11_0_hpd_init(struct amdgpu_device *adev) | ||
388 | { | ||
389 | struct drm_device *dev = adev->ddev; | ||
390 | struct drm_connector *connector; | ||
391 | u32 tmp; | ||
392 | int idx; | ||
393 | |||
394 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
395 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
396 | |||
397 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || | ||
398 | connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { | ||
399 | /* don't try to enable hpd on eDP or LVDS to avoid breaking the | ||
400 | * aux dp channel on the iMac; this helps (but does not completely fix) | ||
401 | * https://bugzilla.redhat.com/show_bug.cgi?id=726143 | ||
402 | * and also avoids interrupt storms during dpms. | ||
403 | */ | ||
404 | continue; | ||
405 | } | ||
406 | |||
407 | switch (amdgpu_connector->hpd.hpd) { | ||
408 | case AMDGPU_HPD_1: | ||
409 | idx = 0; | ||
410 | break; | ||
411 | case AMDGPU_HPD_2: | ||
412 | idx = 1; | ||
413 | break; | ||
414 | case AMDGPU_HPD_3: | ||
415 | idx = 2; | ||
416 | break; | ||
417 | case AMDGPU_HPD_4: | ||
418 | idx = 3; | ||
419 | break; | ||
420 | case AMDGPU_HPD_5: | ||
421 | idx = 4; | ||
422 | break; | ||
423 | case AMDGPU_HPD_6: | ||
424 | idx = 5; | ||
425 | break; | ||
426 | default: | ||
427 | continue; | ||
428 | } | ||
429 | |||
430 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | ||
431 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); | ||
432 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | ||
433 | |||
434 | tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx]); | ||
435 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | ||
436 | DC_HPD_CONNECT_INT_DELAY, | ||
437 | AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); | ||
438 | tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, | ||
439 | DC_HPD_DISCONNECT_INT_DELAY, | ||
440 | AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); | ||
441 | WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[idx], tmp); | ||
442 | |||
443 | dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); | ||
444 | amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | ||
445 | } | ||
446 | } | ||
447 | |||
448 | /** | ||
449 | * dce_v11_0_hpd_fini - hpd tear down callback. | ||
450 | * | ||
451 | * @adev: amdgpu_device pointer | ||
452 | * | ||
453 | * Tear down the hpd pins used by the card (evergreen+). | ||
454 | * Disable the hpd interrupts. | ||
455 | */ | ||
456 | static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) | ||
457 | { | ||
458 | struct drm_device *dev = adev->ddev; | ||
459 | struct drm_connector *connector; | ||
460 | u32 tmp; | ||
461 | int idx; | ||
462 | |||
463 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
464 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
465 | |||
466 | switch (amdgpu_connector->hpd.hpd) { | ||
467 | case AMDGPU_HPD_1: | ||
468 | idx = 0; | ||
469 | break; | ||
470 | case AMDGPU_HPD_2: | ||
471 | idx = 1; | ||
472 | break; | ||
473 | case AMDGPU_HPD_3: | ||
474 | idx = 2; | ||
475 | break; | ||
476 | case AMDGPU_HPD_4: | ||
477 | idx = 3; | ||
478 | break; | ||
479 | case AMDGPU_HPD_5: | ||
480 | idx = 4; | ||
481 | break; | ||
482 | case AMDGPU_HPD_6: | ||
483 | idx = 5; | ||
484 | break; | ||
485 | default: | ||
486 | continue; | ||
487 | } | ||
488 | |||
489 | tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[idx]); | ||
490 | tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); | ||
491 | WREG32(mmDC_HPD_CONTROL + hpd_offsets[idx], tmp); | ||
492 | |||
493 | amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); | ||
494 | } | ||
495 | } | ||
496 | |||
497 | static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) | ||
498 | { | ||
499 | return mmDC_GPIO_HPD_A; | ||
500 | } | ||
501 | |||
502 | static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) | ||
503 | { | ||
504 | u32 crtc_hung = 0; | ||
505 | u32 crtc_status[6]; | ||
506 | u32 i, j, tmp; | ||
507 | |||
508 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
509 | tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); | ||
510 | if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) { | ||
511 | crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); | ||
512 | crtc_hung |= (1 << i); | ||
513 | } | ||
514 | } | ||
515 | |||
516 | for (j = 0; j < 10; j++) { | ||
517 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
518 | if (crtc_hung & (1 << i)) { | ||
519 | tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); | ||
520 | if (tmp != crtc_status[i]) | ||
521 | crtc_hung &= ~(1 << i); | ||
522 | } | ||
523 | } | ||
524 | if (crtc_hung == 0) | ||
525 | return false; | ||
526 | udelay(100); | ||
527 | } | ||
528 | |||
529 | return true; | ||
530 | } | ||
531 | |||
532 | static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev, | ||
533 | struct amdgpu_mode_mc_save *save) | ||
534 | { | ||
535 | u32 crtc_enabled, tmp; | ||
536 | int i; | ||
537 | |||
538 | save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL); | ||
539 | save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL); | ||
540 | |||
541 | /* disable VGA render */ | ||
542 | tmp = RREG32(mmVGA_RENDER_CONTROL); | ||
543 | tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); | ||
544 | WREG32(mmVGA_RENDER_CONTROL, tmp); | ||
545 | |||
546 | /* blank the display controllers */ | ||
547 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
548 | crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), | ||
549 | CRTC_CONTROL, CRTC_MASTER_EN); | ||
550 | if (crtc_enabled) { | ||
551 | #if 0 | ||
552 | u32 frame_count; | ||
553 | int j; | ||
554 | |||
555 | save->crtc_enabled[i] = true; | ||
556 | tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); | ||
557 | if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { | ||
558 | amdgpu_display_vblank_wait(adev, i); | ||
559 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); | ||
560 | tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); | ||
561 | WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); | ||
562 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); | ||
563 | } | ||
564 | /* wait for the next frame */ | ||
565 | frame_count = amdgpu_display_vblank_get_counter(adev, i); | ||
566 | for (j = 0; j < adev->usec_timeout; j++) { | ||
567 | if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) | ||
568 | break; | ||
569 | udelay(1); | ||
570 | } | ||
571 | tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); | ||
572 | if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { | ||
573 | tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); | ||
574 | WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); | ||
575 | } | ||
576 | tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); | ||
577 | if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { | ||
578 | tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); | ||
579 | WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); | ||
580 | } | ||
581 | #else | ||
582 | /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ | ||
583 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); | ||
584 | tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); | ||
585 | tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); | ||
586 | WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); | ||
587 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); | ||
588 | save->crtc_enabled[i] = false; | ||
589 | /* ***** */ | ||
590 | #endif | ||
591 | } else { | ||
592 | save->crtc_enabled[i] = false; | ||
593 | } | ||
594 | } | ||
595 | } | ||
596 | |||
597 | static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev, | ||
598 | struct amdgpu_mode_mc_save *save) | ||
599 | { | ||
600 | u32 tmp, frame_count; | ||
601 | int i, j; | ||
602 | |||
603 | /* update crtc base addresses */ | ||
604 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
605 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], | ||
606 | upper_32_bits(adev->mc.vram_start)); | ||
607 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], | ||
608 | upper_32_bits(adev->mc.vram_start)); | ||
609 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], | ||
610 | (u32)adev->mc.vram_start); | ||
611 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], | ||
612 | (u32)adev->mc.vram_start); | ||
613 | |||
614 | if (save->crtc_enabled[i]) { | ||
615 | tmp = RREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i]); | ||
616 | if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { | ||
617 | tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); | ||
618 | WREG32(mmCRTC_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); | ||
619 | } | ||
620 | tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); | ||
621 | if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { | ||
622 | tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); | ||
623 | WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); | ||
624 | } | ||
625 | tmp = RREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i]); | ||
626 | if (REG_GET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { | ||
627 | tmp = REG_SET_FIELD(tmp, CRTC_MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); | ||
628 | WREG32(mmCRTC_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); | ||
629 | } | ||
630 | for (j = 0; j < adev->usec_timeout; j++) { | ||
631 | tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); | ||
632 | if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) | ||
633 | break; | ||
634 | udelay(1); | ||
635 | } | ||
636 | tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); | ||
637 | tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); | ||
638 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); | ||
639 | WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); | ||
640 | WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); | ||
641 | /* wait for the next frame */ | ||
642 | frame_count = amdgpu_display_vblank_get_counter(adev, i); | ||
643 | for (j = 0; j < adev->usec_timeout; j++) { | ||
644 | if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) | ||
645 | break; | ||
646 | udelay(1); | ||
647 | } | ||
648 | } | ||
649 | } | ||
650 | |||
651 | WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); | ||
652 | WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start)); | ||
653 | |||
654 | /* Unlock vga access */ | ||
655 | WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control); | ||
656 | mdelay(1); | ||
657 | WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control); | ||
658 | } | ||
659 | |||
660 | static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, | ||
661 | bool render) | ||
662 | { | ||
663 | u32 tmp; | ||
664 | |||
665 | /* Lockout access through VGA aperture*/ | ||
666 | tmp = RREG32(mmVGA_HDP_CONTROL); | ||
667 | if (render) | ||
668 | tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); | ||
669 | else | ||
670 | tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); | ||
671 | WREG32(mmVGA_HDP_CONTROL, tmp); | ||
672 | |||
673 | /* disable VGA render */ | ||
674 | tmp = RREG32(mmVGA_RENDER_CONTROL); | ||
675 | if (render) | ||
676 | tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); | ||
677 | else | ||
678 | tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); | ||
679 | WREG32(mmVGA_RENDER_CONTROL, tmp); | ||
680 | } | ||
681 | |||
682 | static void dce_v11_0_program_fmt(struct drm_encoder *encoder) | ||
683 | { | ||
684 | struct drm_device *dev = encoder->dev; | ||
685 | struct amdgpu_device *adev = dev->dev_private; | ||
686 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
687 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
688 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
689 | int bpc = 0; | ||
690 | u32 tmp = 0; | ||
691 | enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; | ||
692 | |||
693 | if (connector) { | ||
694 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
695 | bpc = amdgpu_connector_get_monitor_bpc(connector); | ||
696 | dither = amdgpu_connector->dither; | ||
697 | } | ||
698 | |||
699 | /* LVDS/eDP FMT is set up by atom */ | ||
700 | if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) | ||
701 | return; | ||
702 | |||
703 | /* not needed for analog */ | ||
704 | if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || | ||
705 | (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) | ||
706 | return; | ||
707 | |||
708 | if (bpc == 0) | ||
709 | return; | ||
710 | |||
711 | switch (bpc) { | ||
712 | case 6: | ||
713 | if (dither == AMDGPU_FMT_DITHER_ENABLE) { | ||
714 | /* XXX sort out optimal dither settings */ | ||
715 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); | ||
716 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); | ||
717 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); | ||
718 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0); | ||
719 | } else { | ||
720 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); | ||
721 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0); | ||
722 | } | ||
723 | break; | ||
724 | case 8: | ||
725 | if (dither == AMDGPU_FMT_DITHER_ENABLE) { | ||
726 | /* XXX sort out optimal dither settings */ | ||
727 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); | ||
728 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); | ||
729 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); | ||
730 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); | ||
731 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1); | ||
732 | } else { | ||
733 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); | ||
734 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1); | ||
735 | } | ||
736 | break; | ||
737 | case 10: | ||
738 | if (dither == AMDGPU_FMT_DITHER_ENABLE) { | ||
739 | /* XXX sort out optimal dither settings */ | ||
740 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); | ||
741 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); | ||
742 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); | ||
743 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); | ||
744 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2); | ||
745 | } else { | ||
746 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); | ||
747 | tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2); | ||
748 | } | ||
749 | break; | ||
750 | default: | ||
751 | /* not needed */ | ||
752 | break; | ||
753 | } | ||
754 | |||
755 | WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
756 | } | ||
757 | |||
758 | |||
759 | /* display watermark setup */ | ||
760 | /** | ||
761 | * dce_v11_0_line_buffer_adjust - Set up the line buffer | ||
762 | * | ||
763 | * @adev: amdgpu_device pointer | ||
764 | * @amdgpu_crtc: the selected display controller | ||
765 | * @mode: the current display mode on the selected display | ||
766 | * controller | ||
767 | * | ||
768 | * Set up the line buffer allocation for | ||
769 | * the selected display controller (CIK). | ||
770 | * Returns the line buffer size in pixels. | ||
771 | */ | ||
772 | static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev, | ||
773 | struct amdgpu_crtc *amdgpu_crtc, | ||
774 | struct drm_display_mode *mode) | ||
775 | { | ||
776 | u32 tmp, buffer_alloc, i, mem_cfg; | ||
777 | u32 pipe_offset = amdgpu_crtc->crtc_id; | ||
778 | /* | ||
779 | * Line Buffer Setup | ||
780 | * There are 6 line buffers, one for each display controller. | ||
781 | * There are 3 partitions per LB. Select the number of partitions | ||
782 | * to enable based on the display width. For display widths larger | ||
783 | * than 4096, you need to use 2 display controllers and combine | ||
784 | * them using the stereo blender. | ||
785 | */ | ||
786 | if (amdgpu_crtc->base.enabled && mode) { | ||
787 | if (mode->crtc_hdisplay < 1920) { | ||
788 | mem_cfg = 1; | ||
789 | buffer_alloc = 2; | ||
790 | } else if (mode->crtc_hdisplay < 2560) { | ||
791 | mem_cfg = 2; | ||
792 | buffer_alloc = 2; | ||
793 | } else if (mode->crtc_hdisplay < 4096) { | ||
794 | mem_cfg = 0; | ||
795 | buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; | ||
796 | } else { | ||
797 | DRM_DEBUG_KMS("Mode too big for LB!\n"); | ||
798 | mem_cfg = 0; | ||
799 | buffer_alloc = (adev->flags & AMDGPU_IS_APU) ? 2 : 4; | ||
800 | } | ||
801 | } else { | ||
802 | mem_cfg = 1; | ||
803 | buffer_alloc = 0; | ||
804 | } | ||
805 | |||
806 | tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset); | ||
807 | tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg); | ||
808 | WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp); | ||
809 | |||
810 | tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); | ||
811 | tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc); | ||
812 | WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp); | ||
813 | |||
814 | for (i = 0; i < adev->usec_timeout; i++) { | ||
815 | tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); | ||
816 | if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED)) | ||
817 | break; | ||
818 | udelay(1); | ||
819 | } | ||
820 | |||
821 | if (amdgpu_crtc->base.enabled && mode) { | ||
822 | switch (mem_cfg) { | ||
823 | case 0: | ||
824 | default: | ||
825 | return 4096 * 2; | ||
826 | case 1: | ||
827 | return 1920 * 2; | ||
828 | case 2: | ||
829 | return 2560 * 2; | ||
830 | } | ||
831 | } | ||
832 | |||
833 | /* controller not enabled, so no lb used */ | ||
834 | return 0; | ||
835 | } | ||
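[Editor's summary] The branch structure above reduces to this mapping for an enabled CRTC (derived from the code, not stated elsewhere in the patch):

    /*  crtc_hdisplay < 1920          -> mem_cfg 1 -> lb size 1920 * 2 px
     *  1920 <= crtc_hdisplay < 2560  -> mem_cfg 2 -> lb size 2560 * 2 px
     *  2560 <= crtc_hdisplay         -> mem_cfg 0 -> lb size 4096 * 2 px
     *  disabled CRTC                 -> lb size 0
     */

So a 2560x1440 head gets all three partitions (mem_cfg 0) and reports 8192 pixels of line buffer, while a 1280x720 head reports 3840.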
836 | |||
837 | /** | ||
838 | * cik_get_number_of_dram_channels - get the number of dram channels | ||
839 | * | ||
840 | * @adev: amdgpu_device pointer | ||
841 | * | ||
842 | * Look up the number of video ram channels (CIK). | ||
843 | * Used for display watermark bandwidth calculations | ||
844 | * Returns the number of dram channels | ||
845 | */ | ||
846 | static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) | ||
847 | { | ||
848 | u32 tmp = RREG32(mmMC_SHARED_CHMAP); | ||
849 | |||
850 | switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { | ||
851 | case 0: | ||
852 | default: | ||
853 | return 1; | ||
854 | case 1: | ||
855 | return 2; | ||
856 | case 2: | ||
857 | return 4; | ||
858 | case 3: | ||
859 | return 8; | ||
860 | case 4: | ||
861 | return 3; | ||
862 | case 5: | ||
863 | return 6; | ||
864 | case 6: | ||
865 | return 10; | ||
866 | case 7: | ||
867 | return 12; | ||
868 | case 8: | ||
869 | return 16; | ||
870 | } | ||
871 | } | ||
872 | |||
873 | struct dce10_wm_params { | ||
874 | u32 dram_channels; /* number of dram channels */ | ||
875 | u32 yclk; /* bandwidth per dram data pin in kHz */ | ||
876 | u32 sclk; /* engine clock in kHz */ | ||
877 | u32 disp_clk; /* display clock in kHz */ | ||
878 | u32 src_width; /* viewport width */ | ||
879 | u32 active_time; /* active display time in ns */ | ||
880 | u32 blank_time; /* blank time in ns */ | ||
881 | bool interlaced; /* mode is interlaced */ | ||
882 | fixed20_12 vsc; /* vertical scale ratio */ | ||
883 | u32 num_heads; /* number of active crtcs */ | ||
884 | u32 bytes_per_pixel; /* bytes per pixel display + overlay */ | ||
885 | u32 lb_size; /* line buffer allocated to pipe */ | ||
886 | u32 vtaps; /* vertical scaler taps */ | ||
887 | }; | ||
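[Editor's note] For orientation in the worked examples below, a plausible (purely illustrative, not measured) parameter set for a single 1080p60 head on a two-channel part, using the units given in the field comments above:

    struct dce10_wm_params wm_example = {
            .dram_channels   = 2,       /* from MC_SHARED_CHMAP */
            .yclk            = 800000,  /* 800 MHz effective, in kHz */
            .sclk            = 600000,  /* 600 MHz engine clock, in kHz */
            .disp_clk        = 148500,  /* 148.5 MHz pixel clock, in kHz */
            .src_width       = 1920,
            .active_time     = 11520,   /* 1920 px * 6 ns/px */
            .blank_time      = 1680,    /* (2200 - 1920) px * 6 ns/px */
            .interlaced      = false,
            .num_heads       = 1,
            .bytes_per_pixel = 4,
            .lb_size         = 5120,    /* 2560 * 2 from the LB setup above */
            .vtaps           = 1,
            /* .vsc would be set to 1.0 via dfixed_const(1) */
    };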
888 | |||
889 | /** | ||
890 | * dce_v11_0_dram_bandwidth - get the dram bandwidth | ||
891 | * | ||
892 | * @wm: watermark calculation data | ||
893 | * | ||
894 | * Calculate the raw dram bandwidth (CIK). | ||
895 | * Used for display watermark bandwidth calculations | ||
896 | * Returns the dram bandwidth in MBytes/s | ||
897 | */ | ||
898 | static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm) | ||
899 | { | ||
900 | /* Calculate raw DRAM Bandwidth */ | ||
901 | fixed20_12 dram_efficiency; /* 0.7 */ | ||
902 | fixed20_12 yclk, dram_channels, bandwidth; | ||
903 | fixed20_12 a; | ||
904 | |||
905 | a.full = dfixed_const(1000); | ||
906 | yclk.full = dfixed_const(wm->yclk); | ||
907 | yclk.full = dfixed_div(yclk, a); | ||
908 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | ||
909 | a.full = dfixed_const(10); | ||
910 | dram_efficiency.full = dfixed_const(7); | ||
911 | dram_efficiency.full = dfixed_div(dram_efficiency, a); | ||
912 | bandwidth.full = dfixed_mul(dram_channels, yclk); | ||
913 | bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); | ||
914 | |||
915 | return dfixed_trunc(bandwidth); | ||
916 | } | ||
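[Editor's note] Stripped of the fixed-point plumbing, this computes bandwidth = (yclk / 1000) * (dram_channels * 4) * 0.7, in MB/s. With the sample parameters sketched above:

    /*  (800000 / 1000) * (2 * 4) * 0.7 = 800 * 8 * 0.7 = 4480 MB/s  */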
917 | |||
918 | /** | ||
919 | * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display | ||
920 | * | ||
921 | * @wm: watermark calculation data | ||
922 | * | ||
923 | * Calculate the dram bandwidth used for display (CIK). | ||
924 | * Used for display watermark bandwidth calculations | ||
925 | * Returns the dram bandwidth for display in MBytes/s | ||
926 | */ | ||
927 | static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm) | ||
928 | { | ||
929 | /* Calculate DRAM Bandwidth and the part allocated to display. */ | ||
930 | fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ | ||
931 | fixed20_12 yclk, dram_channels, bandwidth; | ||
932 | fixed20_12 a; | ||
933 | |||
934 | a.full = dfixed_const(1000); | ||
935 | yclk.full = dfixed_const(wm->yclk); | ||
936 | yclk.full = dfixed_div(yclk, a); | ||
937 | dram_channels.full = dfixed_const(wm->dram_channels * 4); | ||
938 | a.full = dfixed_const(10); | ||
939 | disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */ | ||
940 | disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); | ||
941 | bandwidth.full = dfixed_mul(dram_channels, yclk); | ||
942 | bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); | ||
943 | |||
944 | return dfixed_trunc(bandwidth); | ||
945 | } | ||
946 | |||
947 | /** | ||
948 | * dce_v11_0_data_return_bandwidth - get the data return bandwidth | ||
949 | * | ||
950 | * @wm: watermark calculation data | ||
951 | * | ||
952 | * Calculate the data return bandwidth used for display (CIK). | ||
953 | * Used for display watermark bandwidth calculations | ||
954 | * Returns the data return bandwidth in MBytes/s | ||
955 | */ | ||
956 | static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm) | ||
957 | { | ||
958 | /* Calculate the display Data return Bandwidth */ | ||
959 | fixed20_12 return_efficiency; /* 0.8 */ | ||
960 | fixed20_12 sclk, bandwidth; | ||
961 | fixed20_12 a; | ||
962 | |||
963 | a.full = dfixed_const(1000); | ||
964 | sclk.full = dfixed_const(wm->sclk); | ||
965 | sclk.full = dfixed_div(sclk, a); | ||
966 | a.full = dfixed_const(10); | ||
967 | return_efficiency.full = dfixed_const(8); | ||
968 | return_efficiency.full = dfixed_div(return_efficiency, a); | ||
969 | a.full = dfixed_const(32); | ||
970 | bandwidth.full = dfixed_mul(a, sclk); | ||
971 | bandwidth.full = dfixed_mul(bandwidth, return_efficiency); | ||
972 | |||
973 | return dfixed_trunc(bandwidth); | ||
974 | } | ||
975 | |||
976 | /** | ||
977 | * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth | ||
978 | * | ||
979 | * @wm: watermark calculation data | ||
980 | * | ||
981 | * Calculate the dmif bandwidth used for display (CIK). | ||
982 | * Used for display watermark bandwidth calculations | ||
983 | * Returns the dmif bandwidth in MBytes/s | ||
984 | */ | ||
985 | static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm) | ||
986 | { | ||
987 | /* Calculate the DMIF Request Bandwidth */ | ||
988 | fixed20_12 disp_clk_request_efficiency; /* 0.8 */ | ||
989 | fixed20_12 disp_clk, bandwidth; | ||
990 | fixed20_12 a, b; | ||
991 | |||
992 | a.full = dfixed_const(1000); | ||
993 | disp_clk.full = dfixed_const(wm->disp_clk); | ||
994 | disp_clk.full = dfixed_div(disp_clk, a); | ||
995 | a.full = dfixed_const(32); | ||
996 | b.full = dfixed_mul(a, disp_clk); | ||
997 | |||
998 | a.full = dfixed_const(10); | ||
999 | disp_clk_request_efficiency.full = dfixed_const(8); | ||
1000 | disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); | ||
1001 | |||
1002 | bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); | ||
1003 | |||
1004 | return dfixed_trunc(bandwidth); | ||
1005 | } | ||
1006 | |||
1007 | /** | ||
1008 | * dce_v11_0_available_bandwidth - get the min available bandwidth | ||
1009 | * | ||
1010 | * @wm: watermark calculation data | ||
1011 | * | ||
1012 | * Calculate the min available bandwidth used for display (CIK). | ||
1013 | * Used for display watermark bandwidth calculations | ||
1014 | * Returns the min available bandwidth in MBytes/s | ||
1015 | */ | ||
1016 | static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm) | ||
1017 | { | ||
1018 | /* Calculate the Available bandwidth. Display can use this temporarily but not sustain it on average. */ | ||
1019 | u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm); | ||
1020 | u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm); | ||
1021 | u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm); | ||
1022 | |||
1023 | return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); | ||
1024 | } | ||
1025 | |||
1026 | /** | ||
1027 | * dce_v11_0_average_bandwidth - get the average available bandwidth | ||
1028 | * | ||
1029 | * @wm: watermark calculation data | ||
1030 | * | ||
1031 | * Calculate the average available bandwidth used for display (CIK). | ||
1032 | * Used for display watermark bandwidth calculations | ||
1033 | * Returns the average available bandwidth in MBytes/s | ||
1034 | */ | ||
1035 | static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm) | ||
1036 | { | ||
1037 | /* Calculate the display mode Average Bandwidth | ||
1038 | * DisplayMode should contain the source and destination dimensions, | ||
1039 | * timing, etc. | ||
1040 | */ | ||
1041 | fixed20_12 bpp; | ||
1042 | fixed20_12 line_time; | ||
1043 | fixed20_12 src_width; | ||
1044 | fixed20_12 bandwidth; | ||
1045 | fixed20_12 a; | ||
1046 | |||
1047 | a.full = dfixed_const(1000); | ||
1048 | line_time.full = dfixed_const(wm->active_time + wm->blank_time); | ||
1049 | line_time.full = dfixed_div(line_time, a); | ||
1050 | bpp.full = dfixed_const(wm->bytes_per_pixel); | ||
1051 | src_width.full = dfixed_const(wm->src_width); | ||
1052 | bandwidth.full = dfixed_mul(src_width, bpp); | ||
1053 | bandwidth.full = dfixed_mul(bandwidth, wm->vsc); | ||
1054 | bandwidth.full = dfixed_div(bandwidth, line_time); | ||
1055 | |||
1056 | return dfixed_trunc(bandwidth); | ||
1057 | } | ||
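[Editor's note] In plain arithmetic: bandwidth = src_width * bytes_per_pixel * vsc / line_time, where line_time = (active_time + blank_time) / 1000 is in microseconds, so the result comes out in MB/s. With the sample 1080p60 numbers (assumptions, rounded):

    /*  line_time = (11520 + 1680) / 1000 = 13.2 us
     *  bandwidth = 1920 * 4 * 1.0 / 13.2  ~= 581 MB/s
     */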
1058 | |||
1059 | /** | ||
1060 | * dce_v11_0_latency_watermark - get the latency watermark | ||
1061 | * | ||
1062 | * @wm: watermark calculation data | ||
1063 | * | ||
1064 | * Calculate the latency watermark (CIK). | ||
1065 | * Used for display watermark bandwidth calculations | ||
1066 | * Returns the latency watermark in ns | ||
1067 | */ | ||
1068 | static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) | ||
1069 | { | ||
1070 | /* First calculate the latency in ns */ | ||
1071 | u32 mc_latency = 2000; /* 2000 ns. */ | ||
1072 | u32 available_bandwidth = dce_v11_0_available_bandwidth(wm); | ||
1073 | u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; | ||
1074 | u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; | ||
1075 | u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ | ||
1076 | u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + | ||
1077 | (wm->num_heads * cursor_line_pair_return_time); | ||
1078 | u32 latency = mc_latency + other_heads_data_return_time + dc_latency; | ||
1079 | u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; | ||
1080 | u32 tmp, dmif_size = 12288; | ||
1081 | fixed20_12 a, b, c; | ||
1082 | |||
1083 | if (wm->num_heads == 0) | ||
1084 | return 0; | ||
1085 | |||
1086 | a.full = dfixed_const(2); | ||
1087 | b.full = dfixed_const(1); | ||
1088 | if ((wm->vsc.full > a.full) || | ||
1089 | ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || | ||
1090 | (wm->vtaps >= 5) || | ||
1091 | ((wm->vsc.full >= a.full) && wm->interlaced)) | ||
1092 | max_src_lines_per_dst_line = 4; | ||
1093 | else | ||
1094 | max_src_lines_per_dst_line = 2; | ||
1095 | |||
1096 | a.full = dfixed_const(available_bandwidth); | ||
1097 | b.full = dfixed_const(wm->num_heads); | ||
1098 | a.full = dfixed_div(a, b); | ||
1099 | |||
1100 | b.full = dfixed_const(mc_latency + 512); | ||
1101 | c.full = dfixed_const(wm->disp_clk); | ||
1102 | b.full = dfixed_div(b, c); | ||
1103 | |||
1104 | c.full = dfixed_const(dmif_size); | ||
1105 | b.full = dfixed_div(c, b); | ||
1106 | |||
1107 | tmp = min(dfixed_trunc(a), dfixed_trunc(b)); | ||
1108 | |||
1109 | b.full = dfixed_const(1000); | ||
1110 | c.full = dfixed_const(wm->disp_clk); | ||
1111 | b.full = dfixed_div(c, b); | ||
1112 | c.full = dfixed_const(wm->bytes_per_pixel); | ||
1113 | b.full = dfixed_mul(b, c); | ||
1114 | |||
1115 | lb_fill_bw = min(tmp, dfixed_trunc(b)); | ||
1116 | |||
1117 | a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); | ||
1118 | b.full = dfixed_const(1000); | ||
1119 | c.full = dfixed_const(lb_fill_bw); | ||
1120 | b.full = dfixed_div(c, b); | ||
1121 | a.full = dfixed_div(a, b); | ||
1122 | line_fill_time = dfixed_trunc(a); | ||
1123 | |||
1124 | if (line_fill_time < wm->active_time) | ||
1125 | return latency; | ||
1126 | else | ||
1127 | return latency + (line_fill_time - wm->active_time); | ||
1128 | |||
1129 | } | ||
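[Editor's note] Units in the first half: latencies in ns, bandwidths in MB/s, disp_clk in kHz. Continuing the illustrative single-head example (all figures assumptions, rounded):

    /*  available_bandwidth = min(dram 4480, return 15360, dmif 3801) = 3801
     *  worst_chunk_return_time      = 512*8*1000 / 3801  ~= 1077 ns
     *  cursor_line_pair_return_time = 128*4*1000 / 3801  ~=  134 ns
     *  dc_latency                   = 40000000 / 148500  ~=  269 ns
     *  latency = 2000 + (2*1077 + 134) + 269             ~= 4557 ns
     * The tail of the function only adds to this when the line buffer
     * cannot be refilled within the active display time.
     */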
1130 | |||
1131 | /** | ||
1132 | * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check | ||
1133 | * average and available dram bandwidth | ||
1134 | * | ||
1135 | * @wm: watermark calculation data | ||
1136 | * | ||
1137 | * Check if the display average bandwidth fits in the display | ||
1138 | * dram bandwidth (CIK). | ||
1139 | * Used for display watermark bandwidth calculations | ||
1140 | * Returns true if the display fits, false if not. | ||
1141 | */ | ||
1142 | static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm) | ||
1143 | { | ||
1144 | if (dce_v11_0_average_bandwidth(wm) <= | ||
1145 | (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads)) | ||
1146 | return true; | ||
1147 | else | ||
1148 | return false; | ||
1149 | } | ||
1150 | |||
1151 | /** | ||
1152 | * dce_v11_0_average_bandwidth_vs_available_bandwidth - check | ||
1153 | * average and available bandwidth | ||
1154 | * | ||
1155 | * @wm: watermark calculation data | ||
1156 | * | ||
1157 | * Check if the display average bandwidth fits in the display | ||
1158 | * available bandwidth (CIK). | ||
1159 | * Used for display watermark bandwidth calculations | ||
1160 | * Returns true if the display fits, false if not. | ||
1161 | */ | ||
1162 | static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm) | ||
1163 | { | ||
1164 | if (dce_v11_0_average_bandwidth(wm) <= | ||
1165 | (dce_v11_0_available_bandwidth(wm) / wm->num_heads)) | ||
1166 | return true; | ||
1167 | else | ||
1168 | return false; | ||
1169 | } | ||
1170 | |||
1171 | /** | ||
1172 | * dce_v11_0_check_latency_hiding - check latency hiding | ||
1173 | * | ||
1174 | * @wm: watermark calculation data | ||
1175 | * | ||
1176 | * Check latency hiding (CIK). | ||
1177 | * Used for display watermark bandwidth calculations | ||
1178 | * Returns true if the display fits, false if not. | ||
1179 | */ | ||
1180 | static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm) | ||
1181 | { | ||
1182 | u32 lb_partitions = wm->lb_size / wm->src_width; | ||
1183 | u32 line_time = wm->active_time + wm->blank_time; | ||
1184 | u32 latency_tolerant_lines; | ||
1185 | u32 latency_hiding; | ||
1186 | fixed20_12 a; | ||
1187 | |||
1188 | a.full = dfixed_const(1); | ||
1189 | if (wm->vsc.full > a.full) | ||
1190 | latency_tolerant_lines = 1; | ||
1191 | else { | ||
1192 | if (lb_partitions <= (wm->vtaps + 1)) | ||
1193 | latency_tolerant_lines = 1; | ||
1194 | else | ||
1195 | latency_tolerant_lines = 2; | ||
1196 | } | ||
1197 | |||
1198 | latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); | ||
1199 | |||
1200 | if (dce_v11_0_latency_watermark(wm) <= latency_hiding) | ||
1201 | return true; | ||
1202 | else | ||
1203 | return false; | ||
1204 | } | ||
1205 | |||
1206 | /** | ||
1207 | * dce_v11_0_program_watermarks - program display watermarks | ||
1208 | * | ||
1209 | * @adev: amdgpu_device pointer | ||
1210 | * @amdgpu_crtc: the selected display controller | ||
1211 | * @lb_size: line buffer size | ||
1212 | * @num_heads: number of display controllers in use | ||
1213 | * | ||
1214 | * Calculate and program the display watermarks for the | ||
1215 | * selected display controller (CIK). | ||
1216 | */ | ||
1217 | static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | ||
1218 | struct amdgpu_crtc *amdgpu_crtc, | ||
1219 | u32 lb_size, u32 num_heads) | ||
1220 | { | ||
1221 | struct drm_display_mode *mode = &amdgpu_crtc->base.mode; | ||
1222 | struct dce10_wm_params wm_low, wm_high; | ||
1223 | u32 pixel_period; | ||
1224 | u32 line_time = 0; | ||
1225 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | ||
1226 | u32 tmp, wm_mask; | ||
1227 | |||
1228 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | ||
1229 | pixel_period = 1000000 / (u32)mode->clock; | ||
1230 | line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); | ||
1231 | |||
1232 | /* watermark for high clocks */ | ||
1233 | if (adev->pm.dpm_enabled) { | ||
1234 | wm_high.yclk = | ||
1235 | amdgpu_dpm_get_mclk(adev, false) * 10; | ||
1236 | wm_high.sclk = | ||
1237 | amdgpu_dpm_get_sclk(adev, false) * 10; | ||
1238 | } else { | ||
1239 | wm_high.yclk = adev->pm.current_mclk * 10; | ||
1240 | wm_high.sclk = adev->pm.current_sclk * 10; | ||
1241 | } | ||
1242 | |||
1243 | wm_high.disp_clk = mode->clock; | ||
1244 | wm_high.src_width = mode->crtc_hdisplay; | ||
1245 | wm_high.active_time = mode->crtc_hdisplay * pixel_period; | ||
1246 | wm_high.blank_time = line_time - wm_high.active_time; | ||
1247 | wm_high.interlaced = false; | ||
1248 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1249 | wm_high.interlaced = true; | ||
1250 | wm_high.vsc = amdgpu_crtc->vsc; | ||
1251 | wm_high.vtaps = 1; | ||
1252 | if (amdgpu_crtc->rmx_type != RMX_OFF) | ||
1253 | wm_high.vtaps = 2; | ||
1254 | wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ | ||
1255 | wm_high.lb_size = lb_size; | ||
1256 | wm_high.dram_channels = cik_get_number_of_dram_channels(adev); | ||
1257 | wm_high.num_heads = num_heads; | ||
1258 | |||
1259 | /* set for high clocks */ | ||
1260 | latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535); | ||
1261 | |||
1262 | /* possibly force display priority to high */ | ||
1263 | /* should really do this at mode validation time... */ | ||
1264 | if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || | ||
1265 | !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) || | ||
1266 | !dce_v11_0_check_latency_hiding(&wm_high) || | ||
1267 | (adev->mode_info.disp_priority == 2)) { | ||
1268 | DRM_DEBUG_KMS("force priority to high\n"); | ||
1269 | } | ||
1270 | |||
1271 | /* watermark for low clocks */ | ||
1272 | if (adev->pm.dpm_enabled) { | ||
1273 | wm_low.yclk = | ||
1274 | amdgpu_dpm_get_mclk(adev, true) * 10; | ||
1275 | wm_low.sclk = | ||
1276 | amdgpu_dpm_get_sclk(adev, true) * 10; | ||
1277 | } else { | ||
1278 | wm_low.yclk = adev->pm.current_mclk * 10; | ||
1279 | wm_low.sclk = adev->pm.current_sclk * 10; | ||
1280 | } | ||
1281 | |||
1282 | wm_low.disp_clk = mode->clock; | ||
1283 | wm_low.src_width = mode->crtc_hdisplay; | ||
1284 | wm_low.active_time = mode->crtc_hdisplay * pixel_period; | ||
1285 | wm_low.blank_time = line_time - wm_low.active_time; | ||
1286 | wm_low.interlaced = false; | ||
1287 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1288 | wm_low.interlaced = true; | ||
1289 | wm_low.vsc = amdgpu_crtc->vsc; | ||
1290 | wm_low.vtaps = 1; | ||
1291 | if (amdgpu_crtc->rmx_type != RMX_OFF) | ||
1292 | wm_low.vtaps = 2; | ||
1293 | wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ | ||
1294 | wm_low.lb_size = lb_size; | ||
1295 | wm_low.dram_channels = cik_get_number_of_dram_channels(adev); | ||
1296 | wm_low.num_heads = num_heads; | ||
1297 | |||
1298 | /* set for low clocks */ | ||
1299 | latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535); | ||
1300 | |||
1301 | /* possibly force display priority to high */ | ||
1302 | /* should really do this at mode validation time... */ | ||
1303 | if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || | ||
1304 | !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) || | ||
1305 | !dce_v11_0_check_latency_hiding(&wm_low) || | ||
1306 | (adev->mode_info.disp_priority == 2)) { | ||
1307 | DRM_DEBUG_KMS("force priority to high\n"); | ||
1308 | } | ||
1309 | } | ||
1310 | |||
1311 | /* select wm A */ | ||
1312 | wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); | ||
1313 | tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); | ||
1314 | WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
1315 | tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); | ||
1316 | tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); | ||
1317 | tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); | ||
1318 | WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
1319 | /* select wm B */ | ||
1320 | tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); | ||
1321 | WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
1322 | tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); | ||
1323 | tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b); /* wm B takes the low-clock value */ | ||
1324 | tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); | ||
1325 | WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
1326 | /* restore original selection */ | ||
1327 | WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); | ||
1328 | |||
1329 | /* save values for DPM */ | ||
1330 | amdgpu_crtc->line_time = line_time; | ||
1331 | amdgpu_crtc->wm_high = latency_watermark_a; | ||
1332 | amdgpu_crtc->wm_low = latency_watermark_b; | ||
1333 | } | ||
1334 | |||
1335 | /** | ||
1336 | * dce_v11_0_bandwidth_update - program display watermarks | ||
1337 | * | ||
1338 | * @adev: amdgpu_device pointer | ||
1339 | * | ||
1340 | * Calculate and program the display watermarks and line | ||
1341 | * buffer allocation (CIK). | ||
1342 | */ | ||
1343 | static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev) | ||
1344 | { | ||
1345 | struct drm_display_mode *mode = NULL; | ||
1346 | u32 num_heads = 0, lb_size; | ||
1347 | int i; | ||
1348 | |||
1349 | amdgpu_update_display_priority(adev); | ||
1350 | |||
1351 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
1352 | if (adev->mode_info.crtcs[i]->base.enabled) | ||
1353 | num_heads++; | ||
1354 | } | ||
1355 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
1356 | mode = &adev->mode_info.crtcs[i]->base.mode; | ||
1357 | lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); | ||
1358 | dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i], | ||
1359 | lb_size, num_heads); | ||
1360 | } | ||
1361 | } | ||
1362 | |||
1363 | static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev) | ||
1364 | { | ||
1365 | int i; | ||
1366 | u32 offset, tmp; | ||
1367 | |||
1368 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
1369 | offset = adev->mode_info.audio.pin[i].offset; | ||
1370 | tmp = RREG32_AUDIO_ENDPT(offset, | ||
1371 | ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); | ||
1372 | if (((tmp & | ||
1373 | AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> | ||
1374 | AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) | ||
1375 | adev->mode_info.audio.pin[i].connected = false; | ||
1376 | else | ||
1377 | adev->mode_info.audio.pin[i].connected = true; | ||
1378 | } | ||
1379 | } | ||
1380 | |||
1381 | static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev) | ||
1382 | { | ||
1383 | int i; | ||
1384 | |||
1385 | dce_v11_0_audio_get_connected_pins(adev); | ||
1386 | |||
1387 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
1388 | if (adev->mode_info.audio.pin[i].connected) | ||
1389 | return &adev->mode_info.audio.pin[i]; | ||
1390 | } | ||
1391 | DRM_ERROR("No connected audio pins found!\n"); | ||
1392 | return NULL; | ||
1393 | } | ||
1394 | |||
1395 | static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) | ||
1396 | { | ||
1397 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
1398 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1399 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1400 | u32 tmp; | ||
1401 | |||
1402 | if (!dig || !dig->afmt || !dig->afmt->pin) | ||
1403 | return; | ||
1404 | |||
1405 | tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); | ||
1406 | tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); | ||
1407 | WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); | ||
1408 | } | ||
1409 | |||
1410 | static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, | ||
1411 | struct drm_display_mode *mode) | ||
1412 | { | ||
1413 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
1414 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1415 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1416 | struct drm_connector *connector; | ||
1417 | struct amdgpu_connector *amdgpu_connector = NULL; | ||
1418 | u32 tmp; | ||
1419 | int interlace = 0; | ||
1420 | |||
1421 | if (!dig || !dig->afmt || !dig->afmt->pin) | ||
1422 | return; | ||
1423 | |||
1424 | list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { | ||
1425 | if (connector->encoder == encoder) { | ||
1426 | amdgpu_connector = to_amdgpu_connector(connector); | ||
1427 | break; | ||
1428 | } | ||
1429 | } | ||
1430 | |||
1431 | if (!amdgpu_connector) { | ||
1432 | DRM_ERROR("Couldn't find encoder's connector\n"); | ||
1433 | return; | ||
1434 | } | ||
1435 | |||
1436 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
1437 | interlace = 1; | ||
1438 | if (connector->latency_present[interlace]) { | ||
1439 | tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, | ||
1440 | VIDEO_LIPSYNC, connector->video_latency[interlace]); | ||
1441 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, | ||
1442 | AUDIO_LIPSYNC, connector->audio_latency[interlace]); | ||
1443 | } else { | ||
1444 | tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, | ||
1445 | VIDEO_LIPSYNC, 0); | ||
1446 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, | ||
1447 | AUDIO_LIPSYNC, 0); | ||
1448 | } | ||
1449 | WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, | ||
1450 | ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); | ||
1451 | } | ||
1452 | |||
1453 | static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) | ||
1454 | { | ||
1455 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
1456 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1457 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1458 | struct drm_connector *connector; | ||
1459 | struct amdgpu_connector *amdgpu_connector = NULL; | ||
1460 | u32 tmp; | ||
1461 | u8 *sadb = NULL; | ||
1462 | int sad_count; | ||
1463 | |||
1464 | if (!dig || !dig->afmt || !dig->afmt->pin) | ||
1465 | return; | ||
1466 | |||
1467 | list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { | ||
1468 | if (connector->encoder == encoder) { | ||
1469 | amdgpu_connector = to_amdgpu_connector(connector); | ||
1470 | break; | ||
1471 | } | ||
1472 | } | ||
1473 | |||
1474 | if (!amdgpu_connector) { | ||
1475 | DRM_ERROR("Couldn't find encoder's connector\n"); | ||
1476 | return; | ||
1477 | } | ||
1478 | |||
1479 | sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb); | ||
1480 | if (sad_count < 0) { | ||
1481 | DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); | ||
1482 | sad_count = 0; | ||
1483 | } | ||
1484 | |||
1485 | /* program the speaker allocation */ | ||
1486 | tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, | ||
1487 | ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); | ||
1488 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, | ||
1489 | DP_CONNECTION, 0); | ||
1490 | /* set HDMI mode */ | ||
1491 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, | ||
1492 | HDMI_CONNECTION, 1); | ||
1493 | if (sad_count) | ||
1494 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, | ||
1495 | SPEAKER_ALLOCATION, sadb[0]); | ||
1496 | else | ||
1497 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, | ||
1498 | SPEAKER_ALLOCATION, 5); /* stereo */ | ||
1499 | WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, | ||
1500 | ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); | ||
1501 | |||
1502 | kfree(sadb); | ||
1503 | } | ||
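
sadb[0] above is the first byte of the CEA-861 Speaker Allocation Data Block, a bitmask of which speaker pairs the sink exposes. For reference, the low bits are (labels from the CEA-861 spec, not from this driver):

    /* CEA-861 speaker allocation byte, low bits, for reference only. */
    #define SPK_FL_FR   (1 << 0)   /* front left + front right */
    #define SPK_LFE     (1 << 1)   /* low-frequency effects    */
    #define SPK_FC      (1 << 2)   /* front center             */
    #define SPK_RL_RR   (1 << 3)   /* rear left + rear right   */
    #define SPK_RC      (1 << 4)   /* rear center              */

When no block can be read, the code above falls back to a fixed allocation rather than leaving the field stale.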
1504 | |||
1505 | static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) | ||
1506 | { | ||
1507 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
1508 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1509 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1510 | struct drm_connector *connector; | ||
1511 | struct amdgpu_connector *amdgpu_connector = NULL; | ||
1512 | struct cea_sad *sads; | ||
1513 | int i, sad_count; | ||
1514 | |||
1515 | static const u16 eld_reg_to_type[][2] = { | ||
1516 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, | ||
1517 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, | ||
1518 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, | ||
1519 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, | ||
1520 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, | ||
1521 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, | ||
1522 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, | ||
1523 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, | ||
1524 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, | ||
1525 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, | ||
1526 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, | ||
1527 | { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, | ||
1528 | }; | ||
1529 | |||
1530 | if (!dig || !dig->afmt || !dig->afmt->pin) | ||
1531 | return; | ||
1532 | |||
1533 | list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { | ||
1534 | if (connector->encoder == encoder) { | ||
1535 | amdgpu_connector = to_amdgpu_connector(connector); | ||
1536 | break; | ||
1537 | } | ||
1538 | } | ||
1539 | |||
1540 | if (!amdgpu_connector) { | ||
1541 | DRM_ERROR("Couldn't find encoder's connector\n"); | ||
1542 | return; | ||
1543 | } | ||
1544 | |||
1545 | sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); | ||
1546 | if (sad_count <= 0) { | ||
1547 | DRM_ERROR("Couldn't read SADs: %d\n", sad_count); | ||
1548 | return; | ||
1549 | } | ||
1550 | BUG_ON(!sads); | ||
1551 | |||
1552 | for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { | ||
1553 | u32 tmp = 0; | ||
1554 | u8 stereo_freqs = 0; | ||
1555 | int max_channels = -1; | ||
1556 | int j; | ||
1557 | |||
1558 | for (j = 0; j < sad_count; j++) { | ||
1559 | struct cea_sad *sad = &sads[j]; | ||
1560 | |||
1561 | if (sad->format == eld_reg_to_type[i][1]) { | ||
1562 | if (sad->channels > max_channels) { | ||
1563 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, | ||
1564 | MAX_CHANNELS, sad->channels); | ||
1565 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, | ||
1566 | DESCRIPTOR_BYTE_2, sad->byte2); | ||
1567 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, | ||
1568 | SUPPORTED_FREQUENCIES, sad->freq); | ||
1569 | max_channels = sad->channels; | ||
1570 | } | ||
1571 | |||
1572 | if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) | ||
1573 | stereo_freqs |= sad->freq; | ||
1574 | else | ||
1575 | break; | ||
1576 | } | ||
1577 | } | ||
1578 | |||
1579 | tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, | ||
1580 | SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); | ||
1581 | WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); | ||
1582 | } | ||
1583 | |||
1584 | kfree(sads); | ||
1585 | } | ||
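
Each struct cea_sad carries one decoded three-byte Short Audio Descriptor from the EDID: the coding type, the channel count, a sample-rate bitmask, and a format-dependent third byte. A hedged sketch of what one loop iteration above writes for a hypothetical 2-channel LPCM SAD supporting 32/44.1/48 kHz (freq mask 0x07):

    /* Hypothetical SAD values; field names match the descriptor registers above. */
    u32 desc = 0;
    desc = REG_SET_FIELD(desc, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                         MAX_CHANNELS, 2);               /* sad->channels */
    desc = REG_SET_FIELD(desc, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                         DESCRIPTOR_BYTE_2, 0x07);       /* sad->byte2: LPCM sample sizes */
    desc = REG_SET_FIELD(desc, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                         SUPPORTED_FREQUENCIES, 0x07);   /* sad->freq: 32/44.1/48 kHz */
    desc = REG_SET_FIELD(desc, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
                         SUPPORTED_FREQUENCIES_STEREO, 0x07); /* PCM SADs only */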
1586 | |||
1587 | static void dce_v11_0_audio_enable(struct amdgpu_device *adev, | ||
1588 | struct amdgpu_audio_pin *pin, | ||
1589 | bool enable) | ||
1590 | { | ||
1591 | if (!pin) | ||
1592 | return; | ||
1593 | |||
1594 | WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, | ||
1595 | enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); | ||
1596 | } | ||
1597 | |||
1598 | static const u32 pin_offsets[] = | ||
1599 | { | ||
1600 | AUD0_REGISTER_OFFSET, | ||
1601 | AUD1_REGISTER_OFFSET, | ||
1602 | AUD2_REGISTER_OFFSET, | ||
1603 | AUD3_REGISTER_OFFSET, | ||
1604 | AUD4_REGISTER_OFFSET, | ||
1605 | AUD5_REGISTER_OFFSET, | ||
1606 | AUD6_REGISTER_OFFSET, | ||
1607 | }; | ||
1608 | |||
1609 | static int dce_v11_0_audio_init(struct amdgpu_device *adev) | ||
1610 | { | ||
1611 | int i; | ||
1612 | |||
1613 | if (!amdgpu_audio) | ||
1614 | return 0; | ||
1615 | |||
1616 | adev->mode_info.audio.enabled = true; | ||
1617 | |||
1618 | adev->mode_info.audio.num_pins = 7; | ||
1619 | |||
1620 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
1621 | adev->mode_info.audio.pin[i].channels = -1; | ||
1622 | adev->mode_info.audio.pin[i].rate = -1; | ||
1623 | adev->mode_info.audio.pin[i].bits_per_sample = -1; | ||
1624 | adev->mode_info.audio.pin[i].status_bits = 0; | ||
1625 | adev->mode_info.audio.pin[i].category_code = 0; | ||
1626 | adev->mode_info.audio.pin[i].connected = false; | ||
1627 | adev->mode_info.audio.pin[i].offset = pin_offsets[i]; | ||
1628 | adev->mode_info.audio.pin[i].id = i; | ||
1629 | /* disable audio. it will be set up later */ | ||
1630 | /* XXX remove once we switch to ip funcs */ | ||
1631 | dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | ||
1632 | } | ||
1633 | |||
1634 | return 0; | ||
1635 | } | ||
1636 | |||
1637 | static void dce_v11_0_audio_fini(struct amdgpu_device *adev) | ||
1638 | { | ||
1639 | int i; | ||
1640 | |||
1641 | if (!adev->mode_info.audio.enabled) | ||
1642 | return; | ||
1643 | |||
1644 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) | ||
1645 | dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | ||
1646 | |||
1647 | adev->mode_info.audio.enabled = false; | ||
1648 | } | ||
1649 | |||
1650 | /* | ||
1651 | * update the N and CTS parameters for a given pixel clock rate | ||
1652 | */ | ||
1653 | static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) | ||
1654 | { | ||
1655 | struct drm_device *dev = encoder->dev; | ||
1656 | struct amdgpu_device *adev = dev->dev_private; | ||
1657 | struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); | ||
1658 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1659 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1660 | u32 tmp; | ||
1661 | |||
1662 | tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); | ||
1663 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); | ||
1664 | WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); | ||
1665 | tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); | ||
1666 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); | ||
1667 | WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); | ||
1668 | |||
1669 | tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); | ||
1670 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); | ||
1671 | WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); | ||
1672 | tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); | ||
1673 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); | ||
1674 | WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); | ||
1675 | |||
1676 | tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); | ||
1677 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); | ||
1678 | WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); | ||
1679 | tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); | ||
1680 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); | ||
1681 | WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); | ||
1682 | |||
1683 | } | ||
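
amdgpu_afmt_acr() supplies the HDMI Audio Clock Regeneration pairs written above. Per the HDMI spec the sink reconstructs the audio clock from 128*fs = f_TMDS * N / CTS, so CTS = f_TMDS * N / (128 * fs). A quick sanity check of that relationship (a sketch, not the driver's helper; for a 148.5 MHz clock and the spec-recommended N = 6144 at 48 kHz it yields CTS = 148500):

    #include <linux/math64.h>

    /* CTS = f_TMDS * N / (128 * fs); clock in kHz, as used by this file. */
    static u32 acr_cts(u32 tmds_khz, u32 n, u32 fs_hz)
    {
            return (u32)div_u64((u64)tmds_khz * 1000 * n, 128 * fs_hz);
    }
    /* acr_cts(148500, 6144, 48000) == 148500 */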
1684 | |||
1685 | /* | ||
1686 | * build an HDMI AVI InfoFrame | ||
1687 | */ | ||
1688 | static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, | ||
1689 | void *buffer, size_t size) | ||
1690 | { | ||
1691 | struct drm_device *dev = encoder->dev; | ||
1692 | struct amdgpu_device *adev = dev->dev_private; | ||
1693 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1694 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1695 | uint8_t *frame = buffer + 3; | ||
1696 | uint8_t *header = buffer; | ||
1697 | |||
1698 | WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset, | ||
1699 | frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); | ||
1700 | WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset, | ||
1701 | frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); | ||
1702 | WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset, | ||
1703 | frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); | ||
1704 | WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset, | ||
1705 | frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); | ||
1706 | } | ||
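
For orientation, the buffer produced by hdmi_avi_infoframe_pack() is the CEA-861 wire format: a 3-byte header followed by the payload, whose first byte is the checksum. That is why frame starts at buffer + 3, and why the header's version byte is folded into the top byte of AFMT_AVI_INFO3:

    /*
     * Packed AVI infoframe layout consumed above (CEA-861):
     *
     *   buffer[0]      type (0x82, AVI)
     *   buffer[1]      version        -> AFMT_AVI_INFO3 bits 31:24
     *   buffer[2]      length (13)
     *   buffer[3]      checksum       -> frame[0], AFMT_AVI_INFO0 bits 7:0
     *   buffer[4..16]  PB1..PB13      -> frame[1..13]
     */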
1707 | |||
1708 | static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) | ||
1709 | { | ||
1710 | struct drm_device *dev = encoder->dev; | ||
1711 | struct amdgpu_device *adev = dev->dev_private; | ||
1712 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1713 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1714 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
1715 | u32 dto_phase = 24 * 1000; | ||
1716 | u32 dto_modulo = clock; | ||
1717 | u32 tmp; | ||
1718 | |||
1719 | if (!dig || !dig->afmt) | ||
1720 | return; | ||
1721 | |||
1722 | /* XXX two dtos; generally use dto0 for hdmi */ | ||
1723 | /* Express [24MHz / target pixel clock] as an exact rational | ||
1724 | * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE | ||
1725 | * is the numerator and DCCG_AUDIO_DTOx_MODULE is the denominator. | ||
1726 | */ | ||
1727 | tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); | ||
1728 | tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, | ||
1729 | amdgpu_crtc->crtc_id); | ||
1730 | WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); | ||
1731 | WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
1732 | WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
1733 | } | ||
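
mode->clock is in kHz, so the DTO is programmed as an exact fraction of the pixel clock. A worked comment for a common mode, nothing more:

    /*
     * DTO ratio example (values in kHz, as programmed above):
     *   pixel clock = 148500 (1080p60)
     *   PHASE  = 24 * 1000 = 24000
     *   MODULE = 148500
     *   audio reference = pixclk * PHASE / MODULE = 24000 kHz = 24 MHz
     */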
1734 | |||
1735 | /* | ||
1736 | * update the info frames with the data from the current display mode | ||
1737 | */ | ||
1738 | static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, | ||
1739 | struct drm_display_mode *mode) | ||
1740 | { | ||
1741 | struct drm_device *dev = encoder->dev; | ||
1742 | struct amdgpu_device *adev = dev->dev_private; | ||
1743 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1744 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1745 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
1746 | u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; | ||
1747 | struct hdmi_avi_infoframe frame; | ||
1748 | ssize_t err; | ||
1749 | u32 tmp; | ||
1750 | int bpc = 8; | ||
1751 | |||
1752 | if (!dig || !dig->afmt) | ||
1753 | return; | ||
1754 | |||
1755 | /* Silent, r600_hdmi_enable will raise WARN for us */ | ||
1756 | if (!dig->afmt->enabled) | ||
1757 | return; | ||
1758 | |||
1759 | /* hdmi deep color mode general control packets setup, if bpc > 8 */ | ||
1760 | if (encoder->crtc) { | ||
1761 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); | ||
1762 | bpc = amdgpu_crtc->bpc; | ||
1763 | } | ||
1764 | |||
1765 | /* disable audio prior to setting up hw */ | ||
1766 | dig->afmt->pin = dce_v11_0_audio_get_pin(adev); | ||
1767 | dce_v11_0_audio_enable(adev, dig->afmt->pin, false); | ||
1768 | |||
1769 | dce_v11_0_audio_set_dto(encoder, mode->clock); | ||
1770 | |||
1771 | tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); | ||
1772 | tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); | ||
1773 | WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ | ||
1774 | |||
1775 | WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); | ||
1776 | |||
1777 | tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); | ||
1778 | switch (bpc) { | ||
1779 | case 0: | ||
1780 | case 6: | ||
1781 | case 8: | ||
1782 | case 16: | ||
1783 | default: | ||
1784 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); | ||
1785 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); | ||
1786 | DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", | ||
1787 | connector->name, bpc); | ||
1788 | break; | ||
1789 | case 10: | ||
1790 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); | ||
1791 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); | ||
1792 | DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", | ||
1793 | connector->name); | ||
1794 | break; | ||
1795 | case 12: | ||
1796 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); | ||
1797 | tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); | ||
1798 | DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", | ||
1799 | connector->name); | ||
1800 | break; | ||
1801 | } | ||
1802 | WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); | ||
1803 | |||
1804 | tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); | ||
1805 | tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ | ||
1806 | tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */ | ||
1807 | tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ | ||
1808 | WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1809 | |||
1810 | tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); | ||
1811 | /* enable audio info frames (frames won't be set until audio is enabled) */ | ||
1812 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); | ||
1813 | /* required for audio info values to be updated */ | ||
1814 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); | ||
1815 | WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); | ||
1816 | |||
1817 | tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); | ||
1818 | /* required for audio info values to be updated */ | ||
1819 | tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); | ||
1820 | WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); | ||
1821 | |||
1822 | tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); | ||
1823 | /* anything other than 0 */ | ||
1824 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); | ||
1825 | WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); | ||
1826 | |||
1827 | WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ | ||
1828 | |||
1829 | tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); | ||
1830 | /* set the default audio delay */ | ||
1831 | tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); | ||
1832 | /* should be sufficient for all audio modes and small enough for all hblanks */ | ||
1833 | tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); | ||
1834 | WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1835 | |||
1836 | tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); | ||
1837 | /* allow 60958 channel status fields to be updated */ | ||
1838 | tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); | ||
1839 | WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1840 | |||
1841 | tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); | ||
1842 | if (bpc > 8) | ||
1843 | /* clear SW CTS value */ | ||
1844 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); | ||
1845 | else | ||
1846 | /* select SW CTS value */ | ||
1847 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); | ||
1848 | /* allow hw to send ACR packets when required */ | ||
1849 | tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); | ||
1850 | WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1851 | |||
1852 | dce_v11_0_afmt_update_ACR(encoder, mode->clock); | ||
1853 | |||
1854 | tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); | ||
1855 | tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); | ||
1856 | WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); | ||
1857 | |||
1858 | tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); | ||
1859 | tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); | ||
1860 | WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); | ||
1861 | |||
1862 | tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); | ||
1863 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); | ||
1864 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); | ||
1865 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); | ||
1866 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); | ||
1867 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); | ||
1868 | tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); | ||
1869 | WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); | ||
1870 | |||
1871 | dce_v11_0_audio_write_speaker_allocation(encoder); | ||
1872 | |||
1873 | WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, | ||
1874 | (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); | ||
1875 | |||
1876 | dce_v11_0_afmt_audio_select_pin(encoder); | ||
1877 | dce_v11_0_audio_write_sad_regs(encoder); | ||
1878 | dce_v11_0_audio_write_latency_fields(encoder, mode); | ||
1879 | |||
1880 | err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); | ||
1881 | if (err < 0) { | ||
1882 | DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); | ||
1883 | return; | ||
1884 | } | ||
1885 | |||
1886 | err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); | ||
1887 | if (err < 0) { | ||
1888 | DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); | ||
1889 | return; | ||
1890 | } | ||
1891 | |||
1892 | dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); | ||
1893 | |||
1894 | tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); | ||
1895 | /* enable AVI info frames */ | ||
1896 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); | ||
1897 | /* required for audio info values to be updated */ | ||
1898 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); | ||
1899 | WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); | ||
1900 | |||
1901 | tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); | ||
1902 | tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); | ||
1903 | WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); | ||
1904 | |||
1905 | tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); | ||
1906 | /* send audio packets */ | ||
1907 | tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); | ||
1908 | WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); | ||
1909 | |||
1910 | WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); | ||
1911 | WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); | ||
1912 | WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); | ||
1913 | WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); | ||
1914 | |||
1915 | /* enable audio after setting up hw */ | ||
1916 | dce_v11_0_audio_enable(adev, dig->afmt->pin, true); | ||
1917 | } | ||
1918 | |||
1919 | static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable) | ||
1920 | { | ||
1921 | struct drm_device *dev = encoder->dev; | ||
1922 | struct amdgpu_device *adev = dev->dev_private; | ||
1923 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
1924 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
1925 | |||
1926 | if (!dig || !dig->afmt) | ||
1927 | return; | ||
1928 | |||
1929 | /* Silent, r600_hdmi_enable will raise WARN for us */ | ||
1930 | if (enable && dig->afmt->enabled) | ||
1931 | return; | ||
1932 | if (!enable && !dig->afmt->enabled) | ||
1933 | return; | ||
1934 | |||
1935 | if (!enable && dig->afmt->pin) { | ||
1936 | dce_v11_0_audio_enable(adev, dig->afmt->pin, false); | ||
1937 | dig->afmt->pin = NULL; | ||
1938 | } | ||
1939 | |||
1940 | dig->afmt->enabled = enable; | ||
1941 | |||
1942 | DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", | ||
1943 | enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); | ||
1944 | } | ||
1945 | |||
1946 | static void dce_v11_0_afmt_init(struct amdgpu_device *adev) | ||
1947 | { | ||
1948 | int i; | ||
1949 | |||
1950 | for (i = 0; i < adev->mode_info.num_dig; i++) | ||
1951 | adev->mode_info.afmt[i] = NULL; | ||
1952 | |||
1953 | /* DCE11 has audio blocks tied to DIG encoders */ | ||
1954 | for (i = 0; i < adev->mode_info.num_dig; i++) { | ||
1955 | adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); | ||
1956 | if (adev->mode_info.afmt[i]) { | ||
1957 | adev->mode_info.afmt[i]->offset = dig_offsets[i]; | ||
1958 | adev->mode_info.afmt[i]->id = i; | ||
1959 | } | ||
1960 | } | ||
1961 | } | ||
1962 | |||
1963 | static void dce_v11_0_afmt_fini(struct amdgpu_device *adev) | ||
1964 | { | ||
1965 | int i; | ||
1966 | |||
1967 | for (i = 0; i < adev->mode_info.num_dig; i++) { | ||
1968 | kfree(adev->mode_info.afmt[i]); | ||
1969 | adev->mode_info.afmt[i] = NULL; | ||
1970 | } | ||
1971 | } | ||
1972 | |||
1973 | static const u32 vga_control_regs[6] = | ||
1974 | { | ||
1975 | mmD1VGA_CONTROL, | ||
1976 | mmD2VGA_CONTROL, | ||
1977 | mmD3VGA_CONTROL, | ||
1978 | mmD4VGA_CONTROL, | ||
1979 | mmD5VGA_CONTROL, | ||
1980 | mmD6VGA_CONTROL, | ||
1981 | }; | ||
1982 | |||
1983 | static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable) | ||
1984 | { | ||
1985 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
1986 | struct drm_device *dev = crtc->dev; | ||
1987 | struct amdgpu_device *adev = dev->dev_private; | ||
1988 | u32 vga_control; | ||
1989 | |||
1990 | vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; | ||
1991 | if (enable) | ||
1992 | WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); | ||
1993 | else | ||
1994 | WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); | ||
1995 | } | ||
1996 | |||
1997 | static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable) | ||
1998 | { | ||
1999 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2000 | struct drm_device *dev = crtc->dev; | ||
2001 | struct amdgpu_device *adev = dev->dev_private; | ||
2002 | |||
2003 | if (enable) | ||
2004 | WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); | ||
2005 | else | ||
2006 | WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); | ||
2007 | } | ||
2008 | |||
2009 | static void dce_v11_0_tiling_fields(uint64_t tiling_flags, unsigned *bankw, | ||
2010 | unsigned *bankh, unsigned *mtaspect, | ||
2011 | unsigned *tile_split) | ||
2012 | { | ||
2013 | *bankw = (tiling_flags >> AMDGPU_TILING_EG_BANKW_SHIFT) & AMDGPU_TILING_EG_BANKW_MASK; | ||
2014 | *bankh = (tiling_flags >> AMDGPU_TILING_EG_BANKH_SHIFT) & AMDGPU_TILING_EG_BANKH_MASK; | ||
2015 | *mtaspect = (tiling_flags >> AMDGPU_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & AMDGPU_TILING_EG_MACRO_TILE_ASPECT_MASK; | ||
2016 | *tile_split = (tiling_flags >> AMDGPU_TILING_EG_TILE_SPLIT_SHIFT) & AMDGPU_TILING_EG_TILE_SPLIT_MASK; | ||
2017 | switch (*bankw) { | ||
2018 | default: | ||
2019 | case 1: | ||
2020 | *bankw = ADDR_SURF_BANK_WIDTH_1; | ||
2021 | break; | ||
2022 | case 2: | ||
2023 | *bankw = ADDR_SURF_BANK_WIDTH_2; | ||
2024 | break; | ||
2025 | case 4: | ||
2026 | *bankw = ADDR_SURF_BANK_WIDTH_4; | ||
2027 | break; | ||
2028 | case 8: | ||
2029 | *bankw = ADDR_SURF_BANK_WIDTH_8; | ||
2030 | break; | ||
2031 | } | ||
2032 | switch (*bankh) { | ||
2033 | default: | ||
2034 | case 1: | ||
2035 | *bankh = ADDR_SURF_BANK_HEIGHT_1; | ||
2036 | break; | ||
2037 | case 2: | ||
2038 | *bankh = ADDR_SURF_BANK_HEIGHT_2; | ||
2039 | break; | ||
2040 | case 4: | ||
2041 | *bankh = ADDR_SURF_BANK_HEIGHT_4; | ||
2042 | break; | ||
2043 | case 8: | ||
2044 | *bankh = ADDR_SURF_BANK_HEIGHT_8; | ||
2045 | break; | ||
2046 | } | ||
2047 | switch (*mtaspect) { | ||
2048 | default: | ||
2049 | case 1: | ||
2050 | *mtaspect = ADDR_SURF_MACRO_ASPECT_1; | ||
2051 | break; | ||
2052 | case 2: | ||
2053 | *mtaspect = ADDR_SURF_MACRO_ASPECT_2; | ||
2054 | break; | ||
2055 | case 4: | ||
2056 | *mtaspect = ADDR_SURF_MACRO_ASPECT_4; | ||
2057 | break; | ||
2058 | case 8: | ||
2059 | *mtaspect = ADDR_SURF_MACRO_ASPECT_8; | ||
2060 | break; | ||
2061 | } | ||
2062 | } | ||
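
The three switches above all perform the same mapping: the raw power-of-two value held in the tiling flags becomes the corresponding ADDR_SURF_* enum, which (assuming the usual gfx8 addrlib encoding where ADDR_SURF_BANK_WIDTH_1..8 are 0..3) is simply its log2. A hypothetical one-liner equivalent for one of the fields:

    #include <linux/kernel.h>
    #include <linux/log2.h>

    /* Hypothetical helper: 1,2,4,8 -> ADDR_SURF_BANK_WIDTH_1..8 (0..3),
     * assuming the enum values are the log2 of the raw bank width. */
    static unsigned bankw_to_addr_surf(unsigned bankw)
    {
            return order_base_2(clamp(bankw, 1U, 8U));
    }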
2063 | |||
2064 | static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, | ||
2065 | struct drm_framebuffer *fb, | ||
2066 | int x, int y, int atomic) | ||
2067 | { | ||
2068 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2069 | struct drm_device *dev = crtc->dev; | ||
2070 | struct amdgpu_device *adev = dev->dev_private; | ||
2071 | struct amdgpu_framebuffer *amdgpu_fb; | ||
2072 | struct drm_framebuffer *target_fb; | ||
2073 | struct drm_gem_object *obj; | ||
2074 | struct amdgpu_bo *rbo; | ||
2075 | uint64_t fb_location, tiling_flags; | ||
2076 | uint32_t fb_format, fb_pitch_pixels; | ||
2077 | unsigned bankw, bankh, mtaspect, tile_split; | ||
2078 | u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); | ||
2079 | /* XXX change to VI */ | ||
2080 | u32 pipe_config = (adev->gfx.config.tile_mode_array[10] >> 6) & 0x1f; | ||
2081 | u32 tmp, viewport_w, viewport_h; | ||
2082 | int r; | ||
2083 | bool bypass_lut = false; | ||
2084 | |||
2085 | /* no fb bound */ | ||
2086 | if (!atomic && !crtc->primary->fb) { | ||
2087 | DRM_DEBUG_KMS("No FB bound\n"); | ||
2088 | return 0; | ||
2089 | } | ||
2090 | |||
2091 | if (atomic) { | ||
2092 | amdgpu_fb = to_amdgpu_framebuffer(fb); | ||
2093 | target_fb = fb; | ||
2094 | } | ||
2095 | else { | ||
2096 | amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); | ||
2097 | target_fb = crtc->primary->fb; | ||
2098 | } | ||
2099 | |||
2100 | /* If atomic, assume fb object is pinned & idle & fenced and | ||
2101 | * just update base pointers | ||
2102 | */ | ||
2103 | obj = amdgpu_fb->obj; | ||
2104 | rbo = gem_to_amdgpu_bo(obj); | ||
2105 | r = amdgpu_bo_reserve(rbo, false); | ||
2106 | if (unlikely(r != 0)) | ||
2107 | return r; | ||
2108 | |||
2109 | if (atomic) | ||
2110 | fb_location = amdgpu_bo_gpu_offset(rbo); | ||
2111 | else { | ||
2112 | r = amdgpu_bo_pin(rbo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location); | ||
2113 | if (unlikely(r != 0)) { | ||
2114 | amdgpu_bo_unreserve(rbo); | ||
2115 | return -EINVAL; | ||
2116 | } | ||
2117 | } | ||
2118 | |||
2119 | amdgpu_bo_get_tiling_flags(rbo, &tiling_flags); | ||
2120 | amdgpu_bo_unreserve(rbo); | ||
2121 | |||
2122 | switch (target_fb->pixel_format) { | ||
2123 | case DRM_FORMAT_C8: | ||
2124 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); | ||
2125 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); | ||
2126 | break; | ||
2127 | case DRM_FORMAT_XRGB4444: | ||
2128 | case DRM_FORMAT_ARGB4444: | ||
2129 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); | ||
2130 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); | ||
2131 | #ifdef __BIG_ENDIAN | ||
2132 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2133 | ENDIAN_8IN16); | ||
2134 | #endif | ||
2135 | break; | ||
2136 | case DRM_FORMAT_XRGB1555: | ||
2137 | case DRM_FORMAT_ARGB1555: | ||
2138 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); | ||
2139 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); | ||
2140 | #ifdef __BIG_ENDIAN | ||
2141 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2142 | ENDIAN_8IN16); | ||
2143 | #endif | ||
2144 | break; | ||
2145 | case DRM_FORMAT_BGRX5551: | ||
2146 | case DRM_FORMAT_BGRA5551: | ||
2147 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); | ||
2148 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); | ||
2149 | #ifdef __BIG_ENDIAN | ||
2150 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2151 | ENDIAN_8IN16); | ||
2152 | #endif | ||
2153 | break; | ||
2154 | case DRM_FORMAT_RGB565: | ||
2155 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); | ||
2156 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); | ||
2157 | #ifdef __BIG_ENDIAN | ||
2158 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2159 | ENDIAN_8IN16); | ||
2160 | #endif | ||
2161 | break; | ||
2162 | case DRM_FORMAT_XRGB8888: | ||
2163 | case DRM_FORMAT_ARGB8888: | ||
2164 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); | ||
2165 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); | ||
2166 | #ifdef __BIG_ENDIAN | ||
2167 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2168 | ENDIAN_8IN32); | ||
2169 | #endif | ||
2170 | break; | ||
2171 | case DRM_FORMAT_XRGB2101010: | ||
2172 | case DRM_FORMAT_ARGB2101010: | ||
2173 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); | ||
2174 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); | ||
2175 | #ifdef __BIG_ENDIAN | ||
2176 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2177 | ENDIAN_8IN32); | ||
2178 | #endif | ||
2179 | /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */ | ||
2180 | bypass_lut = true; | ||
2181 | break; | ||
2182 | case DRM_FORMAT_BGRX1010102: | ||
2183 | case DRM_FORMAT_BGRA1010102: | ||
2184 | fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); | ||
2185 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); | ||
2186 | #ifdef __BIG_ENDIAN | ||
2187 | fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, | ||
2188 | ENDIAN_8IN32); | ||
2189 | #endif | ||
2190 | /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */ | ||
2191 | bypass_lut = true; | ||
2192 | break; | ||
2193 | default: | ||
2194 | DRM_ERROR("Unsupported screen format %s\n", | ||
2195 | drm_get_format_name(target_fb->pixel_format)); | ||
2196 | return -EINVAL; | ||
2197 | } | ||
2198 | |||
2199 | if (tiling_flags & AMDGPU_TILING_MACRO) { | ||
2200 | unsigned tileb, index, num_banks, tile_split_bytes; | ||
2201 | |||
2202 | dce_v11_0_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split); | ||
2203 | /* Set NUM_BANKS. */ | ||
2204 | /* Calculate the macrotile mode index. */ | ||
2205 | tile_split_bytes = 64 << tile_split; | ||
2206 | tileb = 8 * 8 * target_fb->bits_per_pixel / 8; | ||
2207 | tileb = min(tile_split_bytes, tileb); | ||
2208 | |||
2209 | for (index = 0; tileb > 64; index++) { | ||
2210 | tileb >>= 1; | ||
2211 | } | ||
2212 | |||
2213 | if (index >= 16) { | ||
2214 | DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", | ||
2215 | target_fb->bits_per_pixel, tile_split); | ||
2216 | return -EINVAL; | ||
2217 | } | ||
2218 | |||
2219 | /* XXX fix me for VI */ | ||
2220 | num_banks = (adev->gfx.config.macrotile_mode_array[index] >> 6) & 0x3; | ||
2221 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); | ||
2222 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, | ||
2223 | ARRAY_2D_TILED_THIN1); | ||
2224 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, | ||
2225 | tile_split); | ||
2226 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); | ||
2227 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); | ||
2228 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, | ||
2229 | mtaspect); | ||
2230 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, | ||
2231 | ADDR_SURF_MICRO_TILING_DISPLAY); | ||
2232 | } else if (tiling_flags & AMDGPU_TILING_MICRO) { | ||
2233 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, | ||
2234 | ARRAY_1D_TILED_THIN1); | ||
2235 | } | ||
2236 | |||
2237 | /* Read the pipe config from the 2D TILED SCANOUT mode. | ||
2238 | * It should be the same for the other modes too, but not all | ||
2239 | * modes set the pipe config field. */ | ||
2240 | fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, | ||
2241 | pipe_config); | ||
2242 | |||
2243 | dce_v11_0_vga_enable(crtc, false); | ||
2244 | |||
2245 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
2246 | upper_32_bits(fb_location)); | ||
2247 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
2248 | upper_32_bits(fb_location)); | ||
2249 | WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
2250 | (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); | ||
2251 | WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
2252 | (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); | ||
2253 | WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); | ||
2254 | WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); | ||
2255 | |||
2256 | /* | ||
2257 | * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT | ||
2258 | * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to | ||
2259 | * retain the full precision throughout the pipeline. | ||
2260 | */ | ||
2261 | tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); | ||
2262 | if (bypass_lut) | ||
2263 | tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); | ||
2264 | else | ||
2265 | tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); | ||
2266 | WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); | ||
2267 | |||
2268 | if (bypass_lut) | ||
2269 | DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); | ||
2270 | |||
2271 | WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); | ||
2272 | WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); | ||
2273 | WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); | ||
2274 | WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); | ||
2275 | WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); | ||
2276 | WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); | ||
2277 | |||
2278 | fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); | ||
2279 | WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); | ||
2280 | |||
2281 | dce_v11_0_grph_enable(crtc, true); | ||
2282 | |||
2283 | WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, | ||
2284 | target_fb->height); | ||
2285 | |||
2286 | x &= ~3; | ||
2287 | y &= ~1; | ||
2288 | WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, | ||
2289 | (x << 16) | y); | ||
2290 | viewport_w = crtc->mode.hdisplay; | ||
2291 | viewport_h = (crtc->mode.vdisplay + 1) & ~1; | ||
2292 | WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, | ||
2293 | (viewport_w << 16) | viewport_h); | ||
2294 | |||
2295 | /* pageflip setup */ | ||
2296 | /* make sure flip is at vb rather than hb */ | ||
2297 | tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); | ||
2298 | tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, | ||
2299 | GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); | ||
2300 | WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2301 | |||
2302 | /* set pageflip to happen only at start of vblank interval (front porch) */ | ||
2303 | WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); | ||
2304 | |||
2305 | if (!atomic && fb && fb != crtc->primary->fb) { | ||
2306 | amdgpu_fb = to_amdgpu_framebuffer(fb); | ||
2307 | rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); | ||
2308 | r = amdgpu_bo_reserve(rbo, false); | ||
2309 | if (unlikely(r != 0)) | ||
2310 | return r; | ||
2311 | amdgpu_bo_unpin(rbo); | ||
2312 | amdgpu_bo_unreserve(rbo); | ||
2313 | } | ||
2314 | |||
2315 | /* Bytes per pixel may have changed */ | ||
2316 | dce_v11_0_bandwidth_update(adev); | ||
2317 | |||
2318 | return 0; | ||
2319 | } | ||
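
To make the macrotile index computation in dce_v11_0_crtc_do_set_base() concrete: an 8x8 micro-tile of a 32 bpp framebuffer is 8*8*4 = 256 bytes; after clamping against the tile split, the loop effectively computes index = log2(tileb) - log2(64), so 256 bytes gives index 2, which selects entry 2 of macrotile_mode_array for the NUM_BANKS field. A self-contained restatement (a sketch, not driver code):

    /* index = log2(min(64 << tile_split, 8x8 tile bytes)) - log2(64) */
    static unsigned macrotile_index(unsigned bpp, unsigned tile_split)
    {
            unsigned tileb = min(64u << tile_split, 8 * 8 * bpp / 8);
            unsigned index = 0;

            while (tileb > 64) {    /* 256 -> 128 -> 64: index = 2 */
                    tileb >>= 1;
                    index++;
            }
            return index;           /* caller must still range-check (< 16) */
    }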
2320 | |||
2321 | static void dce_v11_0_set_interleave(struct drm_crtc *crtc, | ||
2322 | struct drm_display_mode *mode) | ||
2323 | { | ||
2324 | struct drm_device *dev = crtc->dev; | ||
2325 | struct amdgpu_device *adev = dev->dev_private; | ||
2326 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2327 | u32 tmp; | ||
2328 | |||
2329 | tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); | ||
2330 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) | ||
2331 | tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); | ||
2332 | else | ||
2333 | tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); | ||
2334 | WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); | ||
2335 | } | ||
2336 | |||
2337 | static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) | ||
2338 | { | ||
2339 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2340 | struct drm_device *dev = crtc->dev; | ||
2341 | struct amdgpu_device *adev = dev->dev_private; | ||
2342 | int i; | ||
2343 | u32 tmp; | ||
2344 | |||
2345 | DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); | ||
2346 | |||
2347 | tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); | ||
2348 | tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); | ||
2349 | WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2350 | |||
2351 | tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); | ||
2352 | tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); | ||
2353 | WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2354 | |||
2355 | tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset); | ||
2356 | tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); | ||
2357 | WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2358 | |||
2359 | WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); | ||
2360 | |||
2361 | WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); | ||
2362 | WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); | ||
2363 | WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); | ||
2364 | |||
2365 | WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); | ||
2366 | WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); | ||
2367 | WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); | ||
2368 | |||
2369 | WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); | ||
2370 | WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); | ||
2371 | |||
2372 | WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); | ||
2373 | for (i = 0; i < 256; i++) { | ||
2374 | WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, | ||
2375 | (amdgpu_crtc->lut_r[i] << 20) | | ||
2376 | (amdgpu_crtc->lut_g[i] << 10) | | ||
2377 | (amdgpu_crtc->lut_b[i] << 0)); | ||
2378 | } | ||
2379 | |||
2380 | tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); | ||
2381 | tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); | ||
2382 | tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); | ||
2383 | tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0); | ||
2384 | WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2385 | |||
2386 | tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); | ||
2387 | tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); | ||
2388 | WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2389 | |||
2390 | tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); | ||
2391 | tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); | ||
2392 | WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2393 | |||
2394 | tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); | ||
2395 | tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); | ||
2396 | WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2397 | |||
2398 | /* XXX match this to the depth of the crtc fmt block, move to modeset? */ | ||
2399 | WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); | ||
2400 | /* XXX this only needs to be programmed once per crtc at startup, | ||
2401 | * not sure where the best place for it is | ||
2402 | */ | ||
2403 | tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); | ||
2404 | tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); | ||
2405 | WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2406 | } | ||
2407 | |||
2408 | static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder) | ||
2409 | { | ||
2410 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
2411 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
2412 | |||
2413 | switch (amdgpu_encoder->encoder_id) { | ||
2414 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
2415 | if (dig->linkb) | ||
2416 | return 1; | ||
2417 | else | ||
2418 | return 0; | ||
2419 | break; | ||
2420 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
2421 | if (dig->linkb) | ||
2422 | return 3; | ||
2423 | else | ||
2424 | return 2; | ||
2425 | break; | ||
2426 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
2427 | if (dig->linkb) | ||
2428 | return 5; | ||
2429 | else | ||
2430 | return 4; | ||
2431 | break; | ||
2432 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
2433 | return 6; | ||
2434 | break; | ||
2435 | default: | ||
2436 | DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); | ||
2437 | return 0; | ||
2438 | } | ||
2439 | } | ||
2440 | |||
2441 | /** | ||
2442 | * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc. | ||
2443 | * | ||
2444 | * @crtc: drm crtc | ||
2445 | * | ||
2446 | * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors | ||
2447 | * a single PPLL can be used for all DP crtcs/encoders. For non-DP | ||
2448 | * monitors a dedicated PPLL must be used. If a particular board has | ||
2449 | * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming | ||
2450 | * as there is no need to program the PLL itself. If we are not able to | ||
2451 | * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to | ||
2452 | * avoid messing up an existing monitor. | ||
2453 | * | ||
2454 | * Asic specific PLL information | ||
2455 | * | ||
2456 | * DCE 10.x | ||
2457 | * Tonga | ||
2458 | * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) | ||
2459 | * CI | ||
2460 | * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC | ||
2461 | * | ||
2462 | */ | ||
2463 | static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) | ||
2464 | { | ||
2465 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2466 | struct drm_device *dev = crtc->dev; | ||
2467 | struct amdgpu_device *adev = dev->dev_private; | ||
2468 | u32 pll_in_use; | ||
2469 | int pll; | ||
2470 | |||
2471 | if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { | ||
2472 | if (adev->clock.dp_extclk) | ||
2473 | /* skip PPLL programming if using ext clock */ | ||
2474 | return ATOM_PPLL_INVALID; | ||
2475 | else { | ||
2476 | /* use the same PPLL for all DP monitors */ | ||
2477 | pll = amdgpu_pll_get_shared_dp_ppll(crtc); | ||
2478 | if (pll != ATOM_PPLL_INVALID) | ||
2479 | return pll; | ||
2480 | } | ||
2481 | } else { | ||
2482 | /* use the same PPLL for all monitors with the same clock */ | ||
2483 | pll = amdgpu_pll_get_shared_nondp_ppll(crtc); | ||
2484 | if (pll != ATOM_PPLL_INVALID) | ||
2485 | return pll; | ||
2486 | } | ||
2487 | |||
2488 | /* XXX need to determine what plls are available on each DCE11 part */ | ||
2489 | pll_in_use = amdgpu_pll_get_use_mask(crtc); | ||
2490 | if (adev->asic_type == CHIP_CARRIZO) { | ||
2491 | if (!(pll_in_use & (1 << ATOM_PPLL1))) | ||
2492 | return ATOM_PPLL1; | ||
2493 | if (!(pll_in_use & (1 << ATOM_PPLL0))) | ||
2494 | return ATOM_PPLL0; | ||
2495 | DRM_ERROR("unable to allocate a PPLL\n"); | ||
2496 | return ATOM_PPLL_INVALID; | ||
2497 | } else { | ||
2498 | if (!(pll_in_use & (1 << ATOM_PPLL2))) | ||
2499 | return ATOM_PPLL2; | ||
2500 | if (!(pll_in_use & (1 << ATOM_PPLL1))) | ||
2501 | return ATOM_PPLL1; | ||
2502 | if (!(pll_in_use & (1 << ATOM_PPLL0))) | ||
2503 | return ATOM_PPLL0; | ||
2504 | DRM_ERROR("unable to allocate a PPLL\n"); | ||
2505 | return ATOM_PPLL_INVALID; | ||
2506 | } | ||
2507 | return ATOM_PPLL_INVALID; | ||
2508 | } | ||
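
amdgpu_pll_get_use_mask() returns a bitmask with bit N set when PPLLN is already committed to another crtc, so the allocation above simply claims the first free PLL. For example, with PPLL1 already busy the Carrizo branch falls through to PPLL0; with both busy it returns ATOM_PPLL_INVALID so an active monitor is never disturbed:

    /* Sketch of the Carrizo branch above: claim the first PLL whose
     * bit is clear in the use mask, else report failure. */
    static int pick_free_ppll_carrizo(u32 pll_in_use)
    {
            if (!(pll_in_use & (1 << ATOM_PPLL1)))
                    return ATOM_PPLL1;
            if (!(pll_in_use & (1 << ATOM_PPLL0)))
                    return ATOM_PPLL0;
            return ATOM_PPLL_INVALID;   /* both already in use */
    }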
2509 | |||
2510 | static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock) | ||
2511 | { | ||
2512 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2513 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2514 | uint32_t cur_lock; | ||
2515 | |||
2516 | cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); | ||
2517 | if (lock) | ||
2518 | cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); | ||
2519 | else | ||
2520 | cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0); | ||
2521 | WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); | ||
2522 | } | ||
2523 | |||
2524 | static void dce_v11_0_hide_cursor(struct drm_crtc *crtc) | ||
2525 | { | ||
2526 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2527 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2528 | u32 tmp; | ||
2529 | |||
2530 | tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); | ||
2531 | tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); | ||
2532 | WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2533 | } | ||
2534 | |||
2535 | static void dce_v11_0_show_cursor(struct drm_crtc *crtc) | ||
2536 | { | ||
2537 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2538 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2539 | u32 tmp; | ||
2540 | |||
2541 | tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); | ||
2542 | tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); | ||
2543 | tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); | ||
2544 | WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); | ||
2545 | } | ||
2546 | |||
2547 | static void dce_v11_0_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, | ||
2548 | uint64_t gpu_addr) | ||
2549 | { | ||
2550 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2551 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2552 | |||
2553 | WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, | ||
2554 | upper_32_bits(gpu_addr)); | ||
2555 | WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, | ||
2556 | lower_32_bits(gpu_addr)); | ||
2557 | } | ||
2558 | |||
2559 | static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc, | ||
2560 | int x, int y) | ||
2561 | { | ||
2562 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2563 | struct amdgpu_device *adev = crtc->dev->dev_private; | ||
2564 | int xorigin = 0, yorigin = 0; | ||
2565 | |||
2566 | /* avivo cursors are offset into the total surface */ | ||
2567 | x += crtc->x; | ||
2568 | y += crtc->y; | ||
2569 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | ||
2570 | |||
2571 | if (x < 0) { | ||
2572 | xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); | ||
2573 | x = 0; | ||
2574 | } | ||
2575 | if (y < 0) { | ||
2576 | yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); | ||
2577 | y = 0; | ||
2578 | } | ||
2579 | |||
2580 | dce_v11_0_lock_cursor(crtc, true); | ||
2581 | WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); | ||
2582 | WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); | ||
2583 | WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, | ||
2584 | ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); | ||
2585 | dce_v11_0_lock_cursor(crtc, false); | ||
2586 | |||
2587 | return 0; | ||
2588 | } | ||
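
When the cursor overhangs the top or left edge, the hardware position is clamped to zero and the overhang is pushed into the hot-spot register instead. A worked example (assuming a 64-pixel max cursor width):

    /* crtc-relative x = -10 after the crtc->x adjustment above: */
    int x = -10, xorigin = 0;
    if (x < 0) {
            xorigin = min(-x, 64 - 1);  /* xorigin = 10 */
            x = 0;
    }
    /* CUR_POSITION is written with x = 0, CUR_HOT_SPOT with xorigin = 10,
     * so the visible part of the cursor image still lines up. */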
2589 | |||
2590 | static int dce_v11_0_crtc_cursor_set(struct drm_crtc *crtc, | ||
2591 | struct drm_file *file_priv, | ||
2592 | uint32_t handle, | ||
2593 | uint32_t width, | ||
2594 | uint32_t height) | ||
2595 | { | ||
2596 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2597 | struct drm_gem_object *obj; | ||
2598 | struct amdgpu_bo *robj; | ||
2599 | uint64_t gpu_addr; | ||
2600 | int ret; | ||
2601 | |||
2602 | if (!handle) { | ||
2603 | /* turn off cursor */ | ||
2604 | dce_v11_0_hide_cursor(crtc); | ||
2605 | obj = NULL; | ||
2606 | goto unpin; | ||
2607 | } | ||
2608 | |||
2609 | if ((width > amdgpu_crtc->max_cursor_width) || | ||
2610 | (height > amdgpu_crtc->max_cursor_height)) { | ||
2611 | DRM_ERROR("bad cursor width or height %d x %d\n", width, height); | ||
2612 | return -EINVAL; | ||
2613 | } | ||
2614 | |||
2615 | obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); | ||
2616 | if (!obj) { | ||
2617 | DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); | ||
2618 | return -ENOENT; | ||
2619 | } | ||
2620 | |||
2621 | robj = gem_to_amdgpu_bo(obj); | ||
2622 | ret = amdgpu_bo_reserve(robj, false); | ||
2623 | if (unlikely(ret != 0)) | ||
2624 | goto fail; | ||
2625 | ret = amdgpu_bo_pin_restricted(robj, AMDGPU_GEM_DOMAIN_VRAM, | ||
2626 | 0, &gpu_addr); | ||
2627 | amdgpu_bo_unreserve(robj); | ||
2628 | if (ret) | ||
2629 | goto fail; | ||
2630 | |||
2631 | amdgpu_crtc->cursor_width = width; | ||
2632 | amdgpu_crtc->cursor_height = height; | ||
2633 | |||
2634 | dce_v11_0_lock_cursor(crtc, true); | ||
2635 | dce_v11_0_set_cursor(crtc, obj, gpu_addr); | ||
2636 | dce_v11_0_show_cursor(crtc); | ||
2637 | dce_v11_0_lock_cursor(crtc, false); | ||
2638 | |||
2639 | unpin: | ||
2640 | if (amdgpu_crtc->cursor_bo) { | ||
2641 | robj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); | ||
2642 | ret = amdgpu_bo_reserve(robj, false); | ||
2643 | if (likely(ret == 0)) { | ||
2644 | amdgpu_bo_unpin(robj); | ||
2645 | amdgpu_bo_unreserve(robj); | ||
2646 | } | ||
2647 | drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); | ||
2648 | } | ||
2649 | |||
2650 | amdgpu_crtc->cursor_bo = obj; | ||
2651 | return 0; | ||
2652 | fail: | ||
2653 | drm_gem_object_unreference_unlocked(obj); | ||
2654 | |||
2655 | return ret; | ||
2656 | } | ||
2657 | |||
2658 | static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, | ||
2659 | u16 *blue, uint32_t start, uint32_t size) | ||
2660 | { | ||
2661 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2662 | int end = (start + size > 256) ? 256 : start + size, i; | ||
2663 | |||
2664 | /* userspace palettes are always correct as is */ | ||
2665 | for (i = start; i < end; i++) { | ||
2666 | amdgpu_crtc->lut_r[i] = red[i] >> 6; | ||
2667 | amdgpu_crtc->lut_g[i] = green[i] >> 6; | ||
2668 | amdgpu_crtc->lut_b[i] = blue[i] >> 6; | ||
2669 | } | ||
2670 | dce_v11_0_crtc_load_lut(crtc); | ||
2671 | } | ||
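
The DRM core hands gamma_set 16-bit components; the >> 6 above truncates them to the 10-bit values that dce_v11_0_crtc_load_lut() packs into DC_LUT_30_COLOR as (r << 20) | (g << 10) | b. A small illustration:

    /* 16-bit full-scale white becomes 10-bit 0x3ff per component: */
    u16 red = 0xffff, green = 0xffff, blue = 0xffff;
    u32 lut30 = ((red >> 6) << 20) | ((green >> 6) << 10) | (blue >> 6);
    /* lut30 == 0x3fffffff */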
2672 | |||
2673 | static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) | ||
2674 | { | ||
2675 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2676 | |||
2677 | drm_crtc_cleanup(crtc); | ||
2678 | destroy_workqueue(amdgpu_crtc->pflip_queue); | ||
2679 | kfree(amdgpu_crtc); | ||
2680 | } | ||
2681 | |||
2682 | static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { | ||
2683 | .cursor_set = dce_v11_0_crtc_cursor_set, | ||
2684 | .cursor_move = dce_v11_0_crtc_cursor_move, | ||
2685 | .gamma_set = dce_v11_0_crtc_gamma_set, | ||
2686 | .set_config = amdgpu_crtc_set_config, | ||
2687 | .destroy = dce_v11_0_crtc_destroy, | ||
2688 | .page_flip = amdgpu_crtc_page_flip, | ||
2689 | }; | ||
2690 | |||
2691 | static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) | ||
2692 | { | ||
2693 | struct drm_device *dev = crtc->dev; | ||
2694 | struct amdgpu_device *adev = dev->dev_private; | ||
2695 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2696 | |||
2697 | switch (mode) { | ||
2698 | case DRM_MODE_DPMS_ON: | ||
2699 | amdgpu_crtc->enabled = true; | ||
2700 | amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); | ||
2701 | dce_v11_0_vga_enable(crtc, true); | ||
2702 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); | ||
2703 | dce_v11_0_vga_enable(crtc, false); | ||
2704 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); | ||
2705 | dce_v11_0_crtc_load_lut(crtc); | ||
2706 | break; | ||
2707 | case DRM_MODE_DPMS_STANDBY: | ||
2708 | case DRM_MODE_DPMS_SUSPEND: | ||
2709 | case DRM_MODE_DPMS_OFF: | ||
2710 | drm_vblank_pre_modeset(dev, amdgpu_crtc->crtc_id); | ||
2711 | if (amdgpu_crtc->enabled) { | ||
2712 | dce_v11_0_vga_enable(crtc, true); | ||
2713 | amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); | ||
2714 | dce_v11_0_vga_enable(crtc, false); | ||
2715 | } | ||
2716 | amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); | ||
2717 | amdgpu_crtc->enabled = false; | ||
2718 | break; | ||
2719 | } | ||
2720 | /* adjust pm to dpms */ | ||
2721 | amdgpu_pm_compute_clocks(adev); | ||
2722 | } | ||
2723 | |||
2724 | static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc) | ||
2725 | { | ||
2726 | /* disable crtc pair power gating before programming */ | ||
2727 | amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); | ||
2728 | amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); | ||
2729 | dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
2730 | } | ||
2731 | |||
2732 | static void dce_v11_0_crtc_commit(struct drm_crtc *crtc) | ||
2733 | { | ||
2734 | dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); | ||
2735 | amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); | ||
2736 | } | ||
2737 | |||
2738 | static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) | ||
2739 | { | ||
2740 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2741 | struct drm_device *dev = crtc->dev; | ||
2742 | struct amdgpu_device *adev = dev->dev_private; | ||
2743 | struct amdgpu_atom_ss ss; | ||
2744 | int i; | ||
2745 | |||
2746 | dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | ||
2747 | if (crtc->primary->fb) { | ||
2748 | int r; | ||
2749 | struct amdgpu_framebuffer *amdgpu_fb; | ||
2750 | struct amdgpu_bo *rbo; | ||
2751 | |||
2752 | amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); | ||
2753 | rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); | ||
2754 | r = amdgpu_bo_reserve(rbo, false); | ||
2755 | if (unlikely(r)) | ||
2756 | DRM_ERROR("failed to reserve rbo before unpin\n"); | ||
2757 | else { | ||
2758 | amdgpu_bo_unpin(rbo); | ||
2759 | amdgpu_bo_unreserve(rbo); | ||
2760 | } | ||
2761 | } | ||
2762 | /* disable the GRPH */ | ||
2763 | dce_v11_0_grph_enable(crtc, false); | ||
2764 | |||
2765 | amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); | ||
2766 | |||
2767 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
2768 | if (adev->mode_info.crtcs[i] && | ||
2769 | adev->mode_info.crtcs[i]->enabled && | ||
2770 | i != amdgpu_crtc->crtc_id && | ||
2771 | amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { | ||
2772 | /* another crtc is still using this pll, so don't | ||
2773 | * turn it off | ||
2774 | */ | ||
2775 | goto done; | ||
2776 | } | ||
2777 | } | ||
2778 | |||
2779 | switch (amdgpu_crtc->pll_id) { | ||
2780 | case ATOM_PPLL0: | ||
2781 | case ATOM_PPLL1: | ||
2782 | case ATOM_PPLL2: | ||
2783 | /* disable the ppll */ | ||
2784 | amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, | ||
2785 | 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); | ||
2786 | break; | ||
2787 | default: | ||
2788 | break; | ||
2789 | } | ||
2790 | done: | ||
2791 | amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; | ||
2792 | amdgpu_crtc->adjusted_clock = 0; | ||
2793 | amdgpu_crtc->encoder = NULL; | ||
2794 | amdgpu_crtc->connector = NULL; | ||
2795 | } | ||
2796 | |||
2797 | static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, | ||
2798 | struct drm_display_mode *mode, | ||
2799 | struct drm_display_mode *adjusted_mode, | ||
2800 | int x, int y, struct drm_framebuffer *old_fb) | ||
2801 | { | ||
2802 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2803 | |||
2804 | if (!amdgpu_crtc->adjusted_clock) | ||
2805 | return -EINVAL; | ||
2806 | |||
2807 | amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); | ||
2808 | amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); | ||
2809 | dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); | ||
2810 | amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); | ||
2811 | amdgpu_atombios_crtc_scaler_setup(crtc); | ||
2812 | /* update the hw version for dpm */ | ||
2813 | amdgpu_crtc->hw_mode = *adjusted_mode; | ||
2814 | |||
2815 | return 0; | ||
2816 | } | ||
2817 | |||
2818 | static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc, | ||
2819 | const struct drm_display_mode *mode, | ||
2820 | struct drm_display_mode *adjusted_mode) | ||
2821 | { | ||
2822 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | ||
2823 | struct drm_device *dev = crtc->dev; | ||
2824 | struct drm_encoder *encoder; | ||
2825 | |||
2826 | /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ | ||
2827 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
2828 | if (encoder->crtc == crtc) { | ||
2829 | amdgpu_crtc->encoder = encoder; | ||
2830 | amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); | ||
2831 | break; | ||
2832 | } | ||
2833 | } | ||
2834 | if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { | ||
2835 | amdgpu_crtc->encoder = NULL; | ||
2836 | amdgpu_crtc->connector = NULL; | ||
2837 | return false; | ||
2838 | } | ||
2839 | if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) | ||
2840 | return false; | ||
2841 | if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) | ||
2842 | return false; | ||
2843 | /* pick pll */ | ||
2844 | amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc); | ||
2845 | /* if we can't get a PPLL for a non-DP encoder, fail */ | ||
2846 | if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && | ||
2847 | !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) | ||
2848 | return false; | ||
2849 | |||
2850 | return true; | ||
2851 | } | ||
2852 | |||
2853 | static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, | ||
2854 | struct drm_framebuffer *old_fb) | ||
2855 | { | ||
2856 | return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); | ||
2857 | } | ||
2858 | |||
2859 | static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc, | ||
2860 | struct drm_framebuffer *fb, | ||
2861 | int x, int y, enum mode_set_atomic state) | ||
2862 | { | ||
2863 | return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1); | ||
2864 | } | ||
2865 | |||
2866 | static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { | ||
2867 | .dpms = dce_v11_0_crtc_dpms, | ||
2868 | .mode_fixup = dce_v11_0_crtc_mode_fixup, | ||
2869 | .mode_set = dce_v11_0_crtc_mode_set, | ||
2870 | .mode_set_base = dce_v11_0_crtc_set_base, | ||
2871 | .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic, | ||
2872 | .prepare = dce_v11_0_crtc_prepare, | ||
2873 | .commit = dce_v11_0_crtc_commit, | ||
2874 | .load_lut = dce_v11_0_crtc_load_lut, | ||
2875 | .disable = dce_v11_0_crtc_disable, | ||
2876 | }; | ||
2877 | |||
2878 | static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) | ||
2879 | { | ||
2880 | struct amdgpu_crtc *amdgpu_crtc; | ||
2881 | int i; | ||
2882 | |||
2883 | amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + | ||
2884 | (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); | ||
2885 | if (amdgpu_crtc == NULL) | ||
2886 | return -ENOMEM; | ||
2887 | |||
2888 | drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs); | ||
2889 | |||
2890 | drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); | ||
2891 | amdgpu_crtc->crtc_id = index; | ||
2892 | amdgpu_crtc->pflip_queue = create_singlethread_workqueue("amdgpu-pageflip-queue"); | ||
2893 | adev->mode_info.crtcs[index] = amdgpu_crtc; | ||
2894 | |||
2895 | amdgpu_crtc->max_cursor_width = 128; | ||
2896 | amdgpu_crtc->max_cursor_height = 128; | ||
2897 | adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; | ||
2898 | adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; | ||
2899 | |||
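| /* seed a linear ramp; i << 2 widens the 8-bit index to the 10-bit LUT range */ | ||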
2900 | for (i = 0; i < 256; i++) { | ||
2901 | amdgpu_crtc->lut_r[i] = i << 2; | ||
2902 | amdgpu_crtc->lut_g[i] = i << 2; | ||
2903 | amdgpu_crtc->lut_b[i] = i << 2; | ||
2904 | } | ||
2905 | |||
2906 | switch (amdgpu_crtc->crtc_id) { | ||
2907 | case 0: | ||
2908 | default: | ||
2909 | amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; | ||
2910 | break; | ||
2911 | case 1: | ||
2912 | amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; | ||
2913 | break; | ||
2914 | case 2: | ||
2915 | amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; | ||
2916 | break; | ||
2917 | case 3: | ||
2918 | amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; | ||
2919 | break; | ||
2920 | case 4: | ||
2921 | amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; | ||
2922 | break; | ||
2923 | case 5: | ||
2924 | amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; | ||
2925 | break; | ||
2926 | } | ||
2927 | |||
2928 | amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; | ||
2929 | amdgpu_crtc->adjusted_clock = 0; | ||
2930 | amdgpu_crtc->encoder = NULL; | ||
2931 | amdgpu_crtc->connector = NULL; | ||
2932 | drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs); | ||
2933 | |||
2934 | return 0; | ||
2935 | } | ||
2936 | |||
2937 | static int dce_v11_0_early_init(struct amdgpu_device *adev) | ||
2938 | { | ||
2939 | adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; | ||
2940 | adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; | ||
2941 | |||
2942 | dce_v11_0_set_display_funcs(adev); | ||
2943 | dce_v11_0_set_irq_funcs(adev); | ||
2944 | |||
2945 | switch (adev->asic_type) { | ||
2946 | case CHIP_CARRIZO: | ||
2947 | adev->mode_info.num_crtc = 4; | ||
2948 | adev->mode_info.num_hpd = 6; | ||
2949 | adev->mode_info.num_dig = 9; | ||
2950 | break; | ||
2951 | default: | ||
2952 | /* FIXME: not supported yet */ | ||
2953 | return -EINVAL; | ||
2954 | } | ||
2955 | |||
2956 | return 0; | ||
2957 | } | ||
2958 | |||
2959 | static int dce_v11_0_sw_init(struct amdgpu_device *adev) | ||
2960 | { | ||
2961 | int r, i; | ||
2962 | |||
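| /* crtc vblank/vline interrupts arrive with src_id == crtc index + 1 (see dce_v11_0_crtc_irq) */ | ||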
2963 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
2964 | r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq); | ||
2965 | if (r) | ||
2966 | return r; | ||
2967 | } | ||
2968 | |||
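| /* pageflip interrupts use the even src_ids 8, 10, ..., 18, one per crtc */ | ||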
2969 | for (i = 8; i < 20; i += 2) { | ||
2970 | r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq); | ||
2971 | if (r) | ||
2972 | return r; | ||
2973 | } | ||
2974 | |||
2975 | /* HPD hotplug */ | ||
2976 | r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq); | ||
2977 | if (r) | ||
2978 | return r; | ||
2979 | |||
2980 | adev->mode_info.mode_config_initialized = true; | ||
2981 | |||
2982 | adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; | ||
2983 | |||
2984 | adev->ddev->mode_config.max_width = 16384; | ||
2985 | adev->ddev->mode_config.max_height = 16384; | ||
2986 | |||
2987 | adev->ddev->mode_config.preferred_depth = 24; | ||
2988 | adev->ddev->mode_config.prefer_shadow = 1; | ||
2989 | |||
2990 | adev->ddev->mode_config.fb_base = adev->mc.aper_base; | ||
2991 | |||
2992 | r = amdgpu_modeset_create_props(adev); | ||
2993 | if (r) | ||
2994 | return r; | ||
2995 | |||
2999 | /* allocate crtcs */ | ||
3000 | for (i = 0; i < adev->mode_info.num_crtc; i++) { | ||
3001 | r = dce_v11_0_crtc_init(adev, i); | ||
3002 | if (r) | ||
3003 | return r; | ||
3004 | } | ||
3005 | |||
3006 | if (amdgpu_atombios_get_connector_info_from_object_table(adev)) | ||
3007 | amdgpu_print_display_setup(adev->ddev); | ||
3008 | else | ||
3009 | return -EINVAL; | ||
3010 | |||
3011 | /* setup afmt */ | ||
3012 | dce_v11_0_afmt_init(adev); | ||
3013 | |||
3014 | r = dce_v11_0_audio_init(adev); | ||
3015 | if (r) | ||
3016 | return r; | ||
3017 | |||
3018 | drm_kms_helper_poll_init(adev->ddev); | ||
3019 | |||
3020 | return r; | ||
3021 | } | ||
3022 | |||
3023 | static int dce_v11_0_sw_fini(struct amdgpu_device *adev) | ||
3024 | { | ||
3025 | kfree(adev->mode_info.bios_hardcoded_edid); | ||
3026 | |||
3027 | drm_kms_helper_poll_fini(adev->ddev); | ||
3028 | |||
3029 | dce_v11_0_audio_fini(adev); | ||
3030 | |||
3031 | dce_v11_0_afmt_fini(adev); | ||
3032 | |||
3033 | adev->mode_info.mode_config_initialized = false; | ||
3034 | |||
3035 | return 0; | ||
3036 | } | ||
3037 | |||
3038 | static int dce_v11_0_hw_init(struct amdgpu_device *adev) | ||
3039 | { | ||
3040 | int i; | ||
3041 | |||
3042 | dce_v11_0_init_golden_registers(adev); | ||
3043 | |||
3044 | /* init dig PHYs, disp eng pll */ | ||
3045 | amdgpu_atombios_encoder_init_dig(adev); | ||
3046 | amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); | ||
3047 | |||
3048 | /* initialize hpd */ | ||
3049 | dce_v11_0_hpd_init(adev); | ||
3050 | |||
3051 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
3052 | dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | ||
3053 | } | ||
3054 | |||
3055 | return 0; | ||
3056 | } | ||
3057 | |||
3058 | static int dce_v11_0_hw_fini(struct amdgpu_device *adev) | ||
3059 | { | ||
3060 | int i; | ||
3061 | |||
3062 | dce_v11_0_hpd_fini(adev); | ||
3063 | |||
3064 | for (i = 0; i < adev->mode_info.audio.num_pins; i++) { | ||
3065 | dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); | ||
3066 | } | ||
3067 | |||
3068 | return 0; | ||
3069 | } | ||
3070 | |||
3071 | static int dce_v11_0_suspend(struct amdgpu_device *adev) | ||
3072 | { | ||
3073 | struct drm_connector *connector; | ||
3074 | |||
3075 | drm_kms_helper_poll_disable(adev->ddev); | ||
3076 | |||
3077 | /* turn off display hw */ | ||
3078 | list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { | ||
3079 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | ||
3080 | } | ||
3081 | |||
3082 | amdgpu_atombios_scratch_regs_save(adev); | ||
3083 | |||
3084 | dce_v11_0_hpd_fini(adev); | ||
3085 | |||
3086 | return 0; | ||
3087 | } | ||
3088 | |||
3089 | static int dce_v11_0_resume(struct amdgpu_device *adev) | ||
3090 | { | ||
3091 | struct drm_connector *connector; | ||
3092 | |||
3093 | dce_v11_0_init_golden_registers(adev); | ||
3094 | |||
3095 | amdgpu_atombios_scratch_regs_restore(adev); | ||
3096 | |||
3097 | /* init dig PHYs, disp eng pll */ | ||
3098 | amdgpu_atombios_crtc_powergate_init(adev); | ||
3099 | amdgpu_atombios_encoder_init_dig(adev); | ||
3100 | amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); | ||
3101 | /* turn on the BL */ | ||
3102 | if (adev->mode_info.bl_encoder) { | ||
3103 | u8 bl_level = amdgpu_display_backlight_get_level(adev, | ||
3104 | adev->mode_info.bl_encoder); | ||
3105 | amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, | ||
3106 | bl_level); | ||
3107 | } | ||
3108 | |||
3109 | /* initialize hpd */ | ||
3110 | dce_v11_0_hpd_init(adev); | ||
3111 | |||
3112 | /* force the saved mode back in */ | ||
3113 | drm_helper_resume_force_mode(adev->ddev); | ||
3114 | /* turn on display hw */ | ||
3115 | list_for_each_entry(connector, &adev->ddev->mode_config.connector_list, head) { | ||
3116 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
3117 | } | ||
3118 | |||
3119 | drm_kms_helper_poll_enable(adev->ddev); | ||
3120 | |||
3121 | return 0; | ||
3122 | } | ||
3123 | |||
3124 | static bool dce_v11_0_is_idle(struct amdgpu_device *adev) | ||
3125 | { | ||
3126 | /* XXX todo */ | ||
3127 | return true; | ||
3128 | } | ||
3129 | |||
3130 | static int dce_v11_0_wait_for_idle(struct amdgpu_device *adev) | ||
3131 | { | ||
3132 | /* XXX todo */ | ||
3133 | return 0; | ||
3134 | } | ||
3135 | |||
3136 | static void dce_v11_0_print_status(struct amdgpu_device *adev) | ||
3137 | { | ||
3138 | dev_info(adev->dev, "DCE 11.x registers\n"); | ||
3139 | /* XXX todo */ | ||
3140 | } | ||
3141 | |||
3142 | static int dce_v11_0_soft_reset(struct amdgpu_device *adev) | ||
3143 | { | ||
3144 | u32 srbm_soft_reset = 0, tmp; | ||
3145 | |||
3146 | if (dce_v11_0_is_display_hung(adev)) | ||
3147 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; | ||
3148 | |||
3149 | if (srbm_soft_reset) { | ||
3150 | dce_v11_0_print_status(adev); | ||
3151 | |||
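| /* assert the soft reset bits, wait, then deassert them; the read-backs post the writes */ | ||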
3152 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3153 | tmp |= srbm_soft_reset; | ||
3154 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
3155 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
3156 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3157 | |||
3158 | udelay(50); | ||
3159 | |||
3160 | tmp &= ~srbm_soft_reset; | ||
3161 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
3162 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3163 | |||
3164 | /* Wait a little for things to settle down */ | ||
3165 | udelay(50); | ||
3166 | dce_v11_0_print_status(adev); | ||
3167 | } | ||
3168 | return 0; | ||
3169 | } | ||
3170 | |||
3171 | static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, | ||
3172 | int crtc, | ||
3173 | enum amdgpu_interrupt_state state) | ||
3174 | { | ||
3175 | u32 lb_interrupt_mask; | ||
3176 | |||
3177 | if (crtc >= adev->mode_info.num_crtc) { | ||
3178 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
3179 | return; | ||
3180 | } | ||
3181 | |||
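| /* REG_SET_FIELD() rewrites only the named bit-field of the read-back register value */ | ||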
3182 | switch (state) { | ||
3183 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3184 | lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); | ||
3185 | lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, | ||
3186 | VBLANK_INTERRUPT_MASK, 0); | ||
3187 | WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); | ||
3188 | break; | ||
3189 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3190 | lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); | ||
3191 | lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, | ||
3192 | VBLANK_INTERRUPT_MASK, 1); | ||
3193 | WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); | ||
3194 | break; | ||
3195 | default: | ||
3196 | break; | ||
3197 | } | ||
3198 | } | ||
3199 | |||
3200 | static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, | ||
3201 | int crtc, | ||
3202 | enum amdgpu_interrupt_state state) | ||
3203 | { | ||
3204 | u32 lb_interrupt_mask; | ||
3205 | |||
3206 | if (crtc >= adev->mode_info.num_crtc) { | ||
3207 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
3208 | return; | ||
3209 | } | ||
3210 | |||
3211 | switch (state) { | ||
3212 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3213 | lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); | ||
3214 | lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, | ||
3215 | VLINE_INTERRUPT_MASK, 0); | ||
3216 | WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); | ||
3217 | break; | ||
3218 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3219 | lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); | ||
3220 | lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, | ||
3221 | VLINE_INTERRUPT_MASK, 1); | ||
3222 | WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); | ||
3223 | break; | ||
3224 | default: | ||
3225 | break; | ||
3226 | } | ||
3227 | } | ||
3228 | |||
3229 | static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev, | ||
3230 | struct amdgpu_irq_src *source, | ||
3231 | unsigned hpd, | ||
3232 | enum amdgpu_interrupt_state state) | ||
3233 | { | ||
3234 | u32 tmp; | ||
3235 | |||
3236 | if (hpd >= adev->mode_info.num_hpd) { | ||
3237 | DRM_DEBUG("invalid hpd %d\n", hpd); | ||
3238 | return 0; | ||
3239 | } | ||
3240 | |||
3241 | switch (state) { | ||
3242 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3243 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); | ||
3244 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); | ||
3245 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); | ||
3246 | break; | ||
3247 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3248 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); | ||
3249 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); | ||
3250 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); | ||
3251 | break; | ||
3252 | default: | ||
3253 | break; | ||
3254 | } | ||
3255 | |||
3256 | return 0; | ||
3257 | } | ||
3258 | |||
3259 | static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev, | ||
3260 | struct amdgpu_irq_src *source, | ||
3261 | unsigned type, | ||
3262 | enum amdgpu_interrupt_state state) | ||
3263 | { | ||
3264 | switch (type) { | ||
3265 | case AMDGPU_CRTC_IRQ_VBLANK1: | ||
3266 | dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state); | ||
3267 | break; | ||
3268 | case AMDGPU_CRTC_IRQ_VBLANK2: | ||
3269 | dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state); | ||
3270 | break; | ||
3271 | case AMDGPU_CRTC_IRQ_VBLANK3: | ||
3272 | dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state); | ||
3273 | break; | ||
3274 | case AMDGPU_CRTC_IRQ_VBLANK4: | ||
3275 | dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state); | ||
3276 | break; | ||
3277 | case AMDGPU_CRTC_IRQ_VBLANK5: | ||
3278 | dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state); | ||
3279 | break; | ||
3280 | case AMDGPU_CRTC_IRQ_VBLANK6: | ||
3281 | dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state); | ||
3282 | break; | ||
3283 | case AMDGPU_CRTC_IRQ_VLINE1: | ||
3284 | dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state); | ||
3285 | break; | ||
3286 | case AMDGPU_CRTC_IRQ_VLINE2: | ||
3287 | dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state); | ||
3288 | break; | ||
3289 | case AMDGPU_CRTC_IRQ_VLINE3: | ||
3290 | dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state); | ||
3291 | break; | ||
3292 | case AMDGPU_CRTC_IRQ_VLINE4: | ||
3293 | dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state); | ||
3294 | break; | ||
3295 | case AMDGPU_CRTC_IRQ_VLINE5: | ||
3296 | dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state); | ||
3297 | break; | ||
3298 | case AMDGPU_CRTC_IRQ_VLINE6: | ||
3299 | dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state); | ||
3300 | break; | ||
3301 | default: | ||
3302 | break; | ||
3303 | } | ||
3304 | return 0; | ||
3305 | } | ||
3306 | |||
3307 | static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev, | ||
3308 | struct amdgpu_irq_src *src, | ||
3309 | unsigned type, | ||
3310 | enum amdgpu_interrupt_state state) | ||
3311 | { | ||
3312 | u32 reg, reg_block; | ||
3313 | /* now deal with page flip IRQ */ | ||
3314 | switch (type) { | ||
3315 | case AMDGPU_PAGEFLIP_IRQ_D1: | ||
3316 | reg_block = CRTC0_REGISTER_OFFSET; | ||
3317 | break; | ||
3318 | case AMDGPU_PAGEFLIP_IRQ_D2: | ||
3319 | reg_block = CRTC1_REGISTER_OFFSET; | ||
3320 | break; | ||
3321 | case AMDGPU_PAGEFLIP_IRQ_D3: | ||
3322 | reg_block = CRTC2_REGISTER_OFFSET; | ||
3323 | break; | ||
3324 | case AMDGPU_PAGEFLIP_IRQ_D4: | ||
3325 | reg_block = CRTC3_REGISTER_OFFSET; | ||
3326 | break; | ||
3327 | case AMDGPU_PAGEFLIP_IRQ_D5: | ||
3328 | reg_block = CRTC4_REGISTER_OFFSET; | ||
3329 | break; | ||
3330 | case AMDGPU_PAGEFLIP_IRQ_D6: | ||
3331 | reg_block = CRTC5_REGISTER_OFFSET; | ||
3332 | break; | ||
3333 | default: | ||
3334 | DRM_ERROR("invalid pageflip crtc %d\n", type); | ||
3335 | return -EINVAL; | ||
3336 | } | ||
3337 | |||
3338 | reg = RREG32(mmGRPH_INTERRUPT_CONTROL + reg_block); | ||
3339 | if (state == AMDGPU_IRQ_STATE_DISABLE) | ||
3340 | WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); | ||
3341 | else | ||
3342 | WREG32(mmGRPH_INTERRUPT_CONTROL + reg_block, reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); | ||
3343 | |||
3344 | return 0; | ||
3345 | } | ||
3346 | |||
3347 | static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, | ||
3348 | struct amdgpu_irq_src *source, | ||
3349 | struct amdgpu_iv_entry *entry) | ||
3350 | { | ||
3351 | int reg_block; | ||
3352 | unsigned long flags; | ||
3353 | unsigned crtc_id; | ||
3354 | struct amdgpu_crtc *amdgpu_crtc; | ||
3355 | struct amdgpu_flip_work *works; | ||
3356 | |||
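| /* recover the crtc index from the even src_ids that start at 8 */ | ||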
3357 | crtc_id = (entry->src_id - 8) >> 1; | ||
3358 | amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; | ||
3359 | |||
3360 | /* ack the interrupt */ | ||
3361 | switch (crtc_id) { | ||
3362 | case AMDGPU_PAGEFLIP_IRQ_D1: | ||
3363 | reg_block = CRTC0_REGISTER_OFFSET; | ||
3364 | break; | ||
3365 | case AMDGPU_PAGEFLIP_IRQ_D2: | ||
3366 | reg_block = CRTC1_REGISTER_OFFSET; | ||
3367 | break; | ||
3368 | case AMDGPU_PAGEFLIP_IRQ_D3: | ||
3369 | reg_block = CRTC2_REGISTER_OFFSET; | ||
3370 | break; | ||
3371 | case AMDGPU_PAGEFLIP_IRQ_D4: | ||
3372 | reg_block = CRTC3_REGISTER_OFFSET; | ||
3373 | break; | ||
3374 | case AMDGPU_PAGEFLIP_IRQ_D5: | ||
3375 | reg_block = CRTC4_REGISTER_OFFSET; | ||
3376 | break; | ||
3377 | case AMDGPU_PAGEFLIP_IRQ_D6: | ||
3378 | reg_block = CRTC5_REGISTER_OFFSET; | ||
3379 | break; | ||
3380 | default: | ||
3381 | DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); | ||
3382 | return -EINVAL; | ||
3383 | } | ||
3384 | |||
3385 | if (RREG32(mmGRPH_INTERRUPT_STATUS + reg_block) & GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) | ||
3386 | WREG32(mmGRPH_INTERRUPT_STATUS + reg_block, GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); | ||
3387 | |||
3388 | /* the IRQ can fire during early init, before the crtc is set up */ | ||
3389 | if (amdgpu_crtc == NULL) | ||
3390 | return 0; | ||
3391 | |||
3392 | spin_lock_irqsave(&adev->ddev->event_lock, flags); | ||
3393 | works = amdgpu_crtc->pflip_works; | ||
3394 | if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { | ||
3395 | DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " | ||
3396 | "AMDGPU_FLIP_SUBMITTED(%d)\n", | ||
3397 | amdgpu_crtc->pflip_status, | ||
3398 | AMDGPU_FLIP_SUBMITTED); | ||
3399 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | ||
3400 | return 0; | ||
3401 | } | ||
3402 | |||
3403 | /* page flip completed. clean up */ | ||
3404 | amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; | ||
3405 | amdgpu_crtc->pflip_works = NULL; | ||
3406 | |||
3407 | /* wake up userspace */ | ||
3408 | if (works->event) | ||
3409 | drm_send_vblank_event(adev->ddev, crtc_id, works->event); | ||
3410 | |||
3411 | spin_unlock_irqrestore(&adev->ddev->event_lock, flags); | ||
3412 | |||
3413 | drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); | ||
3414 | amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id); | ||
3415 | queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work); | ||
3416 | |||
3417 | return 0; | ||
3418 | } | ||
3419 | |||
3420 | static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, | ||
3421 | int hpd) | ||
3422 | { | ||
3423 | u32 tmp; | ||
3424 | |||
3425 | if (hpd >= adev->mode_info.num_hpd) { | ||
3426 | DRM_DEBUG("invalid hpd %d\n", hpd); | ||
3427 | return; | ||
3428 | } | ||
3429 | |||
3430 | tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); | ||
3431 | tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); | ||
3432 | WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); | ||
3433 | } | ||
3434 | |||
3435 | static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev, | ||
3436 | int crtc) | ||
3437 | { | ||
3438 | u32 tmp; | ||
3439 | |||
3440 | if (crtc >= adev->mode_info.num_crtc) { | ||
3441 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
3442 | return; | ||
3443 | } | ||
3444 | |||
3445 | tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); | ||
3446 | tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); | ||
3447 | WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); | ||
3448 | } | ||
3449 | |||
3450 | static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev, | ||
3451 | int crtc) | ||
3452 | { | ||
3453 | u32 tmp; | ||
3454 | |||
3455 | if (crtc >= adev->mode_info.num_crtc) { | ||
3456 | DRM_DEBUG("invalid crtc %d\n", crtc); | ||
3457 | return; | ||
3458 | } | ||
3459 | |||
3460 | tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); | ||
3461 | tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); | ||
3462 | WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); | ||
3463 | } | ||
3464 | |||
3465 | static int dce_v11_0_crtc_irq(struct amdgpu_device *adev, | ||
3466 | struct amdgpu_irq_src *source, | ||
3467 | struct amdgpu_iv_entry *entry) | ||
3468 | { | ||
3469 | unsigned crtc = entry->src_id - 1; | ||
3470 | uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); | ||
3471 | unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc); | ||
3472 | |||
3473 | switch (entry->src_data) { | ||
3474 | case 0: /* vblank */ | ||
3475 | if (disp_int & interrupt_status_offsets[crtc].vblank) { | ||
3476 | dce_v11_0_crtc_vblank_int_ack(adev, crtc); | ||
3477 | if (amdgpu_irq_enabled(adev, source, irq_type)) { | ||
3478 | drm_handle_vblank(adev->ddev, crtc); | ||
3479 | } | ||
3480 | DRM_DEBUG("IH: D%d vblank\n", crtc + 1); | ||
3481 | } | ||
3482 | break; | ||
3483 | case 1: /* vline */ | ||
3484 | if (disp_int & interrupt_status_offsets[crtc].vline) { | ||
3485 | dce_v11_0_crtc_vline_int_ack(adev, crtc); | ||
3486 | DRM_DEBUG("IH: D%d vline\n", crtc + 1); | ||
3487 | } | ||
3488 | break; | ||
3489 | default: | ||
3490 | DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); | ||
3491 | break; | ||
3492 | } | ||
3493 | |||
3494 | return 0; | ||
3495 | } | ||
3496 | |||
3497 | static int dce_v11_0_hpd_irq(struct amdgpu_device *adev, | ||
3498 | struct amdgpu_irq_src *source, | ||
3499 | struct amdgpu_iv_entry *entry) | ||
3500 | { | ||
3501 | uint32_t disp_int, mask; | ||
3502 | unsigned hpd; | ||
3503 | |||
3504 | if (entry->src_data >= adev->mode_info.num_hpd) { | ||
3505 | DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); | ||
3506 | return 0; | ||
3507 | } | ||
3508 | |||
3509 | hpd = entry->src_data; | ||
3510 | disp_int = RREG32(interrupt_status_offsets[hpd].reg); | ||
3511 | mask = interrupt_status_offsets[hpd].hpd; | ||
3512 | |||
3513 | if (disp_int & mask) { | ||
3514 | dce_v11_0_hpd_int_ack(adev, hpd); | ||
3515 | schedule_work(&adev->hotplug_work); | ||
3516 | DRM_DEBUG("IH: HPD%d\n", hpd + 1); | ||
3517 | } | ||
3518 | |||
3519 | return 0; | ||
3520 | } | ||
3521 | |||
3522 | static int dce_v11_0_set_clockgating_state(struct amdgpu_device *adev, | ||
3523 | enum amdgpu_clockgating_state state) | ||
3524 | { | ||
3525 | return 0; | ||
3526 | } | ||
3527 | |||
3528 | static int dce_v11_0_set_powergating_state(struct amdgpu_device *adev, | ||
3529 | enum amdgpu_powergating_state state) | ||
3530 | { | ||
3531 | return 0; | ||
3532 | } | ||
3533 | |||
3534 | const struct amdgpu_ip_funcs dce_v11_0_ip_funcs = { | ||
3535 | .early_init = dce_v11_0_early_init, | ||
3536 | .late_init = NULL, | ||
3537 | .sw_init = dce_v11_0_sw_init, | ||
3538 | .sw_fini = dce_v11_0_sw_fini, | ||
3539 | .hw_init = dce_v11_0_hw_init, | ||
3540 | .hw_fini = dce_v11_0_hw_fini, | ||
3541 | .suspend = dce_v11_0_suspend, | ||
3542 | .resume = dce_v11_0_resume, | ||
3543 | .is_idle = dce_v11_0_is_idle, | ||
3544 | .wait_for_idle = dce_v11_0_wait_for_idle, | ||
3545 | .soft_reset = dce_v11_0_soft_reset, | ||
3546 | .print_status = dce_v11_0_print_status, | ||
3547 | .set_clockgating_state = dce_v11_0_set_clockgating_state, | ||
3548 | .set_powergating_state = dce_v11_0_set_powergating_state, | ||
3549 | }; | ||
3550 | |||
3551 | static void | ||
3552 | dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, | ||
3553 | struct drm_display_mode *mode, | ||
3554 | struct drm_display_mode *adjusted_mode) | ||
3555 | { | ||
3556 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3557 | |||
3558 | amdgpu_encoder->pixel_clock = adjusted_mode->clock; | ||
3559 | |||
3560 | /* need to call this here rather than in prepare() since we need some crtc info */ | ||
3561 | amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
3562 | |||
3563 | /* set scaler clears this on some chips */ | ||
3564 | dce_v11_0_set_interleave(encoder->crtc, mode); | ||
3565 | |||
3566 | if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { | ||
3567 | dce_v11_0_afmt_enable(encoder, true); | ||
3568 | dce_v11_0_afmt_setmode(encoder, adjusted_mode); | ||
3569 | } | ||
3570 | } | ||
3571 | |||
3572 | static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder) | ||
3573 | { | ||
3574 | struct amdgpu_device *adev = encoder->dev->dev_private; | ||
3575 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3576 | struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); | ||
3577 | |||
3578 | if ((amdgpu_encoder->active_device & | ||
3579 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || | ||
3580 | (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != | ||
3581 | ENCODER_OBJECT_ID_NONE)) { | ||
3582 | struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; | ||
3583 | if (dig) { | ||
3584 | dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder); | ||
3585 | if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) | ||
3586 | dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; | ||
3587 | } | ||
3588 | } | ||
3589 | |||
3590 | amdgpu_atombios_scratch_regs_lock(adev, true); | ||
3591 | |||
3592 | if (connector) { | ||
3593 | struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); | ||
3594 | |||
3595 | /* select the clock/data port if it uses a router */ | ||
3596 | if (amdgpu_connector->router.cd_valid) | ||
3597 | amdgpu_i2c_router_select_cd_port(amdgpu_connector); | ||
3598 | |||
3599 | /* turn eDP panel on for mode set */ | ||
3600 | if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) | ||
3601 | amdgpu_atombios_encoder_set_edp_panel_power(connector, | ||
3602 | ATOM_TRANSMITTER_ACTION_POWER_ON); | ||
3603 | } | ||
3604 | |||
3605 | /* this is needed for the pll/ss setup to work correctly in some cases */ | ||
3606 | amdgpu_atombios_encoder_set_crtc_source(encoder); | ||
3607 | /* set up the FMT blocks */ | ||
3608 | dce_v11_0_program_fmt(encoder); | ||
3609 | } | ||
3610 | |||
3611 | static void dce_v11_0_encoder_commit(struct drm_encoder *encoder) | ||
3612 | { | ||
3613 | struct drm_device *dev = encoder->dev; | ||
3614 | struct amdgpu_device *adev = dev->dev_private; | ||
3615 | |||
3616 | /* need to call this here as we need the crtc set up */ | ||
3617 | amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); | ||
3618 | amdgpu_atombios_scratch_regs_lock(adev, false); | ||
3619 | } | ||
3620 | |||
3621 | static void dce_v11_0_encoder_disable(struct drm_encoder *encoder) | ||
3622 | { | ||
3623 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3624 | struct amdgpu_encoder_atom_dig *dig; | ||
3625 | |||
3626 | amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
3627 | |||
3628 | if (amdgpu_atombios_encoder_is_digital(encoder)) { | ||
3629 | if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) | ||
3630 | dce_v11_0_afmt_enable(encoder, false); | ||
3631 | dig = amdgpu_encoder->enc_priv; | ||
3632 | dig->dig_encoder = -1; | ||
3633 | } | ||
3634 | amdgpu_encoder->active_device = 0; | ||
3635 | } | ||
3636 | |||
3637 | /* these are handled by the primary encoders */ | ||
3638 | static void dce_v11_0_ext_prepare(struct drm_encoder *encoder) | ||
3639 | { | ||
3640 | |||
3641 | } | ||
3642 | |||
3643 | static void dce_v11_0_ext_commit(struct drm_encoder *encoder) | ||
3644 | { | ||
3645 | |||
3646 | } | ||
3647 | |||
3648 | static void | ||
3649 | dce_v11_0_ext_mode_set(struct drm_encoder *encoder, | ||
3650 | struct drm_display_mode *mode, | ||
3651 | struct drm_display_mode *adjusted_mode) | ||
3652 | { | ||
3653 | |||
3654 | } | ||
3655 | |||
3656 | static void dce_v11_0_ext_disable(struct drm_encoder *encoder) | ||
3657 | { | ||
3658 | |||
3659 | } | ||
3660 | |||
3661 | static void | ||
3662 | dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode) | ||
3663 | { | ||
3664 | |||
3665 | } | ||
3666 | |||
3667 | static bool dce_v11_0_ext_mode_fixup(struct drm_encoder *encoder, | ||
3668 | const struct drm_display_mode *mode, | ||
3669 | struct drm_display_mode *adjusted_mode) | ||
3670 | { | ||
3671 | return true; | ||
3672 | } | ||
3673 | |||
3674 | static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = { | ||
3675 | .dpms = dce_v11_0_ext_dpms, | ||
3676 | .mode_fixup = dce_v11_0_ext_mode_fixup, | ||
3677 | .prepare = dce_v11_0_ext_prepare, | ||
3678 | .mode_set = dce_v11_0_ext_mode_set, | ||
3679 | .commit = dce_v11_0_ext_commit, | ||
3680 | .disable = dce_v11_0_ext_disable, | ||
3681 | /* no detect for TMDS/LVDS yet */ | ||
3682 | }; | ||
3683 | |||
3684 | static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = { | ||
3685 | .dpms = amdgpu_atombios_encoder_dpms, | ||
3686 | .mode_fixup = amdgpu_atombios_encoder_mode_fixup, | ||
3687 | .prepare = dce_v11_0_encoder_prepare, | ||
3688 | .mode_set = dce_v11_0_encoder_mode_set, | ||
3689 | .commit = dce_v11_0_encoder_commit, | ||
3690 | .disable = dce_v11_0_encoder_disable, | ||
3691 | .detect = amdgpu_atombios_encoder_dig_detect, | ||
3692 | }; | ||
3693 | |||
3694 | static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = { | ||
3695 | .dpms = amdgpu_atombios_encoder_dpms, | ||
3696 | .mode_fixup = amdgpu_atombios_encoder_mode_fixup, | ||
3697 | .prepare = dce_v11_0_encoder_prepare, | ||
3698 | .mode_set = dce_v11_0_encoder_mode_set, | ||
3699 | .commit = dce_v11_0_encoder_commit, | ||
3700 | .detect = amdgpu_atombios_encoder_dac_detect, | ||
3701 | }; | ||
3702 | |||
3703 | static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder) | ||
3704 | { | ||
3705 | struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3706 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
3707 | amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); | ||
3708 | kfree(amdgpu_encoder->enc_priv); | ||
3709 | drm_encoder_cleanup(encoder); | ||
3710 | kfree(amdgpu_encoder); | ||
3711 | } | ||
3712 | |||
3713 | static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = { | ||
3714 | .destroy = dce_v11_0_encoder_destroy, | ||
3715 | }; | ||
3716 | |||
3717 | static void dce_v11_0_encoder_add(struct amdgpu_device *adev, | ||
3718 | uint32_t encoder_enum, | ||
3719 | uint32_t supported_device, | ||
3720 | u16 caps) | ||
3721 | { | ||
3722 | struct drm_device *dev = adev->ddev; | ||
3723 | struct drm_encoder *encoder; | ||
3724 | struct amdgpu_encoder *amdgpu_encoder; | ||
3725 | |||
3726 | /* see if we already added it */ | ||
3727 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
3728 | amdgpu_encoder = to_amdgpu_encoder(encoder); | ||
3729 | if (amdgpu_encoder->encoder_enum == encoder_enum) { | ||
3730 | amdgpu_encoder->devices |= supported_device; | ||
3731 | return; | ||
3732 | } | ||
3733 | |||
3734 | } | ||
3735 | |||
3736 | /* add a new one */ | ||
3737 | amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); | ||
3738 | if (!amdgpu_encoder) | ||
3739 | return; | ||
3740 | |||
3741 | encoder = &amdgpu_encoder->base; | ||
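| /* possible_crtcs is a bitmask with one bit per crtc the encoder can drive */ | ||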
3742 | switch (adev->mode_info.num_crtc) { | ||
3743 | case 1: | ||
3744 | encoder->possible_crtcs = 0x1; | ||
3745 | break; | ||
3746 | case 2: | ||
3747 | default: | ||
3748 | encoder->possible_crtcs = 0x3; | ||
3749 | break; | ||
3750 | case 4: | ||
3751 | encoder->possible_crtcs = 0xf; | ||
3752 | break; | ||
3753 | case 6: | ||
3754 | encoder->possible_crtcs = 0x3f; | ||
3755 | break; | ||
3756 | } | ||
3757 | |||
3758 | amdgpu_encoder->enc_priv = NULL; | ||
3759 | |||
3760 | amdgpu_encoder->encoder_enum = encoder_enum; | ||
3761 | amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; | ||
3762 | amdgpu_encoder->devices = supported_device; | ||
3763 | amdgpu_encoder->rmx_type = RMX_OFF; | ||
3764 | amdgpu_encoder->underscan_type = UNDERSCAN_OFF; | ||
3765 | amdgpu_encoder->is_ext_encoder = false; | ||
3766 | amdgpu_encoder->caps = caps; | ||
3767 | |||
3768 | switch (amdgpu_encoder->encoder_id) { | ||
3769 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | ||
3770 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: | ||
3771 | drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, | ||
3772 | DRM_MODE_ENCODER_DAC); | ||
3773 | drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs); | ||
3774 | break; | ||
3775 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | ||
3776 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | ||
3777 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | ||
3778 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | ||
3779 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: | ||
3780 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
3781 | amdgpu_encoder->rmx_type = RMX_FULL; | ||
3782 | drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, | ||
3783 | DRM_MODE_ENCODER_LVDS); | ||
3784 | amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); | ||
3785 | } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { | ||
3786 | drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, | ||
3787 | DRM_MODE_ENCODER_DAC); | ||
3788 | amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); | ||
3789 | } else { | ||
3790 | drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, | ||
3791 | DRM_MODE_ENCODER_TMDS); | ||
3792 | amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); | ||
3793 | } | ||
3794 | drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); | ||
3795 | break; | ||
3796 | case ENCODER_OBJECT_ID_SI170B: | ||
3797 | case ENCODER_OBJECT_ID_CH7303: | ||
3798 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: | ||
3799 | case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: | ||
3800 | case ENCODER_OBJECT_ID_TITFP513: | ||
3801 | case ENCODER_OBJECT_ID_VT1623: | ||
3802 | case ENCODER_OBJECT_ID_HDMI_SI1930: | ||
3803 | case ENCODER_OBJECT_ID_TRAVIS: | ||
3804 | case ENCODER_OBJECT_ID_NUTMEG: | ||
3805 | /* these are handled by the primary encoders */ | ||
3806 | amdgpu_encoder->is_ext_encoder = true; | ||
3807 | if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
3808 | drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, | ||
3809 | DRM_MODE_ENCODER_LVDS); | ||
3810 | else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) | ||
3811 | drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, | ||
3812 | DRM_MODE_ENCODER_DAC); | ||
3813 | else | ||
3814 | drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, | ||
3815 | DRM_MODE_ENCODER_TMDS); | ||
3816 | drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); | ||
3817 | break; | ||
3818 | } | ||
3819 | } | ||
3820 | |||
3821 | static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { | ||
3822 | .set_vga_render_state = &dce_v11_0_set_vga_render_state, | ||
3823 | .bandwidth_update = &dce_v11_0_bandwidth_update, | ||
3824 | .vblank_get_counter = &dce_v11_0_vblank_get_counter, | ||
3825 | .vblank_wait = &dce_v11_0_vblank_wait, | ||
3826 | .is_display_hung = &dce_v11_0_is_display_hung, | ||
3827 | .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, | ||
3828 | .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, | ||
3829 | .hpd_sense = &dce_v11_0_hpd_sense, | ||
3830 | .hpd_set_polarity = &dce_v11_0_hpd_set_polarity, | ||
3831 | .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg, | ||
3832 | .page_flip = &dce_v11_0_page_flip, | ||
3833 | .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, | ||
3834 | .add_encoder = &dce_v11_0_encoder_add, | ||
3835 | .add_connector = &amdgpu_connector_add, | ||
3836 | .stop_mc_access = &dce_v11_0_stop_mc_access, | ||
3837 | .resume_mc_access = &dce_v11_0_resume_mc_access, | ||
3838 | }; | ||
3839 | |||
3840 | static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) | ||
3841 | { | ||
3842 | if (adev->mode_info.funcs == NULL) | ||
3843 | adev->mode_info.funcs = &dce_v11_0_display_funcs; | ||
3844 | } | ||
3845 | |||
3846 | static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = { | ||
3847 | .set = dce_v11_0_set_crtc_irq_state, | ||
3848 | .process = dce_v11_0_crtc_irq, | ||
3849 | }; | ||
3850 | |||
3851 | static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = { | ||
3852 | .set = dce_v11_0_set_pageflip_irq_state, | ||
3853 | .process = dce_v11_0_pageflip_irq, | ||
3854 | }; | ||
3855 | |||
3856 | static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = { | ||
3857 | .set = dce_v11_0_set_hpd_irq_state, | ||
3858 | .process = dce_v11_0_hpd_irq, | ||
3859 | }; | ||
3860 | |||
3861 | static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev) | ||
3862 | { | ||
3863 | adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST; | ||
3864 | adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs; | ||
3865 | |||
3866 | adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST; | ||
3867 | adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs; | ||
3868 | |||
3869 | adev->hpd_irq.num_types = AMDGPU_HPD_LAST; | ||
3870 | adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; | ||
3871 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h new file mode 100644 index 000000000000..eeb9a56b514a --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __DCE_V11_0_H__ | ||
25 | #define __DCE_V11_0_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs dce_v11_0_ip_funcs; | ||
28 | |||
29 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c new file mode 100644 index 000000000000..a8397dd2bce4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -0,0 +1,4286 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include <linux/firmware.h> | ||
24 | #include "drmP.h" | ||
25 | #include "amdgpu.h" | ||
26 | #include "amdgpu_gfx.h" | ||
27 | #include "vi.h" | ||
28 | #include "vid.h" | ||
29 | #include "amdgpu_ucode.h" | ||
30 | #include "clearstate_vi.h" | ||
31 | |||
32 | #include "gmc/gmc_8_2_d.h" | ||
33 | #include "gmc/gmc_8_2_sh_mask.h" | ||
34 | |||
35 | #include "oss/oss_3_0_d.h" | ||
36 | #include "oss/oss_3_0_sh_mask.h" | ||
37 | |||
38 | #include "bif/bif_5_0_d.h" | ||
39 | #include "bif/bif_5_0_sh_mask.h" | ||
40 | |||
41 | #include "gca/gfx_8_0_d.h" | ||
42 | #include "gca/gfx_8_0_enum.h" | ||
43 | #include "gca/gfx_8_0_sh_mask.h" | ||
45 | |||
46 | #include "uvd/uvd_5_0_d.h" | ||
47 | #include "uvd/uvd_5_0_sh_mask.h" | ||
48 | |||
49 | #include "dce/dce_10_0_d.h" | ||
50 | #include "dce/dce_10_0_sh_mask.h" | ||
51 | |||
52 | #define GFX8_NUM_GFX_RINGS 1 | ||
53 | #define GFX8_NUM_COMPUTE_RINGS 8 | ||
54 | |||
55 | #define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001 | ||
56 | #define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001 | ||
57 | #define TONGA_GB_ADDR_CONFIG_GOLDEN 0x22011003 | ||
58 | |||
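| /* shift helpers that place tiling fields at their GB_TILE_MODE0/GB_MACROTILE_MODE0 bit positions */ | ||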
59 | #define ARRAY_MODE(x) ((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT) | ||
60 | #define PIPE_CONFIG(x) ((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT) | ||
61 | #define TILE_SPLIT(x) ((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT) | ||
62 | #define MICRO_TILE_MODE_NEW(x) ((x) << GB_TILE_MODE0__MICRO_TILE_MODE_NEW__SHIFT) | ||
63 | #define SAMPLE_SPLIT(x) ((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT) | ||
64 | #define BANK_WIDTH(x) ((x) << GB_MACROTILE_MODE0__BANK_WIDTH__SHIFT) | ||
65 | #define BANK_HEIGHT(x) ((x) << GB_MACROTILE_MODE0__BANK_HEIGHT__SHIFT) | ||
66 | #define MACRO_TILE_ASPECT(x) ((x) << GB_MACROTILE_MODE0__MACRO_TILE_ASPECT__SHIFT) | ||
67 | #define NUM_BANKS(x) ((x) << GB_MACROTILE_MODE0__NUM_BANKS__SHIFT) | ||
68 | |||
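| /* VI firmware still ships under the radeon/ directory rather than amdgpu/ */ | ||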
69 | MODULE_FIRMWARE("radeon/carrizo_ce.bin"); | ||
70 | MODULE_FIRMWARE("radeon/carrizo_pfp.bin"); | ||
71 | MODULE_FIRMWARE("radeon/carrizo_me.bin"); | ||
72 | MODULE_FIRMWARE("radeon/carrizo_mec.bin"); | ||
73 | MODULE_FIRMWARE("radeon/carrizo_mec2.bin"); | ||
74 | MODULE_FIRMWARE("radeon/carrizo_rlc.bin"); | ||
75 | |||
76 | MODULE_FIRMWARE("radeon/tonga_ce.bin"); | ||
77 | MODULE_FIRMWARE("radeon/tonga_pfp.bin"); | ||
78 | MODULE_FIRMWARE("radeon/tonga_me.bin"); | ||
79 | MODULE_FIRMWARE("radeon/tonga_mec.bin"); | ||
80 | MODULE_FIRMWARE("radeon/tonga_mec2.bin"); | ||
81 | MODULE_FIRMWARE("radeon/tonga_rlc.bin"); | ||
82 | |||
83 | MODULE_FIRMWARE("radeon/topaz_ce.bin"); | ||
84 | MODULE_FIRMWARE("radeon/topaz_pfp.bin"); | ||
85 | MODULE_FIRMWARE("radeon/topaz_me.bin"); | ||
86 | MODULE_FIRMWARE("radeon/topaz_mec.bin"); | ||
87 | MODULE_FIRMWARE("radeon/topaz_mec2.bin"); | ||
88 | MODULE_FIRMWARE("radeon/topaz_rlc.bin"); | ||
89 | |||
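| /* per-VMID GDS base/size and GWS/OA register offsets for all 16 VMIDs */ | ||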
90 | static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = | ||
91 | { | ||
92 | {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, | ||
93 | {mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1}, | ||
94 | {mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2}, | ||
95 | {mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3}, | ||
96 | {mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4}, | ||
97 | {mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5}, | ||
98 | {mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6}, | ||
99 | {mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7}, | ||
100 | {mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8}, | ||
101 | {mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9}, | ||
102 | {mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10}, | ||
103 | {mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11}, | ||
104 | {mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12}, | ||
105 | {mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13}, | ||
106 | {mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14}, | ||
107 | {mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15} | ||
108 | }; | ||
109 | |||
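| /* golden settings are {register, and-mask, or-value} triples; an and-mask of 0xffffffff writes the value outright */ | ||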
110 | static const u32 golden_settings_tonga_a11[] = | ||
111 | { | ||
112 | mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, | ||
113 | mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, | ||
114 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, | ||
115 | mmGB_GPU_ID, 0x0000000f, 0x00000000, | ||
116 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, | ||
117 | mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc, | ||
118 | mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, | ||
119 | mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, | ||
120 | mmTCC_CTRL, 0x00100000, 0xf31fff7f, | ||
121 | mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb, | ||
122 | mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b, | ||
123 | mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876, | ||
124 | }; | ||
125 | |||
126 | static const u32 tonga_golden_common_all[] = | ||
127 | { | ||
128 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
129 | mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012, | ||
130 | mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002A, | ||
131 | mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, | ||
132 | mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, | ||
133 | mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, | ||
134 | mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, | ||
135 | mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF | ||
136 | }; | ||
137 | |||
138 | static const u32 tonga_mgcg_cgcg_init[] = | ||
139 | { | ||
140 | mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, | ||
141 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
142 | mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
143 | mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, | ||
144 | mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, | ||
145 | mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
146 | mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100, | ||
147 | mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, | ||
148 | mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, | ||
149 | mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, | ||
150 | mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, | ||
151 | mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
152 | mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
153 | mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
154 | mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, | ||
155 | mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, | ||
156 | mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, | ||
157 | mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, | ||
158 | mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, | ||
159 | mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, | ||
160 | mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, | ||
161 | mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, | ||
162 | mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, | ||
163 | mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, | ||
164 | mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, | ||
165 | mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, | ||
166 | mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, | ||
167 | mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
168 | mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
169 | mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, | ||
170 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
171 | mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
172 | mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
173 | mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, | ||
174 | mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
175 | mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
176 | mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
177 | mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
178 | mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
179 | mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
180 | mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
181 | mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
182 | mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
183 | mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
184 | mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
185 | mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
186 | mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
187 | mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
188 | mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
189 | mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
190 | mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
191 | mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
192 | mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
193 | mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, | ||
194 | mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
195 | mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
196 | mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
197 | mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
198 | mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
199 | mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
200 | mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
201 | mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
202 | mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
203 | mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
204 | mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
205 | mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
206 | mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
207 | mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
208 | mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
209 | mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
210 | mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
211 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, | ||
212 | mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, | ||
213 | mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, | ||
214 | mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, | ||
215 | }; | ||
216 | |||
217 | static const u32 golden_settings_iceland_a11[] = | ||
218 | { | ||
219 | mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, | ||
220 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, | ||
221 | mmDB_DEBUG3, 0xc0000000, 0xc0000000, | ||
222 | mmGB_GPU_ID, 0x0000000f, 0x00000000, | ||
223 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, | ||
224 | mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, | ||
225 | mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x00000002, | ||
226 | mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000, | ||
227 | mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, | ||
228 | mmTCC_CTRL, 0x00100000, 0xf31fff7f, | ||
229 | mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f1, | ||
230 | mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, | ||
231 | mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00000010, | ||
232 | }; | ||
233 | |||
234 | static const u32 iceland_golden_common_all[] = | ||
235 | { | ||
236 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
237 | mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002, | ||
238 | mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000, | ||
239 | mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, | ||
240 | mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, | ||
241 | mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, | ||
242 | mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, | ||
243 | mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF | ||
244 | }; | ||
245 | |||
246 | static const u32 iceland_mgcg_cgcg_init[] = | ||
247 | { | ||
248 | mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, | ||
249 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
250 | mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
251 | mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, | ||
252 | mmCGTT_CP_CLK_CTRL, 0xffffffff, 0xc0000100, | ||
253 | mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0xc0000100, | ||
254 | mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0xc0000100, | ||
255 | mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, | ||
256 | mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, | ||
257 | mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, | ||
258 | mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, | ||
259 | mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
260 | mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
261 | mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
262 | mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, | ||
263 | mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, | ||
264 | mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, | ||
265 | mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, | ||
266 | mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, | ||
267 | mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, | ||
268 | mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, | ||
269 | mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, | ||
270 | mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0xff000100, | ||
271 | mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, | ||
272 | mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, | ||
273 | mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, | ||
274 | mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, | ||
275 | mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
276 | mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
277 | mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, | ||
278 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
279 | mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
280 | mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
281 | mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87, | ||
282 | mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
283 | mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
284 | mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
285 | mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
286 | mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
287 | mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
288 | mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
289 | mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
290 | mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
291 | mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
292 | mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
293 | mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
294 | mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
295 | mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
296 | mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
297 | mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
298 | mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
299 | mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
300 | mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
301 | mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x0f840f87, | ||
302 | mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
303 | mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
304 | mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
305 | mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
306 | mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
307 | mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
308 | mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
309 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, | ||
310 | mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, | ||
311 | mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c, | ||
312 | }; | ||
313 | |||
314 | static const u32 cz_golden_settings_a11[] = | ||
315 | { | ||
316 | mmCB_HW_CONTROL_3, 0x00000040, 0x00000040, | ||
317 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, | ||
318 | mmGB_GPU_ID, 0x0000000f, 0x00000000, | ||
319 | mmPA_SC_ENHANCE, 0xffffffff, 0x00000001, | ||
320 | mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, | ||
321 | mmTA_CNTL_AUX, 0x000f000f, 0x00010000, | ||
322 | mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f3, | ||
323 | mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00001302 | ||
324 | }; | ||
325 | |||
326 | static const u32 cz_golden_common_all[] = | ||
327 | { | ||
328 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
329 | mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000002, | ||
330 | mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000, | ||
331 | mmGB_ADDR_CONFIG, 0xffffffff, 0x22010001, | ||
332 | mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, | ||
333 | mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, | ||
334 | mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF, | ||
335 | mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF | ||
336 | }; | ||
337 | |||
338 | static const u32 cz_mgcg_cgcg_init[] = | ||
339 | { | ||
340 | mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff, | ||
341 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
342 | mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
343 | mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100, | ||
344 | mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100, | ||
345 | mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
346 | mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x00000100, | ||
347 | mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100, | ||
348 | mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100, | ||
349 | mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100, | ||
350 | mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100, | ||
351 | mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
352 | mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
353 | mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100, | ||
354 | mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100, | ||
355 | mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100, | ||
356 | mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100, | ||
357 | mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100, | ||
358 | mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100, | ||
359 | mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100, | ||
360 | mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100, | ||
361 | mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100, | ||
362 | mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100, | ||
363 | mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100, | ||
364 | mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100, | ||
365 | mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100, | ||
366 | mmTA_CGTT_CTRL, 0xffffffff, 0x00000100, | ||
367 | mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
368 | mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100, | ||
369 | mmTD_CGTT_CTRL, 0xffffffff, 0x00000100, | ||
370 | mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, | ||
371 | mmCGTS_CU0_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
372 | mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
373 | mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, | ||
374 | mmCGTS_CU0_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
375 | mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
376 | mmCGTS_CU1_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
377 | mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
378 | mmCGTS_CU1_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
379 | mmCGTS_CU1_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
380 | mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
381 | mmCGTS_CU2_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
382 | mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
383 | mmCGTS_CU2_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
384 | mmCGTS_CU2_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
385 | mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
386 | mmCGTS_CU3_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
387 | mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
388 | mmCGTS_CU3_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
389 | mmCGTS_CU3_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
390 | mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
391 | mmCGTS_CU4_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
392 | mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
393 | mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007, | ||
394 | mmCGTS_CU4_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
395 | mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
396 | mmCGTS_CU5_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
397 | mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
398 | mmCGTS_CU5_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
399 | mmCGTS_CU5_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
400 | mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
401 | mmCGTS_CU6_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
402 | mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
403 | mmCGTS_CU6_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
404 | mmCGTS_CU6_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
405 | mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
406 | mmCGTS_CU7_SP0_CTRL_REG, 0xffffffff, 0x00010000, | ||
407 | mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002, | ||
408 | mmCGTS_CU7_TA_CTRL_REG, 0xffffffff, 0x00040007, | ||
409 | mmCGTS_CU7_SP1_CTRL_REG, 0xffffffff, 0x00060005, | ||
410 | mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008, | ||
411 | mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200, | ||
412 | mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100, | ||
413 | mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f, | ||
414 | mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, | ||
415 | }; | ||
416 | |||
417 | static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev); | ||
418 | static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev); | ||
419 | static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev); | ||
420 | |||
421 | static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) | ||
422 | { | ||
423 | switch (adev->asic_type) { | ||
424 | case CHIP_TOPAZ: | ||
425 | amdgpu_program_register_sequence(adev, | ||
426 | iceland_mgcg_cgcg_init, | ||
427 | (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); | ||
428 | amdgpu_program_register_sequence(adev, | ||
429 | golden_settings_iceland_a11, | ||
430 | (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); | ||
431 | amdgpu_program_register_sequence(adev, | ||
432 | iceland_golden_common_all, | ||
433 | (const u32)ARRAY_SIZE(iceland_golden_common_all)); | ||
434 | break; | ||
435 | case CHIP_TONGA: | ||
436 | amdgpu_program_register_sequence(adev, | ||
437 | tonga_mgcg_cgcg_init, | ||
438 | (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); | ||
439 | amdgpu_program_register_sequence(adev, | ||
440 | golden_settings_tonga_a11, | ||
441 | (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); | ||
442 | amdgpu_program_register_sequence(adev, | ||
443 | tonga_golden_common_all, | ||
444 | (const u32)ARRAY_SIZE(tonga_golden_common_all)); | ||
445 | break; | ||
446 | case CHIP_CARRIZO: | ||
447 | amdgpu_program_register_sequence(adev, | ||
448 | cz_mgcg_cgcg_init, | ||
449 | (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); | ||
450 | amdgpu_program_register_sequence(adev, | ||
451 | cz_golden_settings_a11, | ||
452 | (const u32)ARRAY_SIZE(cz_golden_settings_a11)); | ||
453 | amdgpu_program_register_sequence(adev, | ||
454 | cz_golden_common_all, | ||
455 | (const u32)ARRAY_SIZE(cz_golden_common_all)); | ||
456 | break; | ||
457 | default: | ||
458 | break; | ||
459 | } | ||
460 | } | ||
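
The golden-register tables above are flat u32 arrays consumed three entries at a time: (register offset, AND mask, OR value). A minimal sketch of the core loop of amdgpu_program_register_sequence(), assuming that triplet layout; an entry whose mask covers the whole register is treated as a plain write rather than a read-modify-write:

	for (i = 0; i < array_size; i += 3) {
		u32 reg      = registers[i + 0];
		u32 and_mask = registers[i + 1];
		u32 or_mask  = registers[i + 2];
		u32 tmp;

		if (and_mask == 0xffffffff) {
			tmp = or_mask;		/* full-mask entry: plain write */
		} else {
			tmp = RREG32(reg);	/* partial mask: read-modify-write */
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}

This is also why mmGRBM_GFX_INDEX appears with mask 0xffffffff and value 0xe0000000 before and after the per-CU entries: it is a plain write selecting broadcast mode, so the CGTS_CU* writes reach every shader engine and CU.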
461 | |||
462 | static void gfx_v8_0_scratch_init(struct amdgpu_device *adev) | ||
463 | { | ||
464 | int i; | ||
465 | |||
466 | adev->gfx.scratch.num_reg = 7; | ||
467 | adev->gfx.scratch.reg_base = mmSCRATCH_REG0; | ||
468 | for (i = 0; i < adev->gfx.scratch.num_reg; i++) { | ||
469 | adev->gfx.scratch.free[i] = true; | ||
470 | adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; | ||
471 | } | ||
472 | } | ||
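
gfx_v8_0_scratch_init() only seeds the bookkeeping: seven registers starting at mmSCRATCH_REG0, all marked free. The ring and IB tests below claim one through amdgpu_gfx_scratch_get(); a sketch of that helper consistent with the free[]/reg[] arrays initialized here (first-free scan, -EINVAL when all slots are taken):

	int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, u32 *reg)
	{
		int i;

		for (i = 0; i < adev->gfx.scratch.num_reg; i++) {
			if (adev->gfx.scratch.free[i]) {
				adev->gfx.scratch.free[i] = false;	/* claim the slot */
				*reg = adev->gfx.scratch.reg[i];	/* hand back the MMIO offset */
				return 0;
			}
		}
		return -EINVAL;	/* all seven in use */
	}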
473 | |||
474 | static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) | ||
475 | { | ||
476 | struct amdgpu_device *adev = ring->adev; | ||
477 | uint32_t scratch; | ||
478 | uint32_t tmp = 0; | ||
479 | unsigned i; | ||
480 | int r; | ||
481 | |||
482 | r = amdgpu_gfx_scratch_get(adev, &scratch); | ||
483 | if (r) { | ||
484 | DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r); | ||
485 | return r; | ||
486 | } | ||
487 | WREG32(scratch, 0xCAFEDEAD); | ||
488 | r = amdgpu_ring_lock(ring, 3); | ||
489 | if (r) { | ||
490 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
491 | ring->idx, r); | ||
492 | amdgpu_gfx_scratch_free(adev, scratch); | ||
493 | return r; | ||
494 | } | ||
495 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); | ||
496 | amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START)); | ||
497 | amdgpu_ring_write(ring, 0xDEADBEEF); | ||
498 | amdgpu_ring_unlock_commit(ring); | ||
499 | |||
500 | for (i = 0; i < adev->usec_timeout; i++) { | ||
501 | tmp = RREG32(scratch); | ||
502 | if (tmp == 0xDEADBEEF) | ||
503 | break; | ||
504 | DRM_UDELAY(1); | ||
505 | } | ||
506 | if (i < adev->usec_timeout) { | ||
507 | DRM_INFO("ring test on %d succeeded in %d usecs\n", | ||
508 | ring->idx, i); | ||
509 | } else { | ||
510 | DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", | ||
511 | ring->idx, scratch, tmp); | ||
512 | r = -EINVAL; | ||
513 | } | ||
514 | amdgpu_gfx_scratch_free(adev, scratch); | ||
515 | return r; | ||
516 | } | ||
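
The ring test is a scratch-register handshake: the CPU seeds the register with 0xCAFEDEAD, queues a single SET_UCONFIG_REG packet that makes the CP write 0xDEADBEEF into it, then polls until the value flips or adev->usec_timeout expires. The three amdgpu_ring_write() calls form one type-3 packet; a sketch of the header encoding assumed here (the standard CIK/VI PACKET3 layout, where the count field is the number of body dwords minus one):

	/* header: type 3, opcode, (N-1) body dwords */
	#define PACKET3(op, n)	((3 << 30) |			\
				 (((n) & 0x3FFF) << 16) |	\
				 (((op) & 0xFF) << 8))

	/* PACKET3(PACKET3_SET_UCONFIG_REG, 1) = header + 2 body dwords:
	 * body[0] = register index relative to PACKET3_SET_UCONFIG_REG_START,
	 * body[1] = value to write (0xDEADBEEF).
	 */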
517 | |||
518 | static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) | ||
519 | { | ||
520 | struct amdgpu_device *adev = ring->adev; | ||
521 | struct amdgpu_ib ib; | ||
522 | uint32_t scratch; | ||
523 | uint32_t tmp = 0; | ||
524 | unsigned i; | ||
525 | int r; | ||
526 | |||
527 | r = amdgpu_gfx_scratch_get(adev, &scratch); | ||
528 | if (r) { | ||
529 | DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); | ||
530 | return r; | ||
531 | } | ||
532 | WREG32(scratch, 0xCAFEDEAD); | ||
533 | r = amdgpu_ib_get(ring, NULL, 256, &ib); | ||
534 | if (r) { | ||
535 | DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); | ||
536 | amdgpu_gfx_scratch_free(adev, scratch); | ||
537 | return r; | ||
538 | } | ||
539 | ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); | ||
540 | ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START)); | ||
541 | ib.ptr[2] = 0xDEADBEEF; | ||
542 | ib.length_dw = 3; | ||
543 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); | ||
544 | if (r) { | ||
545 | amdgpu_gfx_scratch_free(adev, scratch); | ||
546 | amdgpu_ib_free(adev, &ib); | ||
547 | DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); | ||
548 | return r; | ||
549 | } | ||
550 | r = amdgpu_fence_wait(ib.fence, false); | ||
551 | if (r) { | ||
552 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | ||
553 | amdgpu_gfx_scratch_free(adev, scratch); | ||
554 | amdgpu_ib_free(adev, &ib); | ||
555 | return r; | ||
556 | } | ||
557 | for (i = 0; i < adev->usec_timeout; i++) { | ||
558 | tmp = RREG32(scratch); | ||
559 | if (tmp == 0xDEADBEEF) | ||
560 | break; | ||
561 | DRM_UDELAY(1); | ||
562 | } | ||
563 | if (i < adev->usec_timeout) { | ||
564 | DRM_INFO("ib test on ring %d succeeded in %u usecs\n", | ||
565 | ib.fence->ring->idx, i); | ||
566 | } else { | ||
567 | DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", | ||
568 | scratch, tmp); | ||
569 | r = -EINVAL; | ||
570 | } | ||
571 | amdgpu_gfx_scratch_free(adev, scratch); | ||
572 | amdgpu_ib_free(adev, &ib); | ||
573 | return r; | ||
574 | } | ||
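
The IB test repeats the same 0xCAFEDEAD/0xDEADBEEF handshake, but the three packet dwords live in an indirect buffer scheduled through amdgpu_ib_schedule() and gated on a fence, so it also exercises IB fetch and fence signalling. Both tests end in the same poll loop; a hypothetical helper collecting it (name and signature are illustrative, not part of this patch):

	/* Poll a scratch register until it reads 'val'; returns elapsed usecs
	 * on success or -ETIMEDOUT. Hypothetical refactor of the shared loop.
	 */
	static int gfx_v8_0_poll_scratch(struct amdgpu_device *adev,
					 uint32_t scratch, uint32_t val)
	{
		unsigned i;

		for (i = 0; i < adev->usec_timeout; i++) {
			if (RREG32(scratch) == val)
				return i;
			DRM_UDELAY(1);
		}
		return -ETIMEDOUT;
	}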
575 | |||
576 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) | ||
577 | { | ||
578 | const char *chip_name; | ||
579 | char fw_name[30]; | ||
580 | int err; | ||
581 | struct amdgpu_firmware_info *info = NULL; | ||
582 | const struct common_firmware_header *header = NULL; | ||
583 | |||
584 | DRM_DEBUG("\n"); | ||
585 | |||
586 | switch (adev->asic_type) { | ||
587 | case CHIP_TOPAZ: | ||
588 | chip_name = "topaz"; | ||
589 | break; | ||
590 | case CHIP_TONGA: | ||
591 | chip_name = "tonga"; | ||
592 | break; | ||
593 | case CHIP_CARRIZO: | ||
594 | chip_name = "carrizo"; | ||
595 | break; | ||
596 | default: | ||
597 | BUG(); | ||
598 | } | ||
599 | |||
600 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | ||
601 | err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev); | ||
602 | if (err) | ||
603 | goto out; | ||
604 | err = amdgpu_ucode_validate(adev->gfx.pfp_fw); | ||
605 | if (err) | ||
606 | goto out; | ||
607 | |||
608 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name); | ||
609 | err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); | ||
610 | if (err) | ||
611 | goto out; | ||
612 | err = amdgpu_ucode_validate(adev->gfx.me_fw); | ||
613 | if (err) | ||
614 | goto out; | ||
615 | |||
616 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name); | ||
617 | err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); | ||
618 | if (err) | ||
619 | goto out; | ||
620 | err = amdgpu_ucode_validate(adev->gfx.ce_fw); | ||
621 | if (err) | ||
622 | goto out; | ||
623 | |||
624 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name); | ||
625 | err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); | ||
626 | if (err) | ||
627 | goto out; | ||
628 | err = amdgpu_ucode_validate(adev->gfx.rlc_fw); | ||
 | if (err) | ||
 | goto out; | ||
629 | |||
630 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec.bin", chip_name); | ||
631 | err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); | ||
632 | if (err) | ||
633 | goto out; | ||
634 | err = amdgpu_ucode_validate(adev->gfx.mec_fw); | ||
635 | if (err) | ||
636 | goto out; | ||
637 | |||
638 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mec2.bin", chip_name); | ||
639 | err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); | ||
640 | if (!err) { | ||
641 | err = amdgpu_ucode_validate(adev->gfx.mec2_fw); | ||
642 | if (err) | ||
643 | goto out; | ||
644 | } else { | ||
645 | err = 0; | ||
646 | adev->gfx.mec2_fw = NULL; | ||
647 | } | ||
648 | |||
649 | if (adev->firmware.smu_load) { | ||
650 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; | ||
651 | info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; | ||
652 | info->fw = adev->gfx.pfp_fw; | ||
653 | header = (const struct common_firmware_header *)info->fw->data; | ||
654 | adev->firmware.fw_size += | ||
655 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
656 | |||
657 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; | ||
658 | info->ucode_id = AMDGPU_UCODE_ID_CP_ME; | ||
659 | info->fw = adev->gfx.me_fw; | ||
660 | header = (const struct common_firmware_header *)info->fw->data; | ||
661 | adev->firmware.fw_size += | ||
662 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
663 | |||
664 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE]; | ||
665 | info->ucode_id = AMDGPU_UCODE_ID_CP_CE; | ||
666 | info->fw = adev->gfx.ce_fw; | ||
667 | header = (const struct common_firmware_header *)info->fw->data; | ||
668 | adev->firmware.fw_size += | ||
669 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
670 | |||
671 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; | ||
672 | info->ucode_id = AMDGPU_UCODE_ID_RLC_G; | ||
673 | info->fw = adev->gfx.rlc_fw; | ||
674 | header = (const struct common_firmware_header *)info->fw->data; | ||
675 | adev->firmware.fw_size += | ||
676 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
677 | |||
678 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; | ||
679 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; | ||
680 | info->fw = adev->gfx.mec_fw; | ||
681 | header = (const struct common_firmware_header *)info->fw->data; | ||
682 | adev->firmware.fw_size += | ||
683 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
684 | |||
685 | if (adev->gfx.mec2_fw) { | ||
686 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; | ||
687 | info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; | ||
688 | info->fw = adev->gfx.mec2_fw; | ||
689 | header = (const struct common_firmware_header *)info->fw->data; | ||
690 | adev->firmware.fw_size += | ||
691 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
692 | } | ||
693 | |||
694 | } | ||
695 | |||
696 | out: | ||
697 | if (err) { | ||
698 | dev_err(adev->dev, | ||
699 | "gfx8: Failed to load firmware \"%s\"\n", | ||
700 | fw_name); | ||
701 | release_firmware(adev->gfx.pfp_fw); | ||
702 | adev->gfx.pfp_fw = NULL; | ||
703 | release_firmware(adev->gfx.me_fw); | ||
704 | adev->gfx.me_fw = NULL; | ||
705 | release_firmware(adev->gfx.ce_fw); | ||
706 | adev->gfx.ce_fw = NULL; | ||
707 | release_firmware(adev->gfx.rlc_fw); | ||
708 | adev->gfx.rlc_fw = NULL; | ||
709 | release_firmware(adev->gfx.mec_fw); | ||
710 | adev->gfx.mec_fw = NULL; | ||
711 | release_firmware(adev->gfx.mec2_fw); | ||
712 | adev->gfx.mec2_fw = NULL; | ||
713 | } | ||
714 | return err; | ||
715 | } | ||
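
Per ASIC the loader requests up to six blobs from the "radeon/" firmware directory, with MEC2 explicitly optional (a missing mec2 file is not an error; the pointer is simply left NULL). For Tonga, for example, the resolved names are radeon/tonga_pfp.bin, radeon/tonga_me.bin, radeon/tonga_ce.bin, radeon/tonga_rlc.bin, radeon/tonga_mec.bin and, if present, radeon/tonga_mec2.bin. When the SMU handles firmware loading (adev->firmware.smu_load), each blob's ucode size is rounded up to a page and accumulated into adev->firmware.fw_size.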
716 | |||
717 | static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) | ||
718 | { | ||
719 | int r; | ||
720 | |||
721 | if (adev->gfx.mec.hpd_eop_obj) { | ||
722 | r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); | ||
723 | if (unlikely(r != 0)) | ||
724 | dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); | ||
725 | amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); | ||
726 | amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); | ||
727 | |||
728 | amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); | ||
729 | adev->gfx.mec.hpd_eop_obj = NULL; | ||
730 | } | ||
731 | } | ||
732 | |||
733 | #define MEC_HPD_SIZE 2048 | ||
734 | |||
735 | static int gfx_v8_0_mec_init(struct amdgpu_device *adev) | ||
736 | { | ||
737 | int r; | ||
738 | u32 *hpd; | ||
739 | |||
740 | /* | ||
741 | * we assign only 1 pipe because all other pipes will | ||
742 | * be handled by KFD | ||
743 | */ | ||
744 | adev->gfx.mec.num_mec = 1; | ||
745 | adev->gfx.mec.num_pipe = 1; | ||
746 | adev->gfx.mec.num_queue = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * 8; | ||
747 | |||
748 | if (adev->gfx.mec.hpd_eop_obj == NULL) { | ||
749 | r = amdgpu_bo_create(adev, | ||
750 | adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2, | ||
751 | PAGE_SIZE, true, | ||
752 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | ||
753 | &adev->gfx.mec.hpd_eop_obj); | ||
754 | if (r) { | ||
755 | dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r); | ||
756 | return r; | ||
757 | } | ||
758 | } | ||
759 | |||
760 | r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); | ||
761 | if (unlikely(r != 0)) { | ||
762 | gfx_v8_0_mec_fini(adev); | ||
763 | return r; | ||
764 | } | ||
765 | r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
766 | &adev->gfx.mec.hpd_eop_gpu_addr); | ||
767 | if (r) { | ||
768 | dev_warn(adev->dev, "(%d) pin HPD EOP bo failed\n", r); | ||
769 | gfx_v8_0_mec_fini(adev); | ||
770 | return r; | ||
771 | } | ||
772 | r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); | ||
773 | if (r) { | ||
774 | dev_warn(adev->dev, "(%d) map HPD EOP bo failed\n", r); | ||
775 | gfx_v8_0_mec_fini(adev); | ||
776 | return r; | ||
777 | } | ||
778 | |||
779 | memset(hpd, 0, adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2); | ||
780 | |||
781 | amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); | ||
782 | amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); | ||
783 | |||
784 | return 0; | ||
785 | } | ||
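
A worked size check for the HPD EOP buffer: with num_mec = 1 and num_pipe = 1 as set above, the allocation is 1 * 1 * MEC_HPD_SIZE * 2 = 2048 * 2 = 4096 bytes, i.e. exactly one GTT page backing the EOP storage for the eight queues on the single pipe this driver keeps (the remaining pipes belong to KFD).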
786 | |||
787 | static int gfx_v8_0_sw_init(struct amdgpu_device *adev) | ||
788 | { | ||
789 | int i, r; | ||
790 | struct amdgpu_ring *ring; | ||
791 | |||
792 | /* EOP Event */ | ||
793 | r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq); | ||
794 | if (r) | ||
795 | return r; | ||
796 | |||
797 | /* Privileged reg */ | ||
798 | r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq); | ||
799 | if (r) | ||
800 | return r; | ||
801 | |||
802 | /* Privileged inst */ | ||
803 | r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq); | ||
804 | if (r) | ||
805 | return r; | ||
806 | |||
807 | adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE; | ||
808 | |||
809 | gfx_v8_0_scratch_init(adev); | ||
810 | |||
811 | r = gfx_v8_0_init_microcode(adev); | ||
812 | if (r) { | ||
813 | DRM_ERROR("Failed to load gfx firmware!\n"); | ||
814 | return r; | ||
815 | } | ||
816 | |||
817 | r = gfx_v8_0_mec_init(adev); | ||
818 | if (r) { | ||
819 | DRM_ERROR("Failed to init MEC BOs!\n"); | ||
820 | return r; | ||
821 | } | ||
822 | |||
823 | r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs); | ||
824 | if (r) { | ||
825 | DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r); | ||
826 | return r; | ||
827 | } | ||
828 | |||
829 | /* set up the gfx ring */ | ||
830 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) { | ||
831 | ring = &adev->gfx.gfx_ring[i]; | ||
832 | ring->ring_obj = NULL; | ||
833 | sprintf(ring->name, "gfx"); | ||
834 | /* no gfx doorbells on iceland */ | ||
835 | if (adev->asic_type != CHIP_TOPAZ) { | ||
836 | ring->use_doorbell = true; | ||
837 | ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0; | ||
838 | } | ||
839 | |||
840 | r = amdgpu_ring_init(adev, ring, 1024 * 1024, | ||
841 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | ||
842 | &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP, | ||
843 | AMDGPU_RING_TYPE_GFX); | ||
844 | if (r) | ||
845 | return r; | ||
846 | } | ||
847 | |||
848 | /* set up the compute queues */ | ||
849 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
850 | unsigned irq_type; | ||
851 | |||
852 | /* max 32 queues per MEC */ | ||
853 | if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) { | ||
854 | DRM_ERROR("Too many (%d) compute rings!\n", i); | ||
855 | break; | ||
856 | } | ||
857 | ring = &adev->gfx.compute_ring[i]; | ||
858 | ring->ring_obj = NULL; | ||
859 | ring->use_doorbell = true; | ||
860 | ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + i; | ||
861 | ring->me = 1; /* first MEC */ | ||
862 | ring->pipe = i / 8; | ||
863 | ring->queue = i % 8; | ||
864 | sprintf(ring->name, "comp %d.%d.%d", ring->me, ring->pipe, ring->queue); | ||
865 | irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe; | ||
866 | /* type-2 packets are deprecated on MEC, use type-3 instead */ | ||
867 | r = amdgpu_ring_init(adev, ring, 1024 * 1024, | ||
868 | PACKET3(PACKET3_NOP, 0x3FFF), 0xf, | ||
869 | &adev->gfx.eop_irq, irq_type, | ||
870 | AMDGPU_RING_TYPE_COMPUTE); | ||
871 | if (r) | ||
872 | return r; | ||
873 | } | ||
874 | |||
875 | /* reserve GDS, GWS and OA resource for gfx */ | ||
876 | r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size, | ||
877 | PAGE_SIZE, true, | ||
878 | AMDGPU_GEM_DOMAIN_GDS, 0, | ||
879 | NULL, &adev->gds.gds_gfx_bo); | ||
880 | if (r) | ||
881 | return r; | ||
882 | |||
883 | r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size, | ||
884 | PAGE_SIZE, true, | ||
885 | AMDGPU_GEM_DOMAIN_GWS, 0, | ||
886 | NULL, &adev->gds.gws_gfx_bo); | ||
887 | if (r) | ||
888 | return r; | ||
889 | |||
890 | r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size, | ||
891 | PAGE_SIZE, true, | ||
892 | AMDGPU_GEM_DOMAIN_OA, 0, | ||
893 | NULL, &adev->gds.oa_gfx_bo); | ||
894 | if (r) | ||
895 | return r; | ||
896 | |||
897 | return 0; | ||
898 | } | ||
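
For reference, the pipe = i / 8, queue = i % 8 split above yields the following ring names on MEC1 (me is fixed at 1):

	/* i = 0  -> "comp 1.0.0"  (pipe 0, queue 0)
	 * i = 7  -> "comp 1.0.7"  (pipe 0, queue 7)
	 * i = 8  -> "comp 1.1.0"  (pipe 1, queue 0)
	 * ...
	 * capped at 32 queues per MEC and AMDGPU_MAX_COMPUTE_RINGS overall.
	 */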
899 | |||
900 | static int gfx_v8_0_sw_fini(struct amdgpu_device *adev) | ||
901 | { | ||
902 | int i; | ||
903 | |||
904 | amdgpu_bo_unref(&adev->gds.oa_gfx_bo); | ||
905 | amdgpu_bo_unref(&adev->gds.gws_gfx_bo); | ||
906 | amdgpu_bo_unref(&adev->gds.gds_gfx_bo); | ||
907 | |||
908 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | ||
909 | amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); | ||
910 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | ||
911 | amdgpu_ring_fini(&adev->gfx.compute_ring[i]); | ||
912 | |||
913 | amdgpu_wb_free(adev, adev->gfx.ce_sync_offs); | ||
914 | |||
915 | gfx_v8_0_mec_fini(adev); | ||
916 | |||
917 | return 0; | ||
918 | } | ||
919 | |||
920 | static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) | ||
921 | { | ||
922 | const u32 num_tile_mode_states = 32; | ||
923 | const u32 num_secondary_tile_mode_states = 16; | ||
924 | u32 reg_offset, gb_tile_moden, split_equal_to_row_size; | ||
925 | |||
926 | switch (adev->gfx.config.mem_row_size_in_kb) { | ||
927 | case 1: | ||
928 | split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB; | ||
929 | break; | ||
930 | case 2: | ||
931 | default: | ||
932 | split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB; | ||
933 | break; | ||
934 | case 4: | ||
935 | split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB; | ||
936 | break; | ||
937 | } | ||
938 | |||
939 | switch (adev->asic_type) { | ||
940 | case CHIP_TOPAZ: | ||
941 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | ||
942 | switch (reg_offset) { | ||
943 | case 0: | ||
944 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
945 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
946 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
947 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
948 | break; | ||
949 | case 1: | ||
950 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
951 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
952 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | ||
953 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
954 | break; | ||
955 | case 2: | ||
956 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
957 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
958 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
959 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
960 | break; | ||
961 | case 3: | ||
962 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
963 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
964 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | ||
965 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
966 | break; | ||
967 | case 4: | ||
968 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
969 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
970 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
971 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
972 | break; | ||
973 | case 5: | ||
974 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
975 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
976 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
977 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
978 | break; | ||
979 | case 6: | ||
980 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
981 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
982 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
983 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
984 | break; | ||
985 | case 8: | ||
986 | gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | | ||
987 | PIPE_CONFIG(ADDR_SURF_P2)); | ||
988 | break; | ||
989 | case 9: | ||
990 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
991 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
992 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
993 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
994 | break; | ||
995 | case 10: | ||
996 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
997 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
998 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
999 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1000 | break; | ||
1001 | case 11: | ||
1002 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1003 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1004 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
1005 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1006 | break; | ||
1007 | case 13: | ||
1008 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1009 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1010 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1011 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1012 | break; | ||
1013 | case 14: | ||
1014 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1015 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1016 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1017 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1018 | break; | ||
1019 | case 15: | ||
1020 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | | ||
1021 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1022 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1023 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1024 | break; | ||
1025 | case 16: | ||
1026 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1027 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1028 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1029 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1030 | break; | ||
1031 | case 18: | ||
1032 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
1033 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1034 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1035 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1036 | break; | ||
1037 | case 19: | ||
1038 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
1039 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1040 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1041 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1042 | break; | ||
1043 | case 20: | ||
1044 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
1045 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1046 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1047 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1048 | break; | ||
1049 | case 21: | ||
1050 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | | ||
1051 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1052 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1053 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1054 | break; | ||
1055 | case 22: | ||
1056 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | | ||
1057 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1058 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1059 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1060 | break; | ||
1061 | case 24: | ||
1062 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
1063 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1064 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1065 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1066 | break; | ||
1067 | case 25: | ||
1068 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | | ||
1069 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1070 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1071 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1072 | break; | ||
1073 | case 26: | ||
1074 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | | ||
1075 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1076 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1077 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1078 | break; | ||
1079 | case 27: | ||
1080 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1081 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1082 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1083 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1084 | break; | ||
1085 | case 28: | ||
1086 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1087 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1088 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1089 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1090 | break; | ||
1091 | case 29: | ||
1092 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1093 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1094 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1095 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1096 | break; | ||
1097 | case 7: | ||
1098 | case 12: | ||
1099 | case 17: | ||
1100 | case 23: | ||
1101 | /* unused idx */ | ||
1102 | continue; | ||
1103 | default: | ||
1104 | gb_tile_moden = 0; | ||
1105 | break; | ||
1106 | } | ||
1107 | adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; | ||
1108 | WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); | ||
1109 | } | ||
1110 | for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { | ||
1111 | switch (reg_offset) { | ||
1112 | case 0: | ||
1113 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | | ||
1114 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1115 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1116 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1117 | break; | ||
1118 | case 1: | ||
1119 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | | ||
1120 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1121 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1122 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1123 | break; | ||
1124 | case 2: | ||
1125 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | | ||
1126 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1127 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1128 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1129 | break; | ||
1130 | case 3: | ||
1131 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1132 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1133 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1134 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1135 | break; | ||
1136 | case 4: | ||
1137 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1138 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
1139 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1140 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1141 | break; | ||
1142 | case 5: | ||
1143 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1144 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1145 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1146 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1147 | break; | ||
1148 | case 6: | ||
1149 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1150 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1151 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1152 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1153 | break; | ||
1154 | case 8: | ||
1155 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | | ||
1156 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | | ||
1157 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1158 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1159 | break; | ||
1160 | case 9: | ||
1161 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | | ||
1162 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1163 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1164 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1165 | break; | ||
1166 | case 10: | ||
1167 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | | ||
1168 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1169 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1170 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1171 | break; | ||
1172 | case 11: | ||
1173 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | | ||
1174 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
1175 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1176 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1177 | break; | ||
1178 | case 12: | ||
1179 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1180 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
1181 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1182 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1183 | break; | ||
1184 | case 13: | ||
1185 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1186 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1187 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1188 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1189 | break; | ||
1190 | case 14: | ||
1191 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1192 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1193 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1194 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1195 | break; | ||
1196 | case 7: | ||
1197 | /* unused idx */ | ||
1198 | continue; | ||
1199 | default: | ||
1200 | gb_tile_moden = 0; | ||
1201 | break; | ||
1202 | } | ||
1203 | adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; | ||
1204 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); | ||
1205 | } | ||
 | break; /* don't fall through into the Tonga tables */ | ||
1206 | case CHIP_TONGA: | ||
1207 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | ||
1208 | switch (reg_offset) { | ||
1209 | case 0: | ||
1210 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1211 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1212 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
1213 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1214 | break; | ||
1215 | case 1: | ||
1216 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1217 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1218 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | ||
1219 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1220 | break; | ||
1221 | case 2: | ||
1222 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1223 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1224 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
1225 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1226 | break; | ||
1227 | case 3: | ||
1228 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1229 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1230 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | ||
1231 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1232 | break; | ||
1233 | case 4: | ||
1234 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1235 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1236 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
1237 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1238 | break; | ||
1239 | case 5: | ||
1240 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1241 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1242 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
1243 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1244 | break; | ||
1245 | case 6: | ||
1246 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1247 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1248 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
1249 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1250 | break; | ||
1251 | case 7: | ||
1252 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1253 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
1254 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
1255 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1256 | break; | ||
1257 | case 8: | ||
1258 | gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | | ||
1259 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); | ||
1260 | break; | ||
1261 | case 9: | ||
1262 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1263 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1264 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
1265 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1266 | break; | ||
1267 | case 10: | ||
1268 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1269 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1270 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
1271 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1272 | break; | ||
1273 | case 11: | ||
1274 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1275 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1276 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
1277 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1278 | break; | ||
1279 | case 12: | ||
1280 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1281 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
1282 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
1283 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1284 | break; | ||
1285 | case 13: | ||
1286 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1287 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1288 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1289 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1290 | break; | ||
1291 | case 14: | ||
1292 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1293 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1294 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1295 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1296 | break; | ||
1297 | case 15: | ||
1298 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | | ||
1299 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1300 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1301 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1302 | break; | ||
1303 | case 16: | ||
1304 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1305 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1306 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1307 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1308 | break; | ||
1309 | case 17: | ||
1310 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1311 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
1312 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1313 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1314 | break; | ||
1315 | case 18: | ||
1316 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
1317 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1318 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1319 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1320 | break; | ||
1321 | case 19: | ||
1322 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
1323 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1324 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1325 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1326 | break; | ||
1327 | case 20: | ||
1328 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
1329 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1330 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1331 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1332 | break; | ||
1333 | case 21: | ||
1334 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | | ||
1335 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1336 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1337 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1338 | break; | ||
1339 | case 22: | ||
1340 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | | ||
1341 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1342 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1343 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1344 | break; | ||
1345 | case 23: | ||
1346 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | | ||
1347 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
1348 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1349 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1350 | break; | ||
1351 | case 24: | ||
1352 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
1353 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1354 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1355 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1356 | break; | ||
1357 | case 25: | ||
1358 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | | ||
1359 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1360 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1361 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1362 | break; | ||
1363 | case 26: | ||
1364 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | | ||
1365 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1366 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1367 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1368 | break; | ||
1369 | case 27: | ||
1370 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1371 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1372 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1373 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1374 | break; | ||
1375 | case 28: | ||
1376 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1377 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1378 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1379 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1380 | break; | ||
1381 | case 29: | ||
1382 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1383 | PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | | ||
1384 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1385 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1386 | break; | ||
1387 | case 30: | ||
1388 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1389 | PIPE_CONFIG(ADDR_SURF_P4_16x16) | | ||
1390 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1391 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1392 | break; | ||
1393 | default: | ||
1394 | gb_tile_moden = 0; | ||
1395 | break; | ||
1396 | } | ||
1397 | adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; | ||
1398 | WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); | ||
1399 | } | ||
1400 | for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { | ||
1401 | switch (reg_offset) { | ||
1402 | case 0: | ||
1403 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1404 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1405 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1406 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1407 | break; | ||
1408 | case 1: | ||
1409 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1410 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1411 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1412 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1413 | break; | ||
1414 | case 2: | ||
1415 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1416 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1417 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1418 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1419 | break; | ||
1420 | case 3: | ||
1421 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1422 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1423 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1424 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1425 | break; | ||
1426 | case 4: | ||
1427 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1428 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
1429 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1430 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1431 | break; | ||
1432 | case 5: | ||
1433 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1434 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1435 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
1436 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1437 | break; | ||
1438 | case 6: | ||
1439 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1440 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1441 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
1442 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1443 | break; | ||
1444 | case 8: | ||
1445 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1446 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | | ||
1447 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1448 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1449 | break; | ||
1450 | case 9: | ||
1451 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1452 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1453 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1454 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1455 | break; | ||
1456 | case 10: | ||
1457 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1458 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
1459 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1460 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1461 | break; | ||
1462 | case 11: | ||
1463 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1464 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1465 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1466 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1467 | break; | ||
1468 | case 12: | ||
1469 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1470 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1471 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
1472 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1473 | break; | ||
1474 | case 13: | ||
1475 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1476 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1477 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
1478 | NUM_BANKS(ADDR_SURF_4_BANK)); | ||
1479 | break; | ||
1480 | case 14: | ||
1481 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1482 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1483 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | | ||
1484 | NUM_BANKS(ADDR_SURF_4_BANK)); | ||
1485 | break; | ||
1486 | case 7: | ||
1487 | /* unused idx */ | ||
1488 | continue; | ||
1489 | default: | ||
1490 | gb_tile_moden = 0; | ||
1491 | break; | ||
1492 | } | ||
1493 | adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; | ||
1494 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); | ||
1495 | } | ||
1496 | break; | ||
1497 | case CHIP_CARRIZO: | ||
1498 | default: | ||
1499 | for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { | ||
1500 | switch (reg_offset) { | ||
1501 | case 0: | ||
1502 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1503 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1504 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | | ||
1505 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1506 | break; | ||
1507 | case 1: | ||
1508 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1509 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1510 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | | ||
1511 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1512 | break; | ||
1513 | case 2: | ||
1514 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1515 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1516 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | | ||
1517 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1518 | break; | ||
1519 | case 3: | ||
1520 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1521 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1522 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | | ||
1523 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1524 | break; | ||
1525 | case 4: | ||
1526 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1527 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1528 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
1529 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1530 | break; | ||
1531 | case 5: | ||
1532 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1533 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1534 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
1535 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1536 | break; | ||
1537 | case 6: | ||
1538 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1539 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1540 | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) | | ||
1541 | MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); | ||
1542 | break; | ||
1543 | case 8: | ||
1544 | gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | | ||
1545 | PIPE_CONFIG(ADDR_SURF_P2)); | ||
1546 | break; | ||
1547 | case 9: | ||
1548 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1549 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1550 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
1551 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1552 | break; | ||
1553 | case 10: | ||
1554 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1555 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1556 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
1557 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1558 | break; | ||
1559 | case 11: | ||
1560 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1561 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1562 | MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | | ||
1563 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1564 | break; | ||
1565 | case 13: | ||
1566 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1567 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1568 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1569 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1570 | break; | ||
1571 | case 14: | ||
1572 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1573 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1574 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1575 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1576 | break; | ||
1577 | case 15: | ||
1578 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) | | ||
1579 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1580 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1581 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1582 | break; | ||
1583 | case 16: | ||
1584 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1585 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1586 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1587 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1588 | break; | ||
1589 | case 18: | ||
1590 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
1591 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1592 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1593 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1594 | break; | ||
1595 | case 19: | ||
1596 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) | | ||
1597 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1598 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1599 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1600 | break; | ||
1601 | case 20: | ||
1602 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
1603 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1604 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1605 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1606 | break; | ||
1607 | case 21: | ||
1608 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) | | ||
1609 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1610 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1611 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1612 | break; | ||
1613 | case 22: | ||
1614 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) | | ||
1615 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1616 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1617 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1618 | break; | ||
1619 | case 24: | ||
1620 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) | | ||
1621 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1622 | MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | | ||
1623 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1624 | break; | ||
1625 | case 25: | ||
1626 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) | | ||
1627 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1628 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1629 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1630 | break; | ||
1631 | case 26: | ||
1632 | gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) | | ||
1633 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1634 | MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) | | ||
1635 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1)); | ||
1636 | break; | ||
1637 | case 27: | ||
1638 | gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | | ||
1639 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1640 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1641 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1642 | break; | ||
1643 | case 28: | ||
1644 | gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | | ||
1645 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1646 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1647 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); | ||
1648 | break; | ||
1649 | case 29: | ||
1650 | gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | | ||
1651 | PIPE_CONFIG(ADDR_SURF_P2) | | ||
1652 | MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | | ||
1653 | SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8)); | ||
1654 | break; | ||
1655 | case 7: | ||
1656 | case 12: | ||
1657 | case 17: | ||
1658 | case 23: | ||
1659 | /* unused idx */ | ||
1660 | continue; | ||
1661 | default: | ||
1662 | gb_tile_moden = 0; | ||
1663 | break; | ||
1664 | } | ||
1665 | adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden; | ||
1666 | WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden); | ||
1667 | } | ||
1668 | for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { | ||
1669 | switch (reg_offset) { | ||
1670 | case 0: | ||
1671 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1672 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1673 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1674 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1675 | break; | ||
1676 | case 1: | ||
1677 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1678 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
1679 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1680 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1681 | break; | ||
1682 | case 2: | ||
1683 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1684 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1685 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1686 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1687 | break; | ||
1688 | case 3: | ||
1689 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1690 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1691 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1692 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1693 | break; | ||
1694 | case 4: | ||
1695 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1696 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1697 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1698 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1699 | break; | ||
1700 | case 5: | ||
1701 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1702 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1703 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1704 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1705 | break; | ||
1706 | case 6: | ||
1707 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1708 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1709 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1710 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1711 | break; | ||
1712 | case 8: | ||
1713 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | | ||
1714 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | | ||
1715 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1716 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1717 | break; | ||
1718 | case 9: | ||
1719 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | | ||
1720 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1721 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1722 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1723 | break; | ||
1724 | case 10: | ||
1725 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | | ||
1726 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | | ||
1727 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1728 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1729 | break; | ||
1730 | case 11: | ||
1731 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | | ||
1732 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
1733 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1734 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1735 | break; | ||
1736 | case 12: | ||
1737 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1738 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | | ||
1739 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1740 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1741 | break; | ||
1742 | case 13: | ||
1743 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1744 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1745 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | | ||
1746 | NUM_BANKS(ADDR_SURF_16_BANK)); | ||
1747 | break; | ||
1748 | case 14: | ||
1749 | gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | | ||
1750 | BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | | ||
1751 | MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | | ||
1752 | NUM_BANKS(ADDR_SURF_8_BANK)); | ||
1753 | break; | ||
1754 | case 7: | ||
1755 | /* unused idx */ | ||
1756 | continue; | ||
1757 | default: | ||
1758 | gb_tile_moden = 0; | ||
1759 | break; | ||
1760 | } | ||
1761 | adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden; | ||
1762 | WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden); | ||
1763 | } | ||
1764 | } | ||
1765 | } | ||
1766 | |||
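| /* | ||
|  * Build a contiguous mask of the low bit_width bits, e.g. bit_width = 4 | ||
|  * yields 0xf; equivalent to (1u << bit_width) - 1 for bit_width < 32. | ||
|  */ | ||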
1767 | static u32 gfx_v8_0_create_bitmask(u32 bit_width) | ||
1768 | { | ||
1769 | u32 i, mask = 0; | ||
1770 | |||
1771 | for (i = 0; i < bit_width; i++) { | ||
1772 | mask <<= 1; | ||
1773 | mask |= 1; | ||
1774 | } | ||
1775 | return mask; | ||
1776 | } | ||
1777 | |||
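| /* | ||
|  * Program GRBM_GFX_INDEX to target a single shader engine / shader array | ||
|  * pair; passing 0xffffffff as se_num and/or sh_num selects broadcast so | ||
|  * that subsequent register writes reach every instance. | ||
|  */ | ||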
1778 | void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) | ||
1779 | { | ||
1780 | u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); | ||
1781 | |||
1782 | if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { | ||
1783 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); | ||
1784 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); | ||
1785 | } else if (se_num == 0xffffffff) { | ||
1786 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); | ||
1787 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); | ||
1788 | } else if (sh_num == 0xffffffff) { | ||
1789 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); | ||
1790 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); | ||
1791 | } else { | ||
1792 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); | ||
1793 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); | ||
1794 | } | ||
1795 | WREG32(mmGRBM_GFX_INDEX, data); | ||
1796 | } | ||
1797 | |||
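| /* | ||
|  * Return the bitmap of render backends disabled in the currently selected | ||
|  * SH, combining the fuse settings (CC_RB_BACKEND_DISABLE) with any user | ||
|  * overrides (GC_USER_RB_BACKEND_DISABLE), masked to the RBs per SH. | ||
|  */ | ||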
1798 | static u32 gfx_v8_0_get_rb_disabled(struct amdgpu_device *adev, | ||
1799 | u32 max_rb_num_per_se, | ||
1800 | u32 sh_per_se) | ||
1801 | { | ||
1802 | u32 data, mask; | ||
1803 | |||
1804 | data = RREG32(mmCC_RB_BACKEND_DISABLE); | ||
1805 | if (data & 1) | ||
1806 | data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK; | ||
1807 | else | ||
1808 | data = 0; | ||
1809 | |||
1810 | data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE); | ||
1811 | |||
1812 | data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT; | ||
1813 | |||
1814 | mask = gfx_v8_0_create_bitmask(max_rb_num_per_se / sh_per_se); | ||
1815 | |||
1816 | return data & mask; | ||
1817 | } | ||
1818 | |||
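| /* | ||
|  * Two passes: first walk every SE/SH to collect the global disabled-RB | ||
|  * bitmap, then program PA_SC_RASTER_CONFIG per SE so rasterization only | ||
|  * maps work onto the render backends that are actually enabled. | ||
|  */ | ||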
1819 | static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, | ||
1820 | u32 se_num, u32 sh_per_se, | ||
1821 | u32 max_rb_num_per_se) | ||
1822 | { | ||
1823 | int i, j; | ||
1824 | u32 data, mask; | ||
1825 | u32 disabled_rbs = 0; | ||
1826 | u32 enabled_rbs = 0; | ||
1827 | |||
1828 | mutex_lock(&adev->grbm_idx_mutex); | ||
1829 | for (i = 0; i < se_num; i++) { | ||
1830 | for (j = 0; j < sh_per_se; j++) { | ||
1831 | gfx_v8_0_select_se_sh(adev, i, j); | ||
1832 | data = gfx_v8_0_get_rb_disabled(adev, | ||
1833 | max_rb_num_per_se, sh_per_se); | ||
1834 | disabled_rbs |= data << ((i * sh_per_se + j) * | ||
1835 | RB_BITMAP_WIDTH_PER_SH); | ||
1836 | } | ||
1837 | } | ||
1838 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); | ||
1839 | mutex_unlock(&adev->grbm_idx_mutex); | ||
1840 | |||
1841 | mask = 1; | ||
1842 | for (i = 0; i < max_rb_num_per_se * se_num; i++) { | ||
1843 | if (!(disabled_rbs & mask)) | ||
1844 | enabled_rbs |= mask; | ||
1845 | mask <<= 1; | ||
1846 | } | ||
1847 | |||
1848 | adev->gfx.config.backend_enable_mask = enabled_rbs; | ||
1849 | |||
1850 | mutex_lock(&adev->grbm_idx_mutex); | ||
1851 | for (i = 0; i < se_num; i++) { | ||
1852 | gfx_v8_0_select_se_sh(adev, i, 0xffffffff); | ||
1853 | data = 0; | ||
1854 | for (j = 0; j < sh_per_se; j++) { | ||
1855 | switch (enabled_rbs & 3) { | ||
1856 | case 0: | ||
1857 | if (j == 0) | ||
1858 | data |= (RASTER_CONFIG_RB_MAP_3 << | ||
1859 | PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); | ||
1860 | else | ||
1861 | data |= (RASTER_CONFIG_RB_MAP_0 << | ||
1862 | PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT); | ||
1863 | break; | ||
1864 | case 1: | ||
1865 | data |= (RASTER_CONFIG_RB_MAP_0 << | ||
1866 | (i * sh_per_se + j) * 2); | ||
1867 | break; | ||
1868 | case 2: | ||
1869 | data |= (RASTER_CONFIG_RB_MAP_3 << | ||
1870 | (i * sh_per_se + j) * 2); | ||
1871 | break; | ||
1872 | case 3: | ||
1873 | default: | ||
1874 | data |= (RASTER_CONFIG_RB_MAP_2 << | ||
1875 | (i * sh_per_se + j) * 2); | ||
1876 | break; | ||
1877 | } | ||
1878 | enabled_rbs >>= 2; | ||
1879 | } | ||
1880 | WREG32(mmPA_SC_RASTER_CONFIG, data); | ||
1881 | } | ||
1882 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); | ||
1883 | mutex_unlock(&adev->grbm_idx_mutex); | ||
1884 | } | ||
1885 | |||
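| /* | ||
|  * Static gfx setup: per-ASIC shader/RB topology, the golden | ||
|  * GB_ADDR_CONFIG, memory row size derived from the DIMM mapping (APU) or | ||
|  * MC_ARB_RAMCFG (dGPU), tiling tables, RB raster config and the per-VMID | ||
|  * SH_MEM apertures. | ||
|  */ | ||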
1886 | static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) | ||
1887 | { | ||
1888 | u32 gb_addr_config; | ||
1889 | u32 mc_shared_chmap, mc_arb_ramcfg; | ||
1890 | u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map; | ||
1891 | u32 tmp; | ||
1892 | int i; | ||
1893 | |||
1894 | switch (adev->asic_type) { | ||
1895 | case CHIP_TOPAZ: | ||
1896 | adev->gfx.config.max_shader_engines = 1; | ||
1897 | adev->gfx.config.max_tile_pipes = 2; | ||
1898 | adev->gfx.config.max_cu_per_sh = 6; | ||
1899 | adev->gfx.config.max_sh_per_se = 1; | ||
1900 | adev->gfx.config.max_backends_per_se = 2; | ||
1901 | adev->gfx.config.max_texture_channel_caches = 2; | ||
1902 | adev->gfx.config.max_gprs = 256; | ||
1903 | adev->gfx.config.max_gs_threads = 32; | ||
1904 | adev->gfx.config.max_hw_contexts = 8; | ||
1905 | |||
1906 | adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; | ||
1907 | adev->gfx.config.sc_prim_fifo_size_backend = 0x100; | ||
1908 | adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; | ||
1909 | adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; | ||
1910 | gb_addr_config = TOPAZ_GB_ADDR_CONFIG_GOLDEN; | ||
1911 | break; | ||
1912 | case CHIP_TONGA: | ||
1913 | adev->gfx.config.max_shader_engines = 4; | ||
1914 | adev->gfx.config.max_tile_pipes = 8; | ||
1915 | adev->gfx.config.max_cu_per_sh = 8; | ||
1916 | adev->gfx.config.max_sh_per_se = 1; | ||
1917 | adev->gfx.config.max_backends_per_se = 2; | ||
1918 | adev->gfx.config.max_texture_channel_caches = 8; | ||
1919 | adev->gfx.config.max_gprs = 256; | ||
1920 | adev->gfx.config.max_gs_threads = 32; | ||
1921 | adev->gfx.config.max_hw_contexts = 8; | ||
1922 | |||
1923 | adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; | ||
1924 | adev->gfx.config.sc_prim_fifo_size_backend = 0x100; | ||
1925 | adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; | ||
1926 | adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; | ||
1927 | gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; | ||
1928 | break; | ||
1929 | case CHIP_CARRIZO: | ||
1930 | adev->gfx.config.max_shader_engines = 1; | ||
1931 | adev->gfx.config.max_tile_pipes = 2; | ||
1932 | adev->gfx.config.max_cu_per_sh = 8; | ||
1933 | adev->gfx.config.max_sh_per_se = 1; | ||
1934 | adev->gfx.config.max_backends_per_se = 2; | ||
1935 | adev->gfx.config.max_texture_channel_caches = 2; | ||
1936 | adev->gfx.config.max_gprs = 256; | ||
1937 | adev->gfx.config.max_gs_threads = 32; | ||
1938 | adev->gfx.config.max_hw_contexts = 8; | ||
1939 | |||
1940 | adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; | ||
1941 | adev->gfx.config.sc_prim_fifo_size_backend = 0x100; | ||
1942 | adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; | ||
1943 | adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; | ||
1944 | gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN; | ||
1945 | break; | ||
1946 | default: | ||
1947 | adev->gfx.config.max_shader_engines = 2; | ||
1948 | adev->gfx.config.max_tile_pipes = 4; | ||
1949 | adev->gfx.config.max_cu_per_sh = 2; | ||
1950 | adev->gfx.config.max_sh_per_se = 1; | ||
1951 | adev->gfx.config.max_backends_per_se = 2; | ||
1952 | adev->gfx.config.max_texture_channel_caches = 4; | ||
1953 | adev->gfx.config.max_gprs = 256; | ||
1954 | adev->gfx.config.max_gs_threads = 32; | ||
1955 | adev->gfx.config.max_hw_contexts = 8; | ||
1956 | |||
1957 | adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; | ||
1958 | adev->gfx.config.sc_prim_fifo_size_backend = 0x100; | ||
1959 | adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; | ||
1960 | adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130; | ||
1961 | gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN; | ||
1962 | break; | ||
1963 | } | ||
1964 | |||
1965 | tmp = RREG32(mmGRBM_CNTL); | ||
1966 | tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff); | ||
1967 | WREG32(mmGRBM_CNTL, tmp); | ||
1968 | |||
1969 | mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP); | ||
1970 | adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG); | ||
1971 | mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg; | ||
1972 | |||
1973 | adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes; | ||
1974 | adev->gfx.config.mem_max_burst_length_bytes = 256; | ||
1975 | if (adev->flags & AMDGPU_IS_APU) { | ||
1976 | /* Get memory bank mapping mode. */ | ||
1977 | tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING); | ||
1978 | dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP); | ||
1979 | dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP); | ||
1980 | |||
1981 | tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING); | ||
1982 | dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP); | ||
1983 | dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP); | ||
1984 | |||
1985 | /* Validate settings in case only one DIMM is installed. */ | ||
1986 | if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12)) | ||
1987 | dimm00_addr_map = 0; | ||
1988 | if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12)) | ||
1989 | dimm01_addr_map = 0; | ||
1990 | if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12)) | ||
1991 | dimm10_addr_map = 0; | ||
1992 | if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12)) | ||
1993 | dimm11_addr_map = 0; | ||
1994 | |||
1995 | /* If the DIMM address map is 8GB, the row size is 2KB; otherwise 1KB. */ | ||
1996 | /* If row size(DIMM1) != row size(DIMM0), use the larger of the two. */ | ||
1997 | if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11)) | ||
1998 | adev->gfx.config.mem_row_size_in_kb = 2; | ||
1999 | else | ||
2000 | adev->gfx.config.mem_row_size_in_kb = 1; | ||
2001 | } else { | ||
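| /* dGPU: row size in bytes = 4 * 2^(8 + NOOFCOLS); capped at 4KB below. */ | ||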
2002 | tmp = REG_GET_FIELD(mc_arb_ramcfg, MC_ARB_RAMCFG, NOOFCOLS); | ||
2003 | adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; | ||
2004 | if (adev->gfx.config.mem_row_size_in_kb > 4) | ||
2005 | adev->gfx.config.mem_row_size_in_kb = 4; | ||
2006 | } | ||
2007 | |||
2008 | adev->gfx.config.shader_engine_tile_size = 32; | ||
2009 | adev->gfx.config.num_gpus = 1; | ||
2010 | adev->gfx.config.multi_gpu_tile_size = 64; | ||
2011 | |||
2012 | /* fix up row size */ | ||
2013 | switch (adev->gfx.config.mem_row_size_in_kb) { | ||
2014 | case 1: | ||
2015 | default: | ||
2016 | gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 0); | ||
2017 | break; | ||
2018 | case 2: | ||
2019 | gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 1); | ||
2020 | break; | ||
2021 | case 4: | ||
2022 | gb_addr_config = REG_SET_FIELD(gb_addr_config, GB_ADDR_CONFIG, ROW_SIZE, 2); | ||
2023 | break; | ||
2024 | } | ||
2025 | adev->gfx.config.gb_addr_config = gb_addr_config; | ||
2026 | |||
2027 | WREG32(mmGB_ADDR_CONFIG, gb_addr_config); | ||
2028 | WREG32(mmHDP_ADDR_CONFIG, gb_addr_config); | ||
2029 | WREG32(mmDMIF_ADDR_CALC, gb_addr_config); | ||
2030 | WREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET, | ||
2031 | gb_addr_config & 0x70); | ||
2032 | WREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET, | ||
2033 | gb_addr_config & 0x70); | ||
2034 | WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config); | ||
2035 | WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); | ||
2036 | WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); | ||
2037 | |||
2038 | gfx_v8_0_tiling_mode_table_init(adev); | ||
2039 | |||
2040 | gfx_v8_0_setup_rb(adev, adev->gfx.config.max_shader_engines, | ||
2041 | adev->gfx.config.max_sh_per_se, | ||
2042 | adev->gfx.config.max_backends_per_se); | ||
2043 | |||
2044 | /* XXX SH_MEM regs */ | ||
2045 | /* where to put LDS, scratch, GPUVM in FSA64 space */ | ||
2046 | mutex_lock(&adev->srbm_mutex); | ||
2047 | for (i = 0; i < 16; i++) { | ||
2048 | vi_srbm_select(adev, 0, 0, 0, i); | ||
2049 | /* CP and shaders */ | ||
2050 | if (i == 0) { | ||
2051 | tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_UC); | ||
2052 | tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_UC); | ||
2053 | WREG32(mmSH_MEM_CONFIG, tmp); | ||
2054 | } else { | ||
2055 | tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, DEFAULT_MTYPE, MTYPE_NC); | ||
2056 | tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, APE1_MTYPE, MTYPE_NC); | ||
2057 | WREG32(mmSH_MEM_CONFIG, tmp); | ||
2058 | } | ||
2059 | |||
2060 | WREG32(mmSH_MEM_APE1_BASE, 1); | ||
2061 | WREG32(mmSH_MEM_APE1_LIMIT, 0); | ||
2062 | WREG32(mmSH_MEM_BASES, 0); | ||
2063 | } | ||
2064 | vi_srbm_select(adev, 0, 0, 0, 0); | ||
2065 | mutex_unlock(&adev->srbm_mutex); | ||
2066 | |||
2067 | mutex_lock(&adev->grbm_idx_mutex); | ||
2068 | /* | ||
2069 | * make sure that the following register writes are broadcast | ||
2070 | * to all the shaders | ||
2071 | */ | ||
2072 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); | ||
2073 | |||
2074 | WREG32(mmPA_SC_FIFO_SIZE, | ||
2075 | (adev->gfx.config.sc_prim_fifo_size_frontend << | ||
2076 | PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) | | ||
2077 | (adev->gfx.config.sc_prim_fifo_size_backend << | ||
2078 | PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) | | ||
2079 | (adev->gfx.config.sc_hiz_tile_fifo_size << | ||
2080 | PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) | | ||
2081 | (adev->gfx.config.sc_earlyz_tile_fifo_size << | ||
2082 | PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)); | ||
2083 | mutex_unlock(&adev->grbm_idx_mutex); | ||
2084 | |||
2085 | } | ||
2086 | |||
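| /* | ||
|  * Poll each CU master and then the non-CU masters until the RLC serdes | ||
|  * report idle, bounded by adev->usec_timeout microseconds per target. | ||
|  */ | ||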
2087 | static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) | ||
2088 | { | ||
2089 | u32 i, j, k; | ||
2090 | u32 mask; | ||
2091 | |||
2092 | mutex_lock(&adev->grbm_idx_mutex); | ||
2093 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | ||
2094 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | ||
2095 | gfx_v8_0_select_se_sh(adev, i, j); | ||
2096 | for (k = 0; k < adev->usec_timeout; k++) { | ||
2097 | if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) | ||
2098 | break; | ||
2099 | udelay(1); | ||
2100 | } | ||
2101 | } | ||
2102 | } | ||
2103 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); | ||
2104 | mutex_unlock(&adev->grbm_idx_mutex); | ||
2105 | |||
2106 | mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | | ||
2107 | RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK | | ||
2108 | RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK | | ||
2109 | RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK; | ||
2110 | for (k = 0; k < adev->usec_timeout; k++) { | ||
2111 | if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0) | ||
2112 | break; | ||
2113 | udelay(1); | ||
2114 | } | ||
2115 | } | ||
2116 | |||
2117 | static void gfx_v8_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, | ||
2118 | bool enable) | ||
2119 | { | ||
2120 | u32 tmp = RREG32(mmCP_INT_CNTL_RING0); | ||
2121 | |||
2122 | if (enable) { | ||
2123 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 1); | ||
2124 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 1); | ||
2125 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 1); | ||
2126 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 1); | ||
2127 | } else { | ||
2128 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, 0); | ||
2129 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, 0); | ||
2130 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, 0); | ||
2131 | tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, 0); | ||
2132 | } | ||
2133 | WREG32(mmCP_INT_CNTL_RING0, tmp); | ||
2134 | } | ||
2135 | |||
2136 | void gfx_v8_0_rlc_stop(struct amdgpu_device *adev) | ||
2137 | { | ||
2138 | u32 tmp = RREG32(mmRLC_CNTL); | ||
2139 | |||
2140 | tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0); | ||
2141 | WREG32(mmRLC_CNTL, tmp); | ||
2142 | |||
2143 | gfx_v8_0_enable_gui_idle_interrupt(adev, false); | ||
2144 | |||
2145 | gfx_v8_0_wait_for_rlc_serdes(adev); | ||
2146 | } | ||
2147 | |||
2148 | static void gfx_v8_0_rlc_reset(struct amdgpu_device *adev) | ||
2149 | { | ||
2150 | u32 tmp = RREG32(mmGRBM_SOFT_RESET); | ||
2151 | |||
2152 | tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); | ||
2153 | WREG32(mmGRBM_SOFT_RESET, tmp); | ||
2154 | udelay(50); | ||
2155 | tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0); | ||
2156 | WREG32(mmGRBM_SOFT_RESET, tmp); | ||
2157 | udelay(50); | ||
2158 | } | ||
2159 | |||
2160 | static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) | ||
2161 | { | ||
2162 | u32 tmp = RREG32(mmRLC_CNTL); | ||
2163 | |||
2164 | tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1); | ||
2165 | WREG32(mmRLC_CNTL, tmp); | ||
2166 | |||
2167 | /* on Carrizo the CP interrupt is enabled only after the CP is initialized */ | ||
2168 | if (adev->asic_type != CHIP_CARRIZO) | ||
2169 | gfx_v8_0_enable_gui_idle_interrupt(adev, true); | ||
2170 | |||
2171 | udelay(50); | ||
2172 | } | ||
2173 | |||
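| /* | ||
|  * Legacy load path: reset the write address, stream the RLC microcode | ||
|  * into RLC_GPM_UCODE_DATA one dword at a time, then write the firmware | ||
|  * version back to RLC_GPM_UCODE_ADDR. | ||
|  */ | ||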
2174 | static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) | ||
2175 | { | ||
2176 | const struct rlc_firmware_header_v2_0 *hdr; | ||
2177 | const __le32 *fw_data; | ||
2178 | unsigned i, fw_size; | ||
2179 | |||
2180 | if (!adev->gfx.rlc_fw) | ||
2181 | return -EINVAL; | ||
2182 | |||
2183 | hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; | ||
2184 | amdgpu_ucode_print_rlc_hdr(&hdr->header); | ||
2185 | adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); | ||
2186 | |||
2187 | fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + | ||
2188 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
2189 | fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; | ||
2190 | |||
2191 | WREG32(mmRLC_GPM_UCODE_ADDR, 0); | ||
2192 | for (i = 0; i < fw_size; i++) | ||
2193 | WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); | ||
2194 | WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); | ||
2195 | |||
2196 | return 0; | ||
2197 | } | ||
2198 | |||
2199 | static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) | ||
2200 | { | ||
2201 | int r; | ||
2202 | |||
2203 | gfx_v8_0_rlc_stop(adev); | ||
2204 | |||
2205 | /* disable CG */ | ||
2206 | WREG32(mmRLC_CGCG_CGLS_CTRL, 0); | ||
2207 | |||
2208 | /* disable PG */ | ||
2209 | WREG32(mmRLC_PG_CNTL, 0); | ||
2210 | |||
2211 | gfx_v8_0_rlc_reset(adev); | ||
2212 | |||
2213 | if (!adev->firmware.smu_load) { | ||
2214 | /* legacy rlc firmware loading */ | ||
2215 | r = gfx_v8_0_rlc_load_microcode(adev); | ||
2216 | if (r) | ||
2217 | return r; | ||
2218 | } else { | ||
2219 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
2220 | AMDGPU_UCODE_ID_RLC_G); | ||
2221 | if (r) | ||
2222 | return -EINVAL; | ||
2223 | } | ||
2224 | |||
2225 | gfx_v8_0_rlc_start(adev); | ||
2226 | |||
2227 | return 0; | ||
2228 | } | ||
2229 | |||
2230 | static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) | ||
2231 | { | ||
2232 | int i; | ||
2233 | u32 tmp = RREG32(mmCP_ME_CNTL); | ||
2234 | |||
2235 | if (enable) { | ||
2236 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0); | ||
2237 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0); | ||
2238 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0); | ||
2239 | } else { | ||
2240 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); | ||
2241 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); | ||
2242 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); | ||
2243 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | ||
2244 | adev->gfx.gfx_ring[i].ready = false; | ||
2245 | } | ||
2246 | WREG32(mmCP_ME_CNTL, tmp); | ||
2247 | udelay(50); | ||
2248 | } | ||
2249 | |||
2250 | static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev) | ||
2251 | { | ||
2252 | const struct gfx_firmware_header_v1_0 *pfp_hdr; | ||
2253 | const struct gfx_firmware_header_v1_0 *ce_hdr; | ||
2254 | const struct gfx_firmware_header_v1_0 *me_hdr; | ||
2255 | const __le32 *fw_data; | ||
2256 | unsigned i, fw_size; | ||
2257 | |||
2258 | if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) | ||
2259 | return -EINVAL; | ||
2260 | |||
2261 | pfp_hdr = (const struct gfx_firmware_header_v1_0 *) | ||
2262 | adev->gfx.pfp_fw->data; | ||
2263 | ce_hdr = (const struct gfx_firmware_header_v1_0 *) | ||
2264 | adev->gfx.ce_fw->data; | ||
2265 | me_hdr = (const struct gfx_firmware_header_v1_0 *) | ||
2266 | adev->gfx.me_fw->data; | ||
2267 | |||
2268 | amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); | ||
2269 | amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); | ||
2270 | amdgpu_ucode_print_gfx_hdr(&me_hdr->header); | ||
2271 | adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version); | ||
2272 | adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version); | ||
2273 | adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version); | ||
2274 | |||
2275 | gfx_v8_0_cp_gfx_enable(adev, false); | ||
2276 | |||
2277 | /* PFP */ | ||
2278 | fw_data = (const __le32 *) | ||
2279 | (adev->gfx.pfp_fw->data + | ||
2280 | le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); | ||
2281 | fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; | ||
2282 | WREG32(mmCP_PFP_UCODE_ADDR, 0); | ||
2283 | for (i = 0; i < fw_size; i++) | ||
2284 | WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); | ||
2285 | WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); | ||
2286 | |||
2287 | /* CE */ | ||
2288 | fw_data = (const __le32 *) | ||
2289 | (adev->gfx.ce_fw->data + | ||
2290 | le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); | ||
2291 | fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; | ||
2292 | WREG32(mmCP_CE_UCODE_ADDR, 0); | ||
2293 | for (i = 0; i < fw_size; i++) | ||
2294 | WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); | ||
2295 | WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); | ||
2296 | |||
2297 | /* ME */ | ||
2298 | fw_data = (const __le32 *) | ||
2299 | (adev->gfx.me_fw->data + | ||
2300 | le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); | ||
2301 | fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; | ||
2302 | WREG32(mmCP_ME_RAM_WADDR, 0); | ||
2303 | for (i = 0; i < fw_size; i++) | ||
2304 | WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); | ||
2305 | WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); | ||
2306 | |||
2307 | return 0; | ||
2308 | } | ||
2309 | |||
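| /* | ||
|  * Size, in dwords, of the clear-state stream emitted by | ||
|  * gfx_v8_0_cp_gfx_start(): begin-preamble (2) + context control (3), | ||
|  * 2 + reg_count per vi_cs_data extent, raster config (4), end-preamble | ||
|  * (2) and CLEAR_STATE (2). | ||
|  */ | ||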
2310 | static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev) | ||
2311 | { | ||
2312 | u32 count = 0; | ||
2313 | const struct cs_section_def *sect = NULL; | ||
2314 | const struct cs_extent_def *ext = NULL; | ||
2315 | |||
2316 | /* begin clear state */ | ||
2317 | count += 2; | ||
2318 | /* context control state */ | ||
2319 | count += 3; | ||
2320 | |||
2321 | for (sect = vi_cs_data; sect->section != NULL; ++sect) { | ||
2322 | for (ext = sect->section; ext->extent != NULL; ++ext) { | ||
2323 | if (sect->id == SECT_CONTEXT) | ||
2324 | count += 2 + ext->reg_count; | ||
2325 | else | ||
2326 | return 0; | ||
2327 | } | ||
2328 | } | ||
2329 | /* pa_sc_raster_config/pa_sc_raster_config1 */ | ||
2330 | count += 4; | ||
2331 | /* end clear state */ | ||
2332 | count += 2; | ||
2333 | /* clear state */ | ||
2334 | count += 2; | ||
2335 | |||
2336 | return count; | ||
2337 | } | ||
2338 | |||
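| /* | ||
|  * Emit the clear-state sequence on the gfx ring: begin-preamble, context | ||
|  * control, the vi_cs_data context registers, per-ASIC raster config, | ||
|  * end-preamble, CLEAR_STATE and the CE partition bases. | ||
|  */ | ||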
2339 | static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | ||
2340 | { | ||
2341 | struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; | ||
2342 | const struct cs_section_def *sect = NULL; | ||
2343 | const struct cs_extent_def *ext = NULL; | ||
2344 | int r, i; | ||
2345 | |||
2346 | /* init the CP */ | ||
2347 | WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1); | ||
2348 | WREG32(mmCP_ENDIAN_SWAP, 0); | ||
2349 | WREG32(mmCP_DEVICE_ID, 1); | ||
2350 | |||
2351 | gfx_v8_0_cp_gfx_enable(adev, true); | ||
2352 | |||
2353 | r = amdgpu_ring_lock(ring, gfx_v8_0_get_csb_size(adev) + 4); | ||
2354 | if (r) { | ||
2355 | DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r); | ||
2356 | return r; | ||
2357 | } | ||
2358 | |||
2359 | /* clear state buffer */ | ||
2360 | amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | ||
2361 | amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE); | ||
2362 | |||
2363 | amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); | ||
2364 | amdgpu_ring_write(ring, 0x80000000); | ||
2365 | amdgpu_ring_write(ring, 0x80000000); | ||
2366 | |||
2367 | for (sect = vi_cs_data; sect->section != NULL; ++sect) { | ||
2368 | for (ext = sect->section; ext->extent != NULL; ++ext) { | ||
2369 | if (sect->id == SECT_CONTEXT) { | ||
2370 | amdgpu_ring_write(ring, | ||
2371 | PACKET3(PACKET3_SET_CONTEXT_REG, | ||
2372 | ext->reg_count)); | ||
2373 | amdgpu_ring_write(ring, | ||
2374 | ext->reg_index - PACKET3_SET_CONTEXT_REG_START); | ||
2375 | for (i = 0; i < ext->reg_count; i++) | ||
2376 | amdgpu_ring_write(ring, ext->extent[i]); | ||
2377 | } | ||
2378 | } | ||
2379 | } | ||
2380 | |||
2381 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | ||
2382 | amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START); | ||
2383 | switch (adev->asic_type) { | ||
2384 | case CHIP_TONGA: | ||
2385 | amdgpu_ring_write(ring, 0x16000012); | ||
2386 | amdgpu_ring_write(ring, 0x0000002A); | ||
2387 | break; | ||
2388 | case CHIP_TOPAZ: | ||
2389 | case CHIP_CARRIZO: | ||
2390 | amdgpu_ring_write(ring, 0x00000002); | ||
2391 | amdgpu_ring_write(ring, 0x00000000); | ||
2392 | break; | ||
2393 | default: | ||
2394 | BUG(); | ||
2395 | } | ||
2396 | |||
2397 | amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); | ||
2398 | amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE); | ||
2399 | |||
2400 | amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0)); | ||
2401 | amdgpu_ring_write(ring, 0); | ||
2402 | |||
2403 | /* init the CE partitions */ | ||
2404 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); | ||
2405 | amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); | ||
2406 | amdgpu_ring_write(ring, 0x8000); | ||
2407 | amdgpu_ring_write(ring, 0x8000); | ||
2408 | |||
2409 | amdgpu_ring_unlock_commit(ring); | ||
2410 | |||
2411 | return 0; | ||
2412 | } | ||
2413 | |||
2414 | static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev) | ||
2415 | { | ||
2416 | struct amdgpu_ring *ring; | ||
2417 | u32 tmp; | ||
2418 | u32 rb_bufsz; | ||
2419 | u64 rb_addr, rptr_addr; | ||
2420 | int r; | ||
2421 | |||
2422 | /* Set the write pointer delay */ | ||
2423 | WREG32(mmCP_RB_WPTR_DELAY, 0); | ||
2424 | |||
2425 | /* set the RB to use vmid 0 */ | ||
2426 | WREG32(mmCP_RB_VMID, 0); | ||
2427 | |||
2428 | /* Set ring buffer size */ | ||
2429 | ring = &adev->gfx.gfx_ring[0]; | ||
2430 | rb_bufsz = order_base_2(ring->ring_size / 8); | ||
2431 | tmp = REG_SET_FIELD(0, CP_RB0_CNTL, RB_BUFSZ, rb_bufsz); | ||
2432 | tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, RB_BLKSZ, rb_bufsz - 2); | ||
2433 | tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MTYPE, 3); | ||
2434 | tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, MIN_IB_AVAILSZ, 1); | ||
2435 | #ifdef __BIG_ENDIAN | ||
2436 | tmp = REG_SET_FIELD(tmp, CP_RB0_CNTL, BUF_SWAP, 1); | ||
2437 | #endif | ||
2438 | WREG32(mmCP_RB0_CNTL, tmp); | ||
2439 | |||
2440 | /* Initialize the ring buffer's read and write pointers */ | ||
2441 | WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK); | ||
2442 | ring->wptr = 0; | ||
2443 | WREG32(mmCP_RB0_WPTR, ring->wptr); | ||
2444 | |||
2445 | /* set the wb address whether it's enabled or not */ | ||
2446 | rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); | ||
2447 | WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr)); | ||
2448 | WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF); | ||
2449 | |||
2450 | mdelay(1); | ||
2451 | WREG32(mmCP_RB0_CNTL, tmp); | ||
2452 | |||
2453 | rb_addr = ring->gpu_addr >> 8; | ||
2454 | WREG32(mmCP_RB0_BASE, rb_addr); | ||
2455 | WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr)); | ||
2456 | |||
2457 | /* no gfx doorbells on iceland */ | ||
2458 | if (adev->asic_type != CHIP_TOPAZ) { | ||
2459 | tmp = RREG32(mmCP_RB_DOORBELL_CONTROL); | ||
2460 | if (ring->use_doorbell) { | ||
2461 | tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, | ||
2462 | DOORBELL_OFFSET, ring->doorbell_index); | ||
2463 | tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, | ||
2464 | DOORBELL_EN, 1); | ||
2465 | } else { | ||
2466 | tmp = REG_SET_FIELD(tmp, CP_RB_DOORBELL_CONTROL, | ||
2467 | DOORBELL_EN, 0); | ||
2468 | } | ||
2469 | WREG32(mmCP_RB_DOORBELL_CONTROL, tmp); | ||
2470 | |||
2471 | if (adev->asic_type == CHIP_TONGA) { | ||
2472 | tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER, | ||
2473 | DOORBELL_RANGE_LOWER, | ||
2474 | AMDGPU_DOORBELL_GFX_RING0); | ||
2475 | WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp); | ||
2476 | |||
2477 | WREG32(mmCP_RB_DOORBELL_RANGE_UPPER, | ||
2478 | CP_RB_DOORBELL_RANGE_UPPER__DOORBELL_RANGE_UPPER_MASK); | ||
2479 | } | ||
2480 | |||
2481 | } | ||
2482 | |||
2483 | /* start the ring */ | ||
2484 | gfx_v8_0_cp_gfx_start(adev); | ||
2485 | ring->ready = true; | ||
2486 | r = amdgpu_ring_test_ring(ring); | ||
2487 | if (r) { | ||
2488 | ring->ready = false; | ||
2489 | return r; | ||
2490 | } | ||
2491 | |||
2492 | return 0; | ||
2493 | } | ||
2494 | |||
2495 | static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) | ||
2496 | { | ||
2497 | int i; | ||
2498 | |||
2499 | if (enable) { | ||
2500 | WREG32(mmCP_MEC_CNTL, 0); | ||
2501 | } else { | ||
2502 | WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); | ||
2503 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | ||
2504 | adev->gfx.compute_ring[i].ready = false; | ||
2505 | } | ||
2506 | udelay(50); | ||
2507 | } | ||
2508 | |||
2509 | static int gfx_v8_0_cp_compute_start(struct amdgpu_device *adev) | ||
2510 | { | ||
2511 | gfx_v8_0_cp_compute_enable(adev, true); | ||
2512 | |||
2513 | return 0; | ||
2514 | } | ||
2515 | |||
2516 | static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) | ||
2517 | { | ||
2518 | const struct gfx_firmware_header_v1_0 *mec_hdr; | ||
2519 | const __le32 *fw_data; | ||
2520 | unsigned i, fw_size; | ||
2521 | |||
2522 | if (!adev->gfx.mec_fw) | ||
2523 | return -EINVAL; | ||
2524 | |||
2525 | gfx_v8_0_cp_compute_enable(adev, false); | ||
2526 | |||
2527 | mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; | ||
2528 | amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); | ||
2529 | adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); | ||
2530 | |||
2531 | fw_data = (const __le32 *) | ||
2532 | (adev->gfx.mec_fw->data + | ||
2533 | le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); | ||
2534 | fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; | ||
2535 | |||
2536 | /* MEC1 */ | ||
2537 | WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); | ||
2538 | for (i = 0; i < fw_size; i++) | ||
2539 | WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i)); | ||
2540 | WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); | ||
2541 | |||
2542 | /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */ | ||
2543 | if (adev->gfx.mec2_fw) { | ||
2544 | const struct gfx_firmware_header_v1_0 *mec2_hdr; | ||
2545 | |||
2546 | mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; | ||
2547 | amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); | ||
2548 | adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); | ||
2549 | |||
2550 | fw_data = (const __le32 *) | ||
2551 | (adev->gfx.mec2_fw->data + | ||
2552 | le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes)); | ||
2553 | fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4; | ||
2554 | |||
2555 | WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); | ||
2556 | for (i = 0; i < fw_size; i++) | ||
2557 | WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i)); | ||
2558 | WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version); | ||
2559 | } | ||
2560 | |||
2561 | return 0; | ||
2562 | } | ||
2563 | |||
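| /* | ||
|  * Memory queue descriptor (MQD): the persistent, GPU-visible image of a | ||
|  * compute queue's state. The CP reads it when a queue is mapped onto a | ||
|  * hardware queue descriptor and writes state back on unmap; the ordinals | ||
|  * from 128 onward mirror the CP_MQD_*/CP_HQD_* registers. | ||
|  */ | ||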
2564 | struct vi_mqd { | ||
2565 | uint32_t header; /* ordinal0 */ | ||
2566 | uint32_t compute_dispatch_initiator; /* ordinal1 */ | ||
2567 | uint32_t compute_dim_x; /* ordinal2 */ | ||
2568 | uint32_t compute_dim_y; /* ordinal3 */ | ||
2569 | uint32_t compute_dim_z; /* ordinal4 */ | ||
2570 | uint32_t compute_start_x; /* ordinal5 */ | ||
2571 | uint32_t compute_start_y; /* ordinal6 */ | ||
2572 | uint32_t compute_start_z; /* ordinal7 */ | ||
2573 | uint32_t compute_num_thread_x; /* ordinal8 */ | ||
2574 | uint32_t compute_num_thread_y; /* ordinal9 */ | ||
2575 | uint32_t compute_num_thread_z; /* ordinal10 */ | ||
2576 | uint32_t compute_pipelinestat_enable; /* ordinal11 */ | ||
2577 | uint32_t compute_perfcount_enable; /* ordinal12 */ | ||
2578 | uint32_t compute_pgm_lo; /* ordinal13 */ | ||
2579 | uint32_t compute_pgm_hi; /* ordinal14 */ | ||
2580 | uint32_t compute_tba_lo; /* ordinal15 */ | ||
2581 | uint32_t compute_tba_hi; /* ordinal16 */ | ||
2582 | uint32_t compute_tma_lo; /* ordinal17 */ | ||
2583 | uint32_t compute_tma_hi; /* ordinal18 */ | ||
2584 | uint32_t compute_pgm_rsrc1; /* ordinal19 */ | ||
2585 | uint32_t compute_pgm_rsrc2; /* ordinal20 */ | ||
2586 | uint32_t compute_vmid; /* ordinal21 */ | ||
2587 | uint32_t compute_resource_limits; /* ordinal22 */ | ||
2588 | uint32_t compute_static_thread_mgmt_se0; /* ordinal23 */ | ||
2589 | uint32_t compute_static_thread_mgmt_se1; /* ordinal24 */ | ||
2590 | uint32_t compute_tmpring_size; /* ordinal25 */ | ||
2591 | uint32_t compute_static_thread_mgmt_se2; /* ordinal26 */ | ||
2592 | uint32_t compute_static_thread_mgmt_se3; /* ordinal27 */ | ||
2593 | uint32_t compute_restart_x; /* ordinal28 */ | ||
2594 | uint32_t compute_restart_y; /* ordinal29 */ | ||
2595 | uint32_t compute_restart_z; /* ordinal30 */ | ||
2596 | uint32_t compute_thread_trace_enable; /* ordinal31 */ | ||
2597 | uint32_t compute_misc_reserved; /* ordinal32 */ | ||
2598 | uint32_t compute_dispatch_id; /* ordinal33 */ | ||
2599 | uint32_t compute_threadgroup_id; /* ordinal34 */ | ||
2600 | uint32_t compute_relaunch; /* ordinal35 */ | ||
2601 | uint32_t compute_wave_restore_addr_lo; /* ordinal36 */ | ||
2602 | uint32_t compute_wave_restore_addr_hi; /* ordinal37 */ | ||
2603 | uint32_t compute_wave_restore_control; /* ordinal38 */ | ||
2604 | uint32_t reserved9; /* ordinal39 */ | ||
2605 | uint32_t reserved10; /* ordinal40 */ | ||
2606 | uint32_t reserved11; /* ordinal41 */ | ||
2607 | uint32_t reserved12; /* ordinal42 */ | ||
2608 | uint32_t reserved13; /* ordinal43 */ | ||
2609 | uint32_t reserved14; /* ordinal44 */ | ||
2610 | uint32_t reserved15; /* ordinal45 */ | ||
2611 | uint32_t reserved16; /* ordinal46 */ | ||
2612 | uint32_t reserved17; /* ordinal47 */ | ||
2613 | uint32_t reserved18; /* ordinal48 */ | ||
2614 | uint32_t reserved19; /* ordinal49 */ | ||
2615 | uint32_t reserved20; /* ordinal50 */ | ||
2616 | uint32_t reserved21; /* ordinal51 */ | ||
2617 | uint32_t reserved22; /* ordinal52 */ | ||
2618 | uint32_t reserved23; /* ordinal53 */ | ||
2619 | uint32_t reserved24; /* ordinal54 */ | ||
2620 | uint32_t reserved25; /* ordinal55 */ | ||
2621 | uint32_t reserved26; /* ordinal56 */ | ||
2622 | uint32_t reserved27; /* ordinal57 */ | ||
2623 | uint32_t reserved28; /* ordinal58 */ | ||
2624 | uint32_t reserved29; /* ordinal59 */ | ||
2625 | uint32_t reserved30; /* ordinal60 */ | ||
2626 | uint32_t reserved31; /* ordinal61 */ | ||
2627 | uint32_t reserved32; /* ordinal62 */ | ||
2628 | uint32_t reserved33; /* ordinal63 */ | ||
2629 | uint32_t reserved34; /* ordinal64 */ | ||
2630 | uint32_t compute_user_data_0; /* ordinal65 */ | ||
2631 | uint32_t compute_user_data_1; /* ordinal66 */ | ||
2632 | uint32_t compute_user_data_2; /* ordinal67 */ | ||
2633 | uint32_t compute_user_data_3; /* ordinal68 */ | ||
2634 | uint32_t compute_user_data_4; /* ordinal69 */ | ||
2635 | uint32_t compute_user_data_5; /* ordinal70 */ | ||
2636 | uint32_t compute_user_data_6; /* ordinal71 */ | ||
2637 | uint32_t compute_user_data_7; /* ordinal72 */ | ||
2638 | uint32_t compute_user_data_8; /* ordinal73 */ | ||
2639 | uint32_t compute_user_data_9; /* ordinal74 */ | ||
2640 | uint32_t compute_user_data_10; /* ordinal75 */ | ||
2641 | uint32_t compute_user_data_11; /* ordinal76 */ | ||
2642 | uint32_t compute_user_data_12; /* ordinal77 */ | ||
2643 | uint32_t compute_user_data_13; /* ordinal78 */ | ||
2644 | uint32_t compute_user_data_14; /* ordinal79 */ | ||
2645 | uint32_t compute_user_data_15; /* ordinal80 */ | ||
2646 | uint32_t cp_compute_csinvoc_count_lo; /* ordinal81 */ | ||
2647 | uint32_t cp_compute_csinvoc_count_hi; /* ordinal82 */ | ||
2648 | uint32_t reserved35; /* ordinal83 */ | ||
2649 | uint32_t reserved36; /* ordinal84 */ | ||
2650 | uint32_t reserved37; /* ordinal85 */ | ||
2651 | uint32_t cp_mqd_query_time_lo; /* ordinal86 */ | ||
2652 | uint32_t cp_mqd_query_time_hi; /* ordinal87 */ | ||
2653 | uint32_t cp_mqd_connect_start_time_lo; /* ordinal88 */ | ||
2654 | uint32_t cp_mqd_connect_start_time_hi; /* ordinal89 */ | ||
2655 | uint32_t cp_mqd_connect_end_time_lo; /* ordinal90 */ | ||
2656 | uint32_t cp_mqd_connect_end_time_hi; /* ordinal91 */ | ||
2657 | uint32_t cp_mqd_connect_end_wf_count; /* ordinal92 */ | ||
2658 | uint32_t cp_mqd_connect_end_pq_rptr; /* ordinal93 */ | ||
2659 | uint32_t cp_mqd_connect_end_pq_wptr; /* ordinal94 */ | ||
2660 | uint32_t cp_mqd_connect_end_ib_rptr; /* ordinal95 */ | ||
2661 | uint32_t reserved38; /* ordinal96 */ | ||
2662 | uint32_t reserved39; /* ordinal97 */ | ||
2663 | uint32_t cp_mqd_save_start_time_lo; /* ordinal98 */ | ||
2664 | uint32_t cp_mqd_save_start_time_hi; /* ordinal99 */ | ||
2665 | uint32_t cp_mqd_save_end_time_lo; /* ordinal100 */ | ||
2666 | uint32_t cp_mqd_save_end_time_hi; /* ordinal101 */ | ||
2667 | uint32_t cp_mqd_restore_start_time_lo; /* ordinal102 */ | ||
2668 | uint32_t cp_mqd_restore_start_time_hi; /* ordinal103 */ | ||
2669 | uint32_t cp_mqd_restore_end_time_lo; /* ordinal104 */ | ||
2670 | uint32_t cp_mqd_restore_end_time_hi; /* ordinal105 */ | ||
2671 | uint32_t reserved40; /* ordinal106 */ | ||
2672 | uint32_t reserved41; /* ordinal107 */ | ||
2673 | uint32_t gds_cs_ctxsw_cnt0; /* ordinal108 */ | ||
2674 | uint32_t gds_cs_ctxsw_cnt1; /* ordinal109 */ | ||
2675 | uint32_t gds_cs_ctxsw_cnt2; /* ordinal110 */ | ||
2676 | uint32_t gds_cs_ctxsw_cnt3; /* ordinal111 */ | ||
2677 | uint32_t reserved42; /* ordinal112 */ | ||
2678 | uint32_t reserved43; /* ordinal113 */ | ||
2679 | uint32_t cp_pq_exe_status_lo; /* ordinal114 */ | ||
2680 | uint32_t cp_pq_exe_status_hi; /* ordinal115 */ | ||
2681 | uint32_t cp_packet_id_lo; /* ordinal116 */ | ||
2682 | uint32_t cp_packet_id_hi; /* ordinal117 */ | ||
2683 | uint32_t cp_packet_exe_status_lo; /* ordinal118 */ | ||
2684 | uint32_t cp_packet_exe_status_hi; /* ordinal119 */ | ||
2685 | uint32_t gds_save_base_addr_lo; /* ordinal120 */ | ||
2686 | uint32_t gds_save_base_addr_hi; /* ordinal121 */ | ||
2687 | uint32_t gds_save_mask_lo; /* ordinal122 */ | ||
2688 | uint32_t gds_save_mask_hi; /* ordinal123 */ | ||
2689 | uint32_t ctx_save_base_addr_lo; /* ordinal124 */ | ||
2690 | uint32_t ctx_save_base_addr_hi; /* ordinal125 */ | ||
2691 | uint32_t reserved44; /* ordinal126 */ | ||
2692 | uint32_t reserved45; /* ordinal127 */ | ||
2693 | uint32_t cp_mqd_base_addr_lo; /* ordinal128 */ | ||
2694 | uint32_t cp_mqd_base_addr_hi; /* ordinal129 */ | ||
2695 | uint32_t cp_hqd_active; /* ordinal130 */ | ||
2696 | uint32_t cp_hqd_vmid; /* ordinal131 */ | ||
2697 | uint32_t cp_hqd_persistent_state; /* ordinal132 */ | ||
2698 | uint32_t cp_hqd_pipe_priority; /* ordinal133 */ | ||
2699 | uint32_t cp_hqd_queue_priority; /* ordinal134 */ | ||
2700 | uint32_t cp_hqd_quantum; /* ordinal135 */ | ||
2701 | uint32_t cp_hqd_pq_base_lo; /* ordinal136 */ | ||
2702 | uint32_t cp_hqd_pq_base_hi; /* ordinal137 */ | ||
2703 | uint32_t cp_hqd_pq_rptr; /* ordinal138 */ | ||
2704 | uint32_t cp_hqd_pq_rptr_report_addr_lo; /* ordinal139 */ | ||
2705 | uint32_t cp_hqd_pq_rptr_report_addr_hi; /* ordinal140 */ | ||
2706 | uint32_t cp_hqd_pq_wptr_poll_addr; /* ordinal141 */ | ||
2707 | uint32_t cp_hqd_pq_wptr_poll_addr_hi; /* ordinal142 */ | ||
2708 | uint32_t cp_hqd_pq_doorbell_control; /* ordinal143 */ | ||
2709 | uint32_t cp_hqd_pq_wptr; /* ordinal144 */ | ||
2710 | uint32_t cp_hqd_pq_control; /* ordinal145 */ | ||
2711 | uint32_t cp_hqd_ib_base_addr_lo; /* ordinal146 */ | ||
2712 | uint32_t cp_hqd_ib_base_addr_hi; /* ordinal147 */ | ||
2713 | uint32_t cp_hqd_ib_rptr; /* ordinal148 */ | ||
2714 | uint32_t cp_hqd_ib_control; /* ordinal149 */ | ||
2715 | uint32_t cp_hqd_iq_timer; /* ordinal150 */ | ||
2716 | uint32_t cp_hqd_iq_rptr; /* ordinal151 */ | ||
2717 | uint32_t cp_hqd_dequeue_request; /* ordinal152 */ | ||
2718 | uint32_t cp_hqd_dma_offload; /* ordinal153 */ | ||
2719 | uint32_t cp_hqd_sema_cmd; /* ordinal154 */ | ||
2720 | uint32_t cp_hqd_msg_type; /* ordinal155 */ | ||
2721 | uint32_t cp_hqd_atomic0_preop_lo; /* ordinal156 */ | ||
2722 | uint32_t cp_hqd_atomic0_preop_hi; /* ordinal157 */ | ||
2723 | uint32_t cp_hqd_atomic1_preop_lo; /* ordinal158 */ | ||
2724 | uint32_t cp_hqd_atomic1_preop_hi; /* ordinal159 */ | ||
2725 | uint32_t cp_hqd_hq_status0; /* ordinal160 */ | ||
2726 | uint32_t cp_hqd_hq_control0; /* ordinal161 */ | ||
2727 | uint32_t cp_mqd_control; /* ordinal162 */ | ||
2728 | uint32_t cp_hqd_hq_status1; /* ordinal163 */ | ||
2729 | uint32_t cp_hqd_hq_control1; /* ordinal164 */ | ||
2730 | uint32_t cp_hqd_eop_base_addr_lo; /* ordinal165 */ | ||
2731 | uint32_t cp_hqd_eop_base_addr_hi; /* ordinal166 */ | ||
2732 | uint32_t cp_hqd_eop_control; /* ordinal167 */ | ||
2733 | uint32_t cp_hqd_eop_rptr; /* ordinal168 */ | ||
2734 | uint32_t cp_hqd_eop_wptr; /* ordinal169 */ | ||
2735 | uint32_t cp_hqd_eop_done_events; /* ordinal170 */ | ||
2736 | uint32_t cp_hqd_ctx_save_base_addr_lo; /* ordinal171 */ | ||
2737 | uint32_t cp_hqd_ctx_save_base_addr_hi; /* ordinal172 */ | ||
2738 | uint32_t cp_hqd_ctx_save_control; /* ordinal173 */ | ||
2739 | uint32_t cp_hqd_cntl_stack_offset; /* ordinal174 */ | ||
2740 | uint32_t cp_hqd_cntl_stack_size; /* ordinal175 */ | ||
2741 | uint32_t cp_hqd_wg_state_offset; /* ordinal176 */ | ||
2742 | uint32_t cp_hqd_ctx_save_size; /* ordinal177 */ | ||
2743 | uint32_t cp_hqd_gds_resource_state; /* ordinal178 */ | ||
2744 | uint32_t cp_hqd_error; /* ordinal179 */ | ||
2745 | uint32_t cp_hqd_eop_wptr_mem; /* ordinal180 */ | ||
2746 | uint32_t cp_hqd_eop_dones; /* ordinal181 */ | ||
2747 | uint32_t reserved46; /* ordinal182 */ | ||
2748 | uint32_t reserved47; /* ordinal183 */ | ||
2749 | uint32_t reserved48; /* ordinal184 */ | ||
2750 | uint32_t reserved49; /* ordinal185 */ | ||
2751 | uint32_t reserved50; /* ordinal186 */ | ||
2752 | uint32_t reserved51; /* ordinal187 */ | ||
2753 | uint32_t reserved52; /* ordinal188 */ | ||
2754 | uint32_t reserved53; /* ordinal189 */ | ||
2755 | uint32_t reserved54; /* ordinal190 */ | ||
2756 | uint32_t reserved55; /* ordinal191 */ | ||
2757 | uint32_t iqtimer_pkt_header; /* ordinal192 */ | ||
2758 | uint32_t iqtimer_pkt_dw0; /* ordinal193 */ | ||
2759 | uint32_t iqtimer_pkt_dw1; /* ordinal194 */ | ||
2760 | uint32_t iqtimer_pkt_dw2; /* ordinal195 */ | ||
2761 | uint32_t iqtimer_pkt_dw3; /* ordinal196 */ | ||
2762 | uint32_t iqtimer_pkt_dw4; /* ordinal197 */ | ||
2763 | uint32_t iqtimer_pkt_dw5; /* ordinal198 */ | ||
2764 | uint32_t iqtimer_pkt_dw6; /* ordinal199 */ | ||
2765 | uint32_t iqtimer_pkt_dw7; /* ordinal200 */ | ||
2766 | uint32_t iqtimer_pkt_dw8; /* ordinal201 */ | ||
2767 | uint32_t iqtimer_pkt_dw9; /* ordinal202 */ | ||
2768 | uint32_t iqtimer_pkt_dw10; /* ordinal203 */ | ||
2769 | uint32_t iqtimer_pkt_dw11; /* ordinal204 */ | ||
2770 | uint32_t iqtimer_pkt_dw12; /* ordinal205 */ | ||
2771 | uint32_t iqtimer_pkt_dw13; /* ordinal206 */ | ||
2772 | uint32_t iqtimer_pkt_dw14; /* ordinal207 */ | ||
2773 | uint32_t iqtimer_pkt_dw15; /* ordinal208 */ | ||
2774 | uint32_t iqtimer_pkt_dw16; /* ordinal209 */ | ||
2775 | uint32_t iqtimer_pkt_dw17; /* ordinal210 */ | ||
2776 | uint32_t iqtimer_pkt_dw18; /* ordinal211 */ | ||
2777 | uint32_t iqtimer_pkt_dw19; /* ordinal212 */ | ||
2778 | uint32_t iqtimer_pkt_dw20; /* ordinal213 */ | ||
2779 | uint32_t iqtimer_pkt_dw21; /* ordinal214 */ | ||
2780 | uint32_t iqtimer_pkt_dw22; /* ordinal215 */ | ||
2781 | uint32_t iqtimer_pkt_dw23; /* ordinal216 */ | ||
2782 | uint32_t iqtimer_pkt_dw24; /* ordinal217 */ | ||
2783 | uint32_t iqtimer_pkt_dw25; /* ordinal218 */ | ||
2784 | uint32_t iqtimer_pkt_dw26; /* ordinal219 */ | ||
2785 | uint32_t iqtimer_pkt_dw27; /* ordinal220 */ | ||
2786 | uint32_t iqtimer_pkt_dw28; /* ordinal221 */ | ||
2787 | uint32_t iqtimer_pkt_dw29; /* ordinal222 */ | ||
2788 | uint32_t iqtimer_pkt_dw30; /* ordinal223 */ | ||
2789 | uint32_t iqtimer_pkt_dw31; /* ordinal224 */ | ||
2790 | uint32_t reserved56; /* ordinal225 */ | ||
2791 | uint32_t reserved57; /* ordinal226 */ | ||
2792 | uint32_t reserved58; /* ordinal227 */ | ||
2793 | uint32_t set_resources_header; /* ordinal228 */ | ||
2794 | uint32_t set_resources_dw1; /* ordinal229 */ | ||
2795 | uint32_t set_resources_dw2; /* ordinal230 */ | ||
2796 | uint32_t set_resources_dw3; /* ordinal231 */ | ||
2797 | uint32_t set_resources_dw4; /* ordinal232 */ | ||
2798 | uint32_t set_resources_dw5; /* ordinal233 */ | ||
2799 | uint32_t set_resources_dw6; /* ordinal234 */ | ||
2800 | uint32_t set_resources_dw7; /* ordinal235 */ | ||
2801 | uint32_t reserved59; /* ordinal236 */ | ||
2802 | uint32_t reserved60; /* ordinal237 */ | ||
2803 | uint32_t reserved61; /* ordinal238 */ | ||
2804 | uint32_t reserved62; /* ordinal239 */ | ||
2805 | uint32_t reserved63; /* ordinal240 */ | ||
2806 | uint32_t reserved64; /* ordinal241 */ | ||
2807 | uint32_t reserved65; /* ordinal242 */ | ||
2808 | uint32_t reserved66; /* ordinal243 */ | ||
2809 | uint32_t reserved67; /* ordinal244 */ | ||
2810 | uint32_t reserved68; /* ordinal245 */ | ||
2811 | uint32_t reserved69; /* ordinal246 */ | ||
2812 | uint32_t reserved70; /* ordinal247 */ | ||
2813 | uint32_t reserved71; /* ordinal248 */ | ||
2814 | uint32_t reserved72; /* ordinal249 */ | ||
2815 | uint32_t reserved73; /* ordinal250 */ | ||
2816 | uint32_t reserved74; /* ordinal251 */ | ||
2817 | uint32_t reserved75; /* ordinal252 */ | ||
2818 | uint32_t reserved76; /* ordinal253 */ | ||
2819 | uint32_t reserved77; /* ordinal254 */ | ||
2820 | uint32_t reserved78; /* ordinal255 */ | ||
2821 | |||
2822 | uint32_t reserved_t[256]; /* Reserve 256 dword buffer used by ucode */ | ||
2823 | }; | ||
2824 | |||
2825 | static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev) | ||
2826 | { | ||
2827 | int i, r; | ||
2828 | |||
2829 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
2830 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; | ||
2831 | |||
2832 | if (ring->mqd_obj) { | ||
2833 | r = amdgpu_bo_reserve(ring->mqd_obj, false); | ||
2834 | if (unlikely(r != 0)) | ||
2835 | dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r); | ||
2836 | |||
2837 | amdgpu_bo_unpin(ring->mqd_obj); | ||
2838 | amdgpu_bo_unreserve(ring->mqd_obj); | ||
2839 | |||
2840 | amdgpu_bo_unref(&ring->mqd_obj); | ||
2841 | ring->mqd_obj = NULL; | ||
2842 | } | ||
2843 | } | ||
2844 | } | ||
2845 | |||
2846 | static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) | ||
2847 | { | ||
2848 | int r, i, j; | ||
2849 | u32 tmp; | ||
2850 | bool use_doorbell = true; | ||
2851 | u64 hqd_gpu_addr; | ||
2852 | u64 mqd_gpu_addr; | ||
2853 | u64 eop_gpu_addr; | ||
2854 | u64 wb_gpu_addr; | ||
2855 | u32 *buf; | ||
2856 | struct vi_mqd *mqd; | ||
2857 | |||
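| /* The first four pipes belong to MEC1, the remainder to MEC2. */ | ||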
2858 | /* init the pipes */ | ||
2859 | mutex_lock(&adev->srbm_mutex); | ||
2860 | for (i = 0; i < (adev->gfx.mec.num_pipe * adev->gfx.mec.num_mec); i++) { | ||
2861 | int me = (i < 4) ? 1 : 2; | ||
2862 | int pipe = (i < 4) ? i : (i - 4); | ||
2863 | |||
2864 | eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE); | ||
2865 | eop_gpu_addr >>= 8; | ||
2866 | |||
2867 | vi_srbm_select(adev, me, pipe, 0, 0); | ||
2868 | |||
2869 | /* write the EOP addr */ | ||
2870 | WREG32(mmCP_HQD_EOP_BASE_ADDR, eop_gpu_addr); | ||
2871 | WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr)); | ||
2872 | |||
2873 | /* set the VMID assigned */ | ||
2874 | WREG32(mmCP_HQD_VMID, 0); | ||
2875 | |||
2876 | /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ | ||
2877 | tmp = RREG32(mmCP_HQD_EOP_CONTROL); | ||
2878 | tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, | ||
2879 | (order_base_2(MEC_HPD_SIZE / 4) - 1)); | ||
2880 | WREG32(mmCP_HQD_EOP_CONTROL, tmp); | ||
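/*
 * Worked example (sketch): assuming MEC_HPD_SIZE is 2048 bytes,
 * 2048 / 4 = 512 dwords and order_base_2(512) = 9, so EOP_SIZE is
 * written as 8 and the hardware decodes 2^(8+1) = 512 dwords,
 * matching the buffer size.
 */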
2881 | } | ||
2882 | vi_srbm_select(adev, 0, 0, 0, 0); | ||
2883 | mutex_unlock(&adev->srbm_mutex); | ||
2884 | |||
2885 | /* init the compute queues */ | ||
2886 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
2887 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; | ||
2888 | |||
2889 | if (ring->mqd_obj == NULL) { | ||
2890 | r = amdgpu_bo_create(adev, | ||
2891 | sizeof(struct vi_mqd), | ||
2892 | PAGE_SIZE, true, | ||
2893 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | ||
2894 | &ring->mqd_obj); | ||
2895 | if (r) { | ||
2896 | dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); | ||
2897 | return r; | ||
2898 | } | ||
2899 | } | ||
2900 | |||
2901 | r = amdgpu_bo_reserve(ring->mqd_obj, false); | ||
2902 | if (unlikely(r != 0)) { | ||
2903 | gfx_v8_0_cp_compute_fini(adev); | ||
2904 | return r; | ||
2905 | } | ||
2906 | r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
2907 | &mqd_gpu_addr); | ||
2908 | if (r) { | ||
2909 | dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r); | ||
2910 | gfx_v8_0_cp_compute_fini(adev); | ||
2911 | return r; | ||
2912 | } | ||
2913 | r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&buf); | ||
2914 | if (r) { | ||
2915 | dev_warn(adev->dev, "(%d) map MQD bo failed\n", r); | ||
2916 | gfx_v8_0_cp_compute_fini(adev); | ||
2917 | return r; | ||
2918 | } | ||
2919 | |||
2920 | /* init the mqd struct */ | ||
2921 | memset(buf, 0, sizeof(struct vi_mqd)); | ||
2922 | |||
2923 | mqd = (struct vi_mqd *)buf; | ||
2924 | mqd->header = 0xC0310800; | ||
2925 | mqd->compute_pipelinestat_enable = 0x00000001; | ||
2926 | mqd->compute_static_thread_mgmt_se0 = 0xffffffff; | ||
2927 | mqd->compute_static_thread_mgmt_se1 = 0xffffffff; | ||
2928 | mqd->compute_static_thread_mgmt_se2 = 0xffffffff; | ||
2929 | mqd->compute_static_thread_mgmt_se3 = 0xffffffff; | ||
2930 | mqd->compute_misc_reserved = 0x00000003; | ||
2931 | |||
2932 | mutex_lock(&adev->srbm_mutex); | ||
2933 | vi_srbm_select(adev, ring->me, | ||
2934 | ring->pipe, | ||
2935 | ring->queue, 0); | ||
2936 | |||
2937 | /* disable wptr polling */ | ||
2938 | tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); | ||
2939 | tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); | ||
2940 | WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp); | ||
2941 | |||
2942 | mqd->cp_hqd_eop_base_addr_lo = | ||
2943 | RREG32(mmCP_HQD_EOP_BASE_ADDR); | ||
2944 | mqd->cp_hqd_eop_base_addr_hi = | ||
2945 | RREG32(mmCP_HQD_EOP_BASE_ADDR_HI); | ||
2946 | |||
2947 | /* enable doorbell? */ | ||
2948 | tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); | ||
2949 | if (use_doorbell) { | ||
2950 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); | ||
2951 | } else { | ||
2952 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 0); | ||
2953 | } | ||
2954 | WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, tmp); | ||
2955 | mqd->cp_hqd_pq_doorbell_control = tmp; | ||
2956 | |||
2957 | /* disable the queue if it's active */ | ||
2958 | mqd->cp_hqd_dequeue_request = 0; | ||
2959 | mqd->cp_hqd_pq_rptr = 0; | ||
2960 | mqd->cp_hqd_pq_wptr = 0; | ||
2961 | if (RREG32(mmCP_HQD_ACTIVE) & 1) { | ||
2962 | WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1); | ||
2963 | for (j = 0; j < adev->usec_timeout; j++) { | ||
2964 | if (!(RREG32(mmCP_HQD_ACTIVE) & 1)) | ||
2965 | break; | ||
2966 | udelay(1); | ||
2967 | } | ||
2968 | WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request); | ||
2969 | WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr); | ||
2970 | WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr); | ||
2971 | } | ||
2972 | |||
2973 | /* set the pointer to the MQD */ | ||
2974 | mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc; | ||
2975 | mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr); | ||
2976 | WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo); | ||
2977 | WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); | ||
2978 | |||
2979 | /* set MQD vmid to 0 */ | ||
2980 | tmp = RREG32(mmCP_MQD_CONTROL); | ||
2981 | tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); | ||
2982 | WREG32(mmCP_MQD_CONTROL, tmp); | ||
2983 | mqd->cp_mqd_control = tmp; | ||
2984 | |||
2985 | /* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */ | ||
2986 | hqd_gpu_addr = ring->gpu_addr >> 8; | ||
2987 | mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; | ||
2988 | mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); | ||
2989 | WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo); | ||
2990 | WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi); | ||
2991 | |||
2992 | /* set up the HQD, this is similar to CP_RB0_CNTL */ | ||
2993 | tmp = RREG32(mmCP_HQD_PQ_CONTROL); | ||
2994 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, | ||
2995 | (order_base_2(ring->ring_size / 4) - 1)); | ||
2996 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, | ||
2997 | (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); | ||
2998 | #ifdef __BIG_ENDIAN | ||
2999 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); | ||
3000 | #endif | ||
3001 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); | ||
3002 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); | ||
3003 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); | ||
3004 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); | ||
3005 | WREG32(mmCP_HQD_PQ_CONTROL, tmp); | ||
3006 | mqd->cp_hqd_pq_control = tmp; | ||
3007 | |||
3008 | /* set the wb address whether it's enabled or not */ | ||
3009 | wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); | ||
3010 | mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; | ||
3011 | mqd->cp_hqd_pq_rptr_report_addr_hi = | ||
3012 | upper_32_bits(wb_gpu_addr) & 0xffff; | ||
3013 | WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, | ||
3014 | mqd->cp_hqd_pq_rptr_report_addr_lo); | ||
3015 | WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI, | ||
3016 | mqd->cp_hqd_pq_rptr_report_addr_hi); | ||
3017 | |||
3018 | /* only used if CP_PQ_WPTR_POLL_CNTL.EN is set */ | ||
3019 | wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | ||
3020 | mqd->cp_hqd_pq_wptr_poll_addr = wb_gpu_addr & 0xfffffffc; | ||
3021 | mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; | ||
3022 | WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr); | ||
3023 | WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, | ||
3024 | mqd->cp_hqd_pq_wptr_poll_addr_hi); | ||
3025 | |||
3026 | /* enable the doorbell if requested */ | ||
3027 | if (use_doorbell) { | ||
3028 | if (adev->asic_type == CHIP_CARRIZO) { | ||
3029 | WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, | ||
3030 | AMDGPU_DOORBELL_KIQ << 2); | ||
3031 | WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, | ||
3032 | AMDGPU_DOORBELL_MEC_RING7 << 2); | ||
3033 | } | ||
3034 | tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); | ||
3035 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, | ||
3036 | DOORBELL_OFFSET, ring->doorbell_index); | ||
3037 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); | ||
3038 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_SOURCE, 0); | ||
3039 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_HIT, 0); | ||
3040 | mqd->cp_hqd_pq_doorbell_control = tmp; | ||
3041 | |||
3042 | } else { | ||
3043 | mqd->cp_hqd_pq_doorbell_control = 0; | ||
3044 | } | ||
3045 | WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, | ||
3046 | mqd->cp_hqd_pq_doorbell_control); | ||
3047 | |||
3048 | /* set the vmid for the queue */ | ||
3049 | mqd->cp_hqd_vmid = 0; | ||
3050 | WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid); | ||
3051 | |||
3052 | tmp = RREG32(mmCP_HQD_PERSISTENT_STATE); | ||
3053 | tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); | ||
3054 | WREG32(mmCP_HQD_PERSISTENT_STATE, tmp); | ||
3055 | mqd->cp_hqd_persistent_state = tmp; | ||
3056 | |||
3057 | /* activate the queue */ | ||
3058 | mqd->cp_hqd_active = 1; | ||
3059 | WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active); | ||
3060 | |||
3061 | vi_srbm_select(adev, 0, 0, 0, 0); | ||
3062 | mutex_unlock(&adev->srbm_mutex); | ||
3063 | |||
3064 | amdgpu_bo_kunmap(ring->mqd_obj); | ||
3065 | amdgpu_bo_unreserve(ring->mqd_obj); | ||
3066 | } | ||
3067 | |||
3068 | if (use_doorbell) { | ||
3069 | tmp = RREG32(mmCP_PQ_STATUS); | ||
3070 | tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1); | ||
3071 | WREG32(mmCP_PQ_STATUS, tmp); | ||
3072 | } | ||
3073 | |||
3074 | r = gfx_v8_0_cp_compute_start(adev); | ||
3075 | if (r) | ||
3076 | return r; | ||
3077 | |||
3078 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
3079 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; | ||
3080 | |||
3081 | ring->ready = true; | ||
3082 | r = amdgpu_ring_test_ring(ring); | ||
3083 | if (r) | ||
3084 | ring->ready = false; | ||
3085 | } | ||
3086 | |||
3087 | return 0; | ||
3088 | } | ||
3089 | |||
3090 | static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) | ||
3091 | { | ||
3092 | int r; | ||
3093 | |||
3094 | if (adev->asic_type != CHIP_CARRIZO) | ||
3095 | gfx_v8_0_enable_gui_idle_interrupt(adev, false); | ||
3096 | |||
3097 | if (!adev->firmware.smu_load) { | ||
3098 | /* legacy firmware loading */ | ||
3099 | r = gfx_v8_0_cp_gfx_load_microcode(adev); | ||
3100 | if (r) | ||
3101 | return r; | ||
3102 | |||
3103 | r = gfx_v8_0_cp_compute_load_microcode(adev); | ||
3104 | if (r) | ||
3105 | return r; | ||
3106 | } else { | ||
3107 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
3108 | AMDGPU_UCODE_ID_CP_CE); | ||
3109 | if (r) | ||
3110 | return -EINVAL; | ||
3111 | |||
3112 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
3113 | AMDGPU_UCODE_ID_CP_PFP); | ||
3114 | if (r) | ||
3115 | return -EINVAL; | ||
3116 | |||
3117 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
3118 | AMDGPU_UCODE_ID_CP_ME); | ||
3119 | if (r) | ||
3120 | return -EINVAL; | ||
3121 | |||
3122 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
3123 | AMDGPU_UCODE_ID_CP_MEC1); | ||
3124 | if (r) | ||
3125 | return -EINVAL; | ||
3126 | } | ||
3127 | |||
3128 | r = gfx_v8_0_cp_gfx_resume(adev); | ||
3129 | if (r) | ||
3130 | return r; | ||
3131 | |||
3132 | r = gfx_v8_0_cp_compute_resume(adev); | ||
3133 | if (r) | ||
3134 | return r; | ||
3135 | |||
3136 | gfx_v8_0_enable_gui_idle_interrupt(adev, true); | ||
3137 | |||
3138 | return 0; | ||
3139 | } | ||
3140 | |||
3141 | static void gfx_v8_0_cp_enable(struct amdgpu_device *adev, bool enable) | ||
3142 | { | ||
3143 | gfx_v8_0_cp_gfx_enable(adev, enable); | ||
3144 | gfx_v8_0_cp_compute_enable(adev, enable); | ||
3145 | } | ||
3146 | |||
3147 | static int gfx_v8_0_hw_init(struct amdgpu_device *adev) | ||
3148 | { | ||
3149 | int r; | ||
3150 | |||
3151 | gfx_v8_0_init_golden_registers(adev); | ||
3152 | |||
3153 | gfx_v8_0_gpu_init(adev); | ||
3154 | |||
3155 | r = gfx_v8_0_rlc_resume(adev); | ||
3156 | if (r) | ||
3157 | return r; | ||
3158 | |||
3159 | r = gfx_v8_0_cp_resume(adev); | ||
3160 | if (r) | ||
3161 | return r; | ||
3162 | |||
3163 | return r; | ||
3164 | } | ||
3165 | |||
3166 | static int gfx_v8_0_hw_fini(struct amdgpu_device *adev) | ||
3167 | { | ||
3168 | gfx_v8_0_cp_enable(adev, false); | ||
3169 | gfx_v8_0_rlc_stop(adev); | ||
3170 | gfx_v8_0_cp_compute_fini(adev); | ||
3171 | |||
3172 | return 0; | ||
3173 | } | ||
3174 | |||
3175 | static int gfx_v8_0_suspend(struct amdgpu_device *adev) | ||
3176 | { | ||
3177 | return gfx_v8_0_hw_fini(adev); | ||
3178 | } | ||
3179 | |||
3180 | static int gfx_v8_0_resume(struct amdgpu_device *adev) | ||
3181 | { | ||
3182 | return gfx_v8_0_hw_init(adev); | ||
3183 | } | ||
3184 | |||
3185 | static bool gfx_v8_0_is_idle(struct amdgpu_device *adev) | ||
3186 | { | ||
3187 | if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) | ||
3188 | return false; | ||
3189 | else | ||
3190 | return true; | ||
3191 | } | ||
3192 | |||
3193 | static int gfx_v8_0_wait_for_idle(struct amdgpu_device *adev) | ||
3194 | { | ||
3195 | unsigned i; | ||
3196 | u32 tmp; | ||
3197 | |||
3198 | for (i = 0; i < adev->usec_timeout; i++) { | ||
3199 | /* read GRBM_STATUS */ | ||
3200 | tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK; | ||
3201 | |||
3202 | if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE)) | ||
3203 | return 0; | ||
3204 | udelay(1); | ||
3205 | } | ||
3206 | return -ETIMEDOUT; | ||
3207 | } | ||
3208 | |||
3209 | static void gfx_v8_0_print_status(struct amdgpu_device *adev) | ||
3210 | { | ||
3211 | int i; | ||
3212 | |||
3213 | dev_info(adev->dev, "GFX 8.x registers\n"); | ||
3214 | dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", | ||
3215 | RREG32(mmGRBM_STATUS)); | ||
3216 | dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", | ||
3217 | RREG32(mmGRBM_STATUS2)); | ||
3218 | dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", | ||
3219 | RREG32(mmGRBM_STATUS_SE0)); | ||
3220 | dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", | ||
3221 | RREG32(mmGRBM_STATUS_SE1)); | ||
3222 | dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", | ||
3223 | RREG32(mmGRBM_STATUS_SE2)); | ||
3224 | dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", | ||
3225 | RREG32(mmGRBM_STATUS_SE3)); | ||
3226 | dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); | ||
3227 | dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", | ||
3228 | RREG32(mmCP_STALLED_STAT1)); | ||
3229 | dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", | ||
3230 | RREG32(mmCP_STALLED_STAT2)); | ||
3231 | dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", | ||
3232 | RREG32(mmCP_STALLED_STAT3)); | ||
3233 | dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", | ||
3234 | RREG32(mmCP_CPF_BUSY_STAT)); | ||
3235 | dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", | ||
3236 | RREG32(mmCP_CPF_STALLED_STAT1)); | ||
3237 | dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); | ||
3238 | dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); | ||
3239 | dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", | ||
3240 | RREG32(mmCP_CPC_STALLED_STAT1)); | ||
3241 | dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); | ||
3242 | |||
3243 | for (i = 0; i < 32; i++) { | ||
3244 | dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n", | ||
3245 | i, RREG32(mmGB_TILE_MODE0 + (i * 4))); | ||
3246 | } | ||
3247 | for (i = 0; i < 16; i++) { | ||
3248 | dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n", | ||
3249 | i, RREG32(mmGB_MACROTILE_MODE0 + (i * 4))); | ||
3250 | } | ||
3251 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | ||
3252 | dev_info(adev->dev, " se: %d\n", i); | ||
3253 | gfx_v8_0_select_se_sh(adev, i, 0xffffffff); | ||
3254 | dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n", | ||
3255 | RREG32(mmPA_SC_RASTER_CONFIG)); | ||
3256 | dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n", | ||
3257 | RREG32(mmPA_SC_RASTER_CONFIG_1)); | ||
3258 | } | ||
3259 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); | ||
3260 | |||
3261 | dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n", | ||
3262 | RREG32(mmGB_ADDR_CONFIG)); | ||
3263 | dev_info(adev->dev, " HDP_ADDR_CONFIG=0x%08X\n", | ||
3264 | RREG32(mmHDP_ADDR_CONFIG)); | ||
3265 | dev_info(adev->dev, " DMIF_ADDR_CALC=0x%08X\n", | ||
3266 | RREG32(mmDMIF_ADDR_CALC)); | ||
3267 | dev_info(adev->dev, " SDMA0_TILING_CONFIG=0x%08X\n", | ||
3268 | RREG32(mmSDMA0_TILING_CONFIG + SDMA0_REGISTER_OFFSET)); | ||
3269 | dev_info(adev->dev, " SDMA1_TILING_CONFIG=0x%08X\n", | ||
3270 | RREG32(mmSDMA0_TILING_CONFIG + SDMA1_REGISTER_OFFSET)); | ||
3271 | dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", | ||
3272 | RREG32(mmUVD_UDEC_ADDR_CONFIG)); | ||
3273 | dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", | ||
3274 | RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); | ||
3275 | dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", | ||
3276 | RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); | ||
3277 | |||
3278 | dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n", | ||
3279 | RREG32(mmCP_MEQ_THRESHOLDS)); | ||
3280 | dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n", | ||
3281 | RREG32(mmSX_DEBUG_1)); | ||
3282 | dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n", | ||
3283 | RREG32(mmTA_CNTL_AUX)); | ||
3284 | dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n", | ||
3285 | RREG32(mmSPI_CONFIG_CNTL)); | ||
3286 | dev_info(adev->dev, " SQ_CONFIG=0x%08X\n", | ||
3287 | RREG32(mmSQ_CONFIG)); | ||
3288 | dev_info(adev->dev, " DB_DEBUG=0x%08X\n", | ||
3289 | RREG32(mmDB_DEBUG)); | ||
3290 | dev_info(adev->dev, " DB_DEBUG2=0x%08X\n", | ||
3291 | RREG32(mmDB_DEBUG2)); | ||
3292 | dev_info(adev->dev, " DB_DEBUG3=0x%08X\n", | ||
3293 | RREG32(mmDB_DEBUG3)); | ||
3294 | dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n", | ||
3295 | RREG32(mmCB_HW_CONTROL)); | ||
3296 | dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n", | ||
3297 | RREG32(mmSPI_CONFIG_CNTL_1)); | ||
3298 | dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n", | ||
3299 | RREG32(mmPA_SC_FIFO_SIZE)); | ||
3300 | dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n", | ||
3301 | RREG32(mmVGT_NUM_INSTANCES)); | ||
3302 | dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n", | ||
3303 | RREG32(mmCP_PERFMON_CNTL)); | ||
3304 | dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n", | ||
3305 | RREG32(mmPA_SC_FORCE_EOV_MAX_CNTS)); | ||
3306 | dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n", | ||
3307 | RREG32(mmVGT_CACHE_INVALIDATION)); | ||
3308 | dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n", | ||
3309 | RREG32(mmVGT_GS_VERTEX_REUSE)); | ||
3310 | dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n", | ||
3311 | RREG32(mmPA_SC_LINE_STIPPLE_STATE)); | ||
3312 | dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n", | ||
3313 | RREG32(mmPA_CL_ENHANCE)); | ||
3314 | dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n", | ||
3315 | RREG32(mmPA_SC_ENHANCE)); | ||
3316 | |||
3317 | dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n", | ||
3318 | RREG32(mmCP_ME_CNTL)); | ||
3319 | dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n", | ||
3320 | RREG32(mmCP_MAX_CONTEXT)); | ||
3321 | dev_info(adev->dev, " CP_ENDIAN_SWAP=0x%08X\n", | ||
3322 | RREG32(mmCP_ENDIAN_SWAP)); | ||
3323 | dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n", | ||
3324 | RREG32(mmCP_DEVICE_ID)); | ||
3325 | |||
3326 | dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n", | ||
3327 | RREG32(mmCP_SEM_WAIT_TIMER)); | ||
3328 | |||
3329 | dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n", | ||
3330 | RREG32(mmCP_RB_WPTR_DELAY)); | ||
3331 | dev_info(adev->dev, " CP_RB_VMID=0x%08X\n", | ||
3332 | RREG32(mmCP_RB_VMID)); | ||
3333 | dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", | ||
3334 | RREG32(mmCP_RB0_CNTL)); | ||
3335 | dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n", | ||
3336 | RREG32(mmCP_RB0_WPTR)); | ||
3337 | dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n", | ||
3338 | RREG32(mmCP_RB0_RPTR_ADDR)); | ||
3339 | dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n", | ||
3340 | RREG32(mmCP_RB0_RPTR_ADDR_HI)); | ||
3341 | dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n", | ||
3342 | RREG32(mmCP_RB0_CNTL)); | ||
3343 | dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n", | ||
3344 | RREG32(mmCP_RB0_BASE)); | ||
3345 | dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n", | ||
3346 | RREG32(mmCP_RB0_BASE_HI)); | ||
3347 | dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n", | ||
3348 | RREG32(mmCP_MEC_CNTL)); | ||
3349 | dev_info(adev->dev, " CP_CPF_DEBUG=0x%08X\n", | ||
3350 | RREG32(mmCP_CPF_DEBUG)); | ||
3351 | |||
3352 | dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n", | ||
3353 | RREG32(mmSCRATCH_ADDR)); | ||
3354 | dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n", | ||
3355 | RREG32(mmSCRATCH_UMSK)); | ||
3356 | |||
3357 | dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n", | ||
3358 | RREG32(mmCP_INT_CNTL_RING0)); | ||
3359 | dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", | ||
3360 | RREG32(mmRLC_LB_CNTL)); | ||
3361 | dev_info(adev->dev, " RLC_CNTL=0x%08X\n", | ||
3362 | RREG32(mmRLC_CNTL)); | ||
3363 | dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n", | ||
3364 | RREG32(mmRLC_CGCG_CGLS_CTRL)); | ||
3365 | dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n", | ||
3366 | RREG32(mmRLC_LB_CNTR_INIT)); | ||
3367 | dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n", | ||
3368 | RREG32(mmRLC_LB_CNTR_MAX)); | ||
3369 | dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n", | ||
3370 | RREG32(mmRLC_LB_INIT_CU_MASK)); | ||
3371 | dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n", | ||
3372 | RREG32(mmRLC_LB_PARAMS)); | ||
3373 | dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n", | ||
3374 | RREG32(mmRLC_LB_CNTL)); | ||
3375 | dev_info(adev->dev, " RLC_MC_CNTL=0x%08X\n", | ||
3376 | RREG32(mmRLC_MC_CNTL)); | ||
3377 | dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n", | ||
3378 | RREG32(mmRLC_UCODE_CNTL)); | ||
3379 | |||
3380 | mutex_lock(&adev->srbm_mutex); | ||
3381 | for (i = 0; i < 16; i++) { | ||
3382 | vi_srbm_select(adev, 0, 0, 0, i); | ||
3383 | dev_info(adev->dev, " VM %d:\n", i); | ||
3384 | dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n", | ||
3385 | RREG32(mmSH_MEM_CONFIG)); | ||
3386 | dev_info(adev->dev, " SH_MEM_APE1_BASE=0x%08X\n", | ||
3387 | RREG32(mmSH_MEM_APE1_BASE)); | ||
3388 | dev_info(adev->dev, " SH_MEM_APE1_LIMIT=0x%08X\n", | ||
3389 | RREG32(mmSH_MEM_APE1_LIMIT)); | ||
3390 | dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n", | ||
3391 | RREG32(mmSH_MEM_BASES)); | ||
3392 | } | ||
3393 | vi_srbm_select(adev, 0, 0, 0, 0); | ||
3394 | mutex_unlock(&adev->srbm_mutex); | ||
3395 | } | ||
3396 | |||
3397 | static int gfx_v8_0_soft_reset(struct amdgpu_device *adev) | ||
3398 | { | ||
3399 | u32 grbm_soft_reset = 0, srbm_soft_reset = 0; | ||
3400 | u32 tmp; | ||
3401 | |||
3402 | /* GRBM_STATUS */ | ||
3403 | tmp = RREG32(mmGRBM_STATUS); | ||
3404 | if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | | ||
3405 | GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | | ||
3406 | GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | | ||
3407 | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | | ||
3408 | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | | ||
3409 | GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { | ||
3410 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | ||
3411 | GRBM_SOFT_RESET, SOFT_RESET_CP, 1); | ||
3412 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | ||
3413 | GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); | ||
3414 | } | ||
3415 | |||
3416 | if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { | ||
3417 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | ||
3418 | GRBM_SOFT_RESET, SOFT_RESET_CP, 1); | ||
3419 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, | ||
3420 | SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); | ||
3421 | } | ||
3422 | |||
3423 | /* GRBM_STATUS2 */ | ||
3424 | tmp = RREG32(mmGRBM_STATUS2); | ||
3425 | if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) | ||
3426 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, | ||
3427 | GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); | ||
3428 | |||
3429 | /* SRBM_STATUS */ | ||
3430 | tmp = RREG32(mmSRBM_STATUS); | ||
3431 | if (REG_GET_FIELD(tmp, SRBM_STATUS, GRBM_RQ_PENDING)) | ||
3432 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, | ||
3433 | SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); | ||
3434 | |||
3435 | if (grbm_soft_reset || srbm_soft_reset) { | ||
3436 | gfx_v8_0_print_status(adev); | ||
3437 | /* stop the rlc */ | ||
3438 | gfx_v8_0_rlc_stop(adev); | ||
3439 | |||
3440 | /* Disable GFX parsing/prefetching */ | ||
3441 | gfx_v8_0_cp_gfx_enable(adev, false); | ||
3442 | |||
3443 | /* Disable MEC parsing/prefetching */ | ||
3444 | /* XXX todo */ | ||
3445 | |||
3446 | if (grbm_soft_reset) { | ||
3447 | tmp = RREG32(mmGRBM_SOFT_RESET); | ||
3448 | tmp |= grbm_soft_reset; | ||
3449 | dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); | ||
3450 | WREG32(mmGRBM_SOFT_RESET, tmp); | ||
3451 | tmp = RREG32(mmGRBM_SOFT_RESET); | ||
3452 | |||
3453 | udelay(50); | ||
3454 | |||
3455 | tmp &= ~grbm_soft_reset; | ||
3456 | WREG32(mmGRBM_SOFT_RESET, tmp); | ||
3457 | tmp = RREG32(mmGRBM_SOFT_RESET); | ||
3458 | } | ||
3459 | |||
3460 | if (srbm_soft_reset) { | ||
3461 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3462 | tmp |= srbm_soft_reset; | ||
3463 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
3464 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
3465 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3466 | |||
3467 | udelay(50); | ||
3468 | |||
3469 | tmp &= ~srbm_soft_reset; | ||
3470 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
3471 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
3472 | } | ||
3473 | /* Wait a little for things to settle down */ | ||
3474 | udelay(50); | ||
3475 | gfx_v8_0_print_status(adev); | ||
3476 | } | ||
3477 | return 0; | ||
3478 | } | ||
3479 | |||
3480 | /** | ||
3481 | * gfx_v8_0_get_gpu_clock_counter - return GPU clock counter snapshot | ||
3482 | * | ||
3483 | * @adev: amdgpu_device pointer | ||
3484 | * | ||
3485 | * Fetches a GPU clock counter snapshot. | ||
3486 | * Returns the 64-bit clock counter snapshot. | ||
3487 | */ | ||
3488 | uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) | ||
3489 | { | ||
3490 | uint64_t clock; | ||
3491 | |||
3492 | mutex_lock(&adev->gfx.gpu_clock_mutex); | ||
3493 | WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); | ||
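/* the capture write above latches the counter so that the two
 * 32-bit reads below observe a consistent 64-bit snapshot */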
3494 | clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) | | ||
3495 | ((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); | ||
3496 | mutex_unlock(&adev->gfx.gpu_clock_mutex); | ||
3497 | return clock; | ||
3498 | } | ||
3499 | |||
3500 | static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, | ||
3501 | uint32_t vmid, | ||
3502 | uint32_t gds_base, uint32_t gds_size, | ||
3503 | uint32_t gws_base, uint32_t gws_size, | ||
3504 | uint32_t oa_base, uint32_t oa_size) | ||
3505 | { | ||
3506 | gds_base = gds_base >> AMDGPU_GDS_SHIFT; | ||
3507 | gds_size = gds_size >> AMDGPU_GDS_SHIFT; | ||
3508 | |||
3509 | gws_base = gws_base >> AMDGPU_GWS_SHIFT; | ||
3510 | gws_size = gws_size >> AMDGPU_GWS_SHIFT; | ||
3511 | |||
3512 | oa_base = oa_base >> AMDGPU_OA_SHIFT; | ||
3513 | oa_size = oa_size >> AMDGPU_OA_SHIFT; | ||
3514 | |||
3515 | /* GDS Base */ | ||
3516 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3517 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3518 | WRITE_DATA_DST_SEL(0))); | ||
3519 | amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base); | ||
3520 | amdgpu_ring_write(ring, 0); | ||
3521 | amdgpu_ring_write(ring, gds_base); | ||
3522 | |||
3523 | /* GDS Size */ | ||
3524 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3525 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3526 | WRITE_DATA_DST_SEL(0))); | ||
3527 | amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size); | ||
3528 | amdgpu_ring_write(ring, 0); | ||
3529 | amdgpu_ring_write(ring, gds_size); | ||
3530 | |||
3531 | /* GWS */ | ||
3532 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3533 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3534 | WRITE_DATA_DST_SEL(0))); | ||
3535 | amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws); | ||
3536 | amdgpu_ring_write(ring, 0); | ||
3537 | amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base); | ||
3538 | |||
3539 | /* OA */ | ||
3540 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3541 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3542 | WRITE_DATA_DST_SEL(0))); | ||
3543 | amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa); | ||
3544 | amdgpu_ring_write(ring, 0); | ||
3545 | amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); | ||
3546 | } | ||
3547 | |||
3548 | static int gfx_v8_0_early_init(struct amdgpu_device *adev) | ||
3549 | { | ||
3550 | |||
3551 | adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; | ||
3552 | adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS; | ||
3553 | gfx_v8_0_set_ring_funcs(adev); | ||
3554 | gfx_v8_0_set_irq_funcs(adev); | ||
3555 | gfx_v8_0_set_gds_init(adev); | ||
3556 | |||
3557 | return 0; | ||
3558 | } | ||
3559 | |||
3560 | static int gfx_v8_0_set_powergating_state(struct amdgpu_device *adev, | ||
3561 | enum amdgpu_powergating_state state) | ||
3562 | { | ||
3563 | return 0; | ||
3564 | } | ||
3565 | |||
3566 | static int gfx_v8_0_set_clockgating_state(struct amdgpu_device *adev, | ||
3567 | enum amdgpu_clockgating_state state) | ||
3568 | { | ||
3569 | return 0; | ||
3570 | } | ||
3571 | |||
3572 | static u32 gfx_v8_0_ring_get_rptr_gfx(struct amdgpu_ring *ring) | ||
3573 | { | ||
3574 | u32 rptr; | ||
3575 | |||
3576 | rptr = ring->adev->wb.wb[ring->rptr_offs]; | ||
3577 | |||
3578 | return rptr; | ||
3579 | } | ||
3580 | |||
3581 | static u32 gfx_v8_0_ring_get_wptr_gfx(struct amdgpu_ring *ring) | ||
3582 | { | ||
3583 | struct amdgpu_device *adev = ring->adev; | ||
3584 | u32 wptr; | ||
3585 | |||
3586 | if (ring->use_doorbell) | ||
3587 | /* XXX check if swapping is necessary on BE */ | ||
3588 | wptr = ring->adev->wb.wb[ring->wptr_offs]; | ||
3589 | else | ||
3590 | wptr = RREG32(mmCP_RB0_WPTR); | ||
3591 | |||
3592 | return wptr; | ||
3593 | } | ||
3594 | |||
3595 | static void gfx_v8_0_ring_set_wptr_gfx(struct amdgpu_ring *ring) | ||
3596 | { | ||
3597 | struct amdgpu_device *adev = ring->adev; | ||
3598 | |||
3599 | if (ring->use_doorbell) { | ||
3600 | /* XXX check if swapping is necessary on BE */ | ||
3601 | adev->wb.wb[ring->wptr_offs] = ring->wptr; | ||
3602 | WDOORBELL32(ring->doorbell_index, ring->wptr); | ||
3603 | } else { | ||
3604 | WREG32(mmCP_RB0_WPTR, ring->wptr); | ||
3605 | (void)RREG32(mmCP_RB0_WPTR); | ||
3606 | } | ||
3607 | } | ||
3608 | |||
3609 | static void gfx_v8_0_hdp_flush_cp_ring_emit(struct amdgpu_ring *ring) | ||
3610 | { | ||
3611 | u32 ref_and_mask, reg_mem_engine; | ||
3612 | |||
3613 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) { | ||
3614 | switch (ring->me) { | ||
3615 | case 1: | ||
3616 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; | ||
3617 | break; | ||
3618 | case 2: | ||
3619 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe; | ||
3620 | break; | ||
3621 | default: | ||
3622 | return; | ||
3623 | } | ||
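/* each compute pipe has its own flush-done bit (CP2..CP5 for MEC1,
 * CP6..CP9 for MEC2), hence the per-pipe shift above */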
3624 | reg_mem_engine = 0; | ||
3625 | } else { | ||
3626 | ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK; | ||
3627 | reg_mem_engine = WAIT_REG_MEM_ENGINE(1); /* pfp */ | ||
3628 | } | ||
3629 | |||
3630 | amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | ||
3631 | amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */ | ||
3632 | WAIT_REG_MEM_FUNCTION(3) | /* == */ | ||
3633 | reg_mem_engine)); | ||
3634 | amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ); | ||
3635 | amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE); | ||
3636 | amdgpu_ring_write(ring, ref_and_mask); | ||
3637 | amdgpu_ring_write(ring, ref_and_mask); | ||
3638 | amdgpu_ring_write(ring, 0x20); /* poll interval */ | ||
3639 | } | ||
3640 | |||
3641 | static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, | ||
3642 | struct amdgpu_ib *ib) | ||
3643 | { | ||
3644 | u32 header, control = 0; | ||
3645 | u32 next_rptr = ring->wptr + 5; | ||
3646 | if (ring->type == AMDGPU_RING_TYPE_COMPUTE) | ||
3647 | control |= INDIRECT_BUFFER_VALID; | ||
3648 | |||
3649 | if (ib->flush_hdp_writefifo) | ||
3650 | next_rptr += 7; | ||
3651 | |||
3652 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) | ||
3653 | next_rptr += 2; | ||
3654 | |||
3655 | next_rptr += 4; | ||
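/* next_rptr accounting (sketch): 5 dwords for the WRITE_DATA packet
 * below, an optional 7 for the HDP flush, an optional 2 for
 * SWITCH_BUFFER, and 4 for the INDIRECT_BUFFER packet itself, so
 * next_rptr points just past this ring frame */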
3656 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3657 | amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); | ||
3658 | amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); | ||
3659 | amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); | ||
3660 | amdgpu_ring_write(ring, next_rptr); | ||
3661 | |||
3662 | if (ib->flush_hdp_writefifo) | ||
3663 | gfx_v8_0_hdp_flush_cp_ring_emit(ring); | ||
3664 | |||
3665 | /* insert SWITCH_BUFFER packet before first IB in the ring frame */ | ||
3666 | if (ring->need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { | ||
3667 | amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); | ||
3668 | amdgpu_ring_write(ring, 0); | ||
3669 | ring->need_ctx_switch = false; | ||
3670 | } | ||
3671 | |||
3672 | if (ib->is_const_ib) | ||
3673 | header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2); | ||
3674 | else | ||
3675 | header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); | ||
3676 | |||
3677 | control |= ib->length_dw | | ||
3678 | (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0); | ||
3679 | |||
3680 | amdgpu_ring_write(ring, header); | ||
3681 | amdgpu_ring_write(ring, | ||
3682 | #ifdef __BIG_ENDIAN | ||
3683 | (2 << 0) | | ||
3684 | #endif | ||
3685 | (ib->gpu_addr & 0xFFFFFFFC)); | ||
3686 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); | ||
3687 | amdgpu_ring_write(ring, control); | ||
3688 | } | ||
3689 | |||
3690 | static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, | ||
3691 | u64 seq, bool write64bit) | ||
3692 | { | ||
3693 | /* EVENT_WRITE_EOP - flush caches, send int */ | ||
3694 | amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); | ||
3695 | amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | | ||
3696 | EOP_TC_ACTION_EN | | ||
3697 | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | | ||
3698 | EVENT_INDEX(5))); | ||
3699 | amdgpu_ring_write(ring, addr & 0xfffffffc); | ||
3700 | amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | | ||
3701 | DATA_SEL(write64bit ? 2 : 1) | INT_SEL(2)); | ||
3702 | amdgpu_ring_write(ring, lower_32_bits(seq)); | ||
3703 | amdgpu_ring_write(ring, upper_32_bits(seq)); | ||
3704 | } | ||
3705 | |||
3706 | /** | ||
3707 | * gfx_v8_0_ring_emit_semaphore - emit a semaphore on the CP ring | ||
3708 | * | ||
3709 | * @ring: amdgpu ring buffer object | ||
3710 | * @semaphore: amdgpu semaphore object | ||
3711 | * @emit_wait: Is this a semaphore wait? | ||
3712 | * | ||
3713 | * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP | ||
3714 | * from running ahead of semaphore waits. | ||
3715 | */ | ||
3716 | static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring, | ||
3717 | struct amdgpu_semaphore *semaphore, | ||
3718 | bool emit_wait) | ||
3719 | { | ||
3720 | uint64_t addr = semaphore->gpu_addr; | ||
3721 | unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; | ||
3722 | |||
3723 | if (ring->adev->asic_type == CHIP_TOPAZ || | ||
3724 | ring->adev->asic_type == CHIP_TONGA) { | ||
3725 | amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); | ||
3726 | amdgpu_ring_write(ring, lower_32_bits(addr)); | ||
3727 | amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); | ||
3728 | } else { | ||
3729 | amdgpu_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 2)); | ||
3730 | amdgpu_ring_write(ring, lower_32_bits(addr)); | ||
3731 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
3732 | amdgpu_ring_write(ring, sel); | ||
3733 | } | ||
3734 | |||
3735 | if (emit_wait && (ring->type == AMDGPU_RING_TYPE_GFX)) { | ||
3736 | /* Prevent the PFP from running ahead of the semaphore wait */ | ||
3737 | amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | ||
3738 | amdgpu_ring_write(ring, 0x0); | ||
3739 | } | ||
3740 | |||
3741 | return true; | ||
3742 | } | ||
3743 | |||
3744 | static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring) | ||
3745 | { | ||
3746 | struct amdgpu_device *adev = ring->adev; | ||
3747 | u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4; | ||
3748 | |||
3749 | /* instruct DE to set a magic number */ | ||
3750 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3751 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3752 | WRITE_DATA_DST_SEL(5))); | ||
3753 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | ||
3754 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | ||
3755 | amdgpu_ring_write(ring, 1); | ||
3756 | |||
3757 | /* let CE wait until the condition is satisfied */ | ||
3758 | amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | ||
3759 | amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ | ||
3760 | WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ | ||
3761 | WAIT_REG_MEM_FUNCTION(3) | /* == */ | ||
3762 | WAIT_REG_MEM_ENGINE(2))); /* ce */ | ||
3763 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | ||
3764 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | ||
3765 | amdgpu_ring_write(ring, 1); | ||
3766 | amdgpu_ring_write(ring, 0xffffffff); | ||
3767 | amdgpu_ring_write(ring, 4); /* poll interval */ | ||
3768 | |||
3769 | /* instruct CE to reset wb of ce_sync to zero */ | ||
3770 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3771 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | | ||
3772 | WRITE_DATA_DST_SEL(5) | | ||
3773 | WR_CONFIRM)); | ||
3774 | amdgpu_ring_write(ring, gpu_addr & 0xfffffffc); | ||
3775 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff); | ||
3776 | amdgpu_ring_write(ring, 0); | ||
3777 | } | ||
3778 | |||
3779 | static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | ||
3780 | unsigned vm_id, uint64_t pd_addr) | ||
3781 | { | ||
3782 | int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); | ||
3783 | u32 srbm_gfx_cntl = 0; | ||
3784 | |||
3785 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3786 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | | ||
3787 | WRITE_DATA_DST_SEL(0))); | ||
3788 | if (vm_id < 8) { | ||
3789 | amdgpu_ring_write(ring, | ||
3790 | (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); | ||
3791 | } else { | ||
3792 | amdgpu_ring_write(ring, | ||
3793 | (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); | ||
3794 | } | ||
3795 | amdgpu_ring_write(ring, 0); | ||
3796 | amdgpu_ring_write(ring, pd_addr >> 12); | ||
3797 | |||
3798 | /* update SH_MEM_* regs */ | ||
3799 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id); | ||
3800 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3801 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3802 | WRITE_DATA_DST_SEL(0))); | ||
3803 | amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); | ||
3804 | amdgpu_ring_write(ring, 0); | ||
3805 | amdgpu_ring_write(ring, srbm_gfx_cntl); | ||
3806 | |||
3807 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); | ||
3808 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3809 | WRITE_DATA_DST_SEL(0))); | ||
3810 | amdgpu_ring_write(ring, mmSH_MEM_BASES); | ||
3811 | amdgpu_ring_write(ring, 0); | ||
3812 | |||
3813 | amdgpu_ring_write(ring, 0); /* SH_MEM_BASES */ | ||
3814 | amdgpu_ring_write(ring, 0); /* SH_MEM_CONFIG */ | ||
3815 | amdgpu_ring_write(ring, 1); /* SH_MEM_APE1_BASE */ | ||
3816 | amdgpu_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */ | ||
3817 | |||
3818 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0); | ||
3819 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3820 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3821 | WRITE_DATA_DST_SEL(0))); | ||
3822 | amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); | ||
3823 | amdgpu_ring_write(ring, 0); | ||
3824 | amdgpu_ring_write(ring, srbm_gfx_cntl); | ||
3825 | |||
3826 | |||
3827 | /* bits 0-15 are the VM contexts 0-15 */ | ||
3828 | /* invalidate the cache */ | ||
3829 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); | ||
3830 | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | | ||
3831 | WRITE_DATA_DST_SEL(0))); | ||
3832 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); | ||
3833 | amdgpu_ring_write(ring, 0); | ||
3834 | amdgpu_ring_write(ring, 1 << vm_id); | ||
3835 | |||
3836 | /* wait for the invalidate to complete */ | ||
3837 | amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); | ||
3838 | amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ | ||
3839 | WAIT_REG_MEM_FUNCTION(0) | /* always */ | ||
3840 | WAIT_REG_MEM_ENGINE(0))); /* me */ | ||
3841 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); | ||
3842 | amdgpu_ring_write(ring, 0); | ||
3843 | amdgpu_ring_write(ring, 0); /* ref */ | ||
3844 | amdgpu_ring_write(ring, 0); /* mask */ | ||
3845 | amdgpu_ring_write(ring, 0x20); /* poll interval */ | ||
3846 | |||
3847 | /* compute doesn't have PFP */ | ||
3848 | if (usepfp) { | ||
3849 | /* sync PFP to ME, otherwise we might get invalid PFP reads */ | ||
3850 | amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | ||
3851 | amdgpu_ring_write(ring, 0x0); | ||
3852 | |||
3853 | /* sync CE with ME to prevent the CE from fetching the CEIB before the context switch is done */ | ||
3854 | gfx_v8_0_ce_sync_me(ring); | ||
3855 | } | ||
3856 | } | ||
3857 | |||
3858 | static bool gfx_v8_0_ring_is_lockup(struct amdgpu_ring *ring) | ||
3859 | { | ||
3860 | if (gfx_v8_0_is_idle(ring->adev)) { | ||
3861 | amdgpu_ring_lockup_update(ring); | ||
3862 | return false; | ||
3863 | } | ||
3864 | return amdgpu_ring_test_lockup(ring); | ||
3865 | } | ||
3866 | |||
3867 | static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring) | ||
3868 | { | ||
3869 | return ring->adev->wb.wb[ring->rptr_offs]; | ||
3870 | } | ||
3871 | |||
3872 | static u32 gfx_v8_0_ring_get_wptr_compute(struct amdgpu_ring *ring) | ||
3873 | { | ||
3874 | return ring->adev->wb.wb[ring->wptr_offs]; | ||
3875 | } | ||
3876 | |||
3877 | static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring) | ||
3878 | { | ||
3879 | struct amdgpu_device *adev = ring->adev; | ||
3880 | |||
3881 | /* XXX check if swapping is necessary on BE */ | ||
3882 | adev->wb.wb[ring->wptr_offs] = ring->wptr; | ||
3883 | WDOORBELL32(ring->doorbell_index, ring->wptr); | ||
3884 | } | ||
3885 | |||
3886 | static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, | ||
3887 | u64 addr, u64 seq, | ||
3888 | bool write64bits) | ||
3889 | { | ||
3890 | /* RELEASE_MEM - flush caches, send int */ | ||
3891 | amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5)); | ||
3892 | amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | | ||
3893 | EOP_TC_ACTION_EN | | ||
3894 | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | | ||
3895 | EVENT_INDEX(5))); | ||
3896 | amdgpu_ring_write(ring, DATA_SEL(write64bits ? 2 : 1) | INT_SEL(2)); | ||
3897 | amdgpu_ring_write(ring, addr & 0xfffffffc); | ||
3898 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
3899 | amdgpu_ring_write(ring, lower_32_bits(seq)); | ||
3900 | amdgpu_ring_write(ring, upper_32_bits(seq)); | ||
3901 | } | ||
3902 | |||
3903 | static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, | ||
3904 | enum amdgpu_interrupt_state state) | ||
3905 | { | ||
3906 | u32 cp_int_cntl; | ||
3907 | |||
3908 | switch (state) { | ||
3909 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3910 | cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); | ||
3911 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, | ||
3912 | TIME_STAMP_INT_ENABLE, 0); | ||
3913 | WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); | ||
3914 | break; | ||
3915 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3916 | cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); | ||
3917 | cp_int_cntl = | ||
3918 | REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, | ||
3919 | TIME_STAMP_INT_ENABLE, 1); | ||
3920 | WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); | ||
3921 | break; | ||
3922 | default: | ||
3923 | break; | ||
3924 | } | ||
3925 | } | ||
3926 | |||
3927 | static void gfx_v8_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev, | ||
3928 | int me, int pipe, | ||
3929 | enum amdgpu_interrupt_state state) | ||
3930 | { | ||
3931 | u32 mec_int_cntl, mec_int_cntl_reg; | ||
3932 | |||
3933 | /* | ||
3934 | * amdgpu controls only pipe 0 of MEC1. That's why this function only | ||
3935 | * handles the setting of interrupts for this specific pipe. All other | ||
3936 | * pipes' interrupts are set by amdkfd. | ||
3937 | */ | ||
3938 | |||
3939 | if (me == 1) { | ||
3940 | switch (pipe) { | ||
3941 | case 0: | ||
3942 | mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL; | ||
3943 | break; | ||
3944 | default: | ||
3945 | DRM_DEBUG("invalid pipe %d\n", pipe); | ||
3946 | return; | ||
3947 | } | ||
3948 | } else { | ||
3949 | DRM_DEBUG("invalid me %d\n", me); | ||
3950 | return; | ||
3951 | } | ||
3952 | |||
3953 | switch (state) { | ||
3954 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3955 | mec_int_cntl = RREG32(mec_int_cntl_reg); | ||
3956 | mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, | ||
3957 | TIME_STAMP_INT_ENABLE, 0); | ||
3958 | WREG32(mec_int_cntl_reg, mec_int_cntl); | ||
3959 | break; | ||
3960 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3961 | mec_int_cntl = RREG32(mec_int_cntl_reg); | ||
3962 | mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, | ||
3963 | TIME_STAMP_INT_ENABLE, 1); | ||
3964 | WREG32(mec_int_cntl_reg, mec_int_cntl); | ||
3965 | break; | ||
3966 | default: | ||
3967 | break; | ||
3968 | } | ||
3969 | } | ||
3970 | |||
3971 | static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev, | ||
3972 | struct amdgpu_irq_src *source, | ||
3973 | unsigned type, | ||
3974 | enum amdgpu_interrupt_state state) | ||
3975 | { | ||
3976 | u32 cp_int_cntl; | ||
3977 | |||
3978 | switch (state) { | ||
3979 | case AMDGPU_IRQ_STATE_DISABLE: | ||
3980 | cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); | ||
3981 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, | ||
3982 | PRIV_REG_INT_ENABLE, 0); | ||
3983 | WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); | ||
3984 | break; | ||
3985 | case AMDGPU_IRQ_STATE_ENABLE: | ||
3986 | cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); | ||
3987 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, | ||
3988 | PRIV_REG_INT_ENABLE, 1); | ||
3989 | WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); | ||
3990 | break; | ||
3991 | default: | ||
3992 | break; | ||
3993 | } | ||
3994 | |||
3995 | return 0; | ||
3996 | } | ||
3997 | |||
3998 | static int gfx_v8_0_set_priv_inst_fault_state(struct amdgpu_device *adev, | ||
3999 | struct amdgpu_irq_src *source, | ||
4000 | unsigned type, | ||
4001 | enum amdgpu_interrupt_state state) | ||
4002 | { | ||
4003 | u32 cp_int_cntl; | ||
4004 | |||
4005 | switch (state) { | ||
4006 | case AMDGPU_IRQ_STATE_DISABLE: | ||
4007 | cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); | ||
4008 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, | ||
4009 | PRIV_INSTR_INT_ENABLE, 0); | ||
4010 | WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); | ||
4011 | break; | ||
4012 | case AMDGPU_IRQ_STATE_ENABLE: | ||
4013 | cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); | ||
4014 | cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, | ||
4015 | PRIV_INSTR_INT_ENABLE, 1); | ||
4016 | WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); | ||
4017 | break; | ||
4018 | default: | ||
4019 | break; | ||
4020 | } | ||
4021 | |||
4022 | return 0; | ||
4023 | } | ||
4024 | |||
4025 | static int gfx_v8_0_set_eop_interrupt_state(struct amdgpu_device *adev, | ||
4026 | struct amdgpu_irq_src *src, | ||
4027 | unsigned type, | ||
4028 | enum amdgpu_interrupt_state state) | ||
4029 | { | ||
4030 | switch (type) { | ||
4031 | case AMDGPU_CP_IRQ_GFX_EOP: | ||
4032 | gfx_v8_0_set_gfx_eop_interrupt_state(adev, state); | ||
4033 | break; | ||
4034 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP: | ||
4035 | gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 0, state); | ||
4036 | break; | ||
4037 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP: | ||
4038 | gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 1, state); | ||
4039 | break; | ||
4040 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP: | ||
4041 | gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 2, state); | ||
4042 | break; | ||
4043 | case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP: | ||
4044 | gfx_v8_0_set_compute_eop_interrupt_state(adev, 1, 3, state); | ||
4045 | break; | ||
4046 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP: | ||
4047 | gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 0, state); | ||
4048 | break; | ||
4049 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP: | ||
4050 | gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 1, state); | ||
4051 | break; | ||
4052 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP: | ||
4053 | gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 2, state); | ||
4054 | break; | ||
4055 | case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP: | ||
4056 | gfx_v8_0_set_compute_eop_interrupt_state(adev, 2, 3, state); | ||
4057 | break; | ||
4058 | default: | ||
4059 | break; | ||
4060 | } | ||
4061 | return 0; | ||
4062 | } | ||
4063 | |||
4064 | static int gfx_v8_0_eop_irq(struct amdgpu_device *adev, | ||
4065 | struct amdgpu_irq_src *source, | ||
4066 | struct amdgpu_iv_entry *entry) | ||
4067 | { | ||
4068 | int i; | ||
4069 | u8 me_id, pipe_id, queue_id; | ||
4070 | struct amdgpu_ring *ring; | ||
4071 | |||
4072 | DRM_DEBUG("IH: CP EOP\n"); | ||
4073 | me_id = (entry->ring_id & 0x0c) >> 2; | ||
4074 | pipe_id = (entry->ring_id & 0x03) >> 0; | ||
4075 | queue_id = (entry->ring_id & 0x70) >> 4; | ||
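/* ring_id layout (sketch): bits [1:0] pipe, bits [3:2] me,
 * bits [6:4] queue; e.g. ring_id 0x15 decodes to me 1, pipe 1,
 * queue 1 */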
4076 | |||
4077 | switch (me_id) { | ||
4078 | case 0: | ||
4079 | amdgpu_fence_process(&adev->gfx.gfx_ring[0]); | ||
4080 | break; | ||
4081 | case 1: | ||
4082 | case 2: | ||
4083 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | ||
4084 | ring = &adev->gfx.compute_ring[i]; | ||
4085 | /* Per-queue interrupt is supported for MEC starting from VI. | ||
4086 | * The interrupt can only be enabled/disabled per pipe instead of per queue. | ||
4087 | */ | ||
4088 | if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) | ||
4089 | amdgpu_fence_process(ring); | ||
4090 | } | ||
4091 | break; | ||
4092 | } | ||
4093 | return 0; | ||
4094 | } | ||
4095 | |||
4096 | static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev, | ||
4097 | struct amdgpu_irq_src *source, | ||
4098 | struct amdgpu_iv_entry *entry) | ||
4099 | { | ||
4100 | DRM_ERROR("Illegal register access in command stream\n"); | ||
4101 | schedule_work(&adev->reset_work); | ||
4102 | return 0; | ||
4103 | } | ||
4104 | |||
4105 | static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, | ||
4106 | struct amdgpu_irq_src *source, | ||
4107 | struct amdgpu_iv_entry *entry) | ||
4108 | { | ||
4109 | DRM_ERROR("Illegal instruction in command stream\n"); | ||
4110 | schedule_work(&adev->reset_work); | ||
4111 | return 0; | ||
4112 | } | ||
4113 | |||
4114 | const struct amdgpu_ip_funcs gfx_v8_0_ip_funcs = { | ||
4115 | .early_init = gfx_v8_0_early_init, | ||
4116 | .late_init = NULL, | ||
4117 | .sw_init = gfx_v8_0_sw_init, | ||
4118 | .sw_fini = gfx_v8_0_sw_fini, | ||
4119 | .hw_init = gfx_v8_0_hw_init, | ||
4120 | .hw_fini = gfx_v8_0_hw_fini, | ||
4121 | .suspend = gfx_v8_0_suspend, | ||
4122 | .resume = gfx_v8_0_resume, | ||
4123 | .is_idle = gfx_v8_0_is_idle, | ||
4124 | .wait_for_idle = gfx_v8_0_wait_for_idle, | ||
4125 | .soft_reset = gfx_v8_0_soft_reset, | ||
4126 | .print_status = gfx_v8_0_print_status, | ||
4127 | .set_clockgating_state = gfx_v8_0_set_clockgating_state, | ||
4128 | .set_powergating_state = gfx_v8_0_set_powergating_state, | ||
4129 | }; | ||
4130 | |||
4131 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { | ||
4132 | .get_rptr = gfx_v8_0_ring_get_rptr_gfx, | ||
4133 | .get_wptr = gfx_v8_0_ring_get_wptr_gfx, | ||
4134 | .set_wptr = gfx_v8_0_ring_set_wptr_gfx, | ||
4135 | .parse_cs = NULL, | ||
4136 | .emit_ib = gfx_v8_0_ring_emit_ib, | ||
4137 | .emit_fence = gfx_v8_0_ring_emit_fence_gfx, | ||
4138 | .emit_semaphore = gfx_v8_0_ring_emit_semaphore, | ||
4139 | .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, | ||
4140 | .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, | ||
4141 | .test_ring = gfx_v8_0_ring_test_ring, | ||
4142 | .test_ib = gfx_v8_0_ring_test_ib, | ||
4143 | .is_lockup = gfx_v8_0_ring_is_lockup, | ||
4144 | }; | ||
4145 | |||
4146 | static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { | ||
4147 | .get_rptr = gfx_v8_0_ring_get_rptr_compute, | ||
4148 | .get_wptr = gfx_v8_0_ring_get_wptr_compute, | ||
4149 | .set_wptr = gfx_v8_0_ring_set_wptr_compute, | ||
4150 | .parse_cs = NULL, | ||
4151 | .emit_ib = gfx_v8_0_ring_emit_ib, | ||
4152 | .emit_fence = gfx_v8_0_ring_emit_fence_compute, | ||
4153 | .emit_semaphore = gfx_v8_0_ring_emit_semaphore, | ||
4154 | .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, | ||
4155 | .emit_gds_switch = gfx_v8_0_ring_emit_gds_switch, | ||
4156 | .test_ring = gfx_v8_0_ring_test_ring, | ||
4157 | .test_ib = gfx_v8_0_ring_test_ib, | ||
4158 | .is_lockup = gfx_v8_0_ring_is_lockup, | ||
4159 | }; | ||
4160 | |||
4161 | static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) | ||
4162 | { | ||
4163 | int i; | ||
4164 | |||
4165 | for (i = 0; i < adev->gfx.num_gfx_rings; i++) | ||
4166 | adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx; | ||
4167 | |||
4168 | for (i = 0; i < adev->gfx.num_compute_rings; i++) | ||
4169 | adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute; | ||
4170 | } | ||
4171 | |||
4172 | static const struct amdgpu_irq_src_funcs gfx_v8_0_eop_irq_funcs = { | ||
4173 | .set = gfx_v8_0_set_eop_interrupt_state, | ||
4174 | .process = gfx_v8_0_eop_irq, | ||
4175 | }; | ||
4176 | |||
4177 | static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_reg_irq_funcs = { | ||
4178 | .set = gfx_v8_0_set_priv_reg_fault_state, | ||
4179 | .process = gfx_v8_0_priv_reg_irq, | ||
4180 | }; | ||
4181 | |||
4182 | static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = { | ||
4183 | .set = gfx_v8_0_set_priv_inst_fault_state, | ||
4184 | .process = gfx_v8_0_priv_inst_irq, | ||
4185 | }; | ||
4186 | |||
4187 | static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) | ||
4188 | { | ||
4189 | adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; | ||
4190 | adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs; | ||
4191 | |||
4192 | adev->gfx.priv_reg_irq.num_types = 1; | ||
4193 | adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs; | ||
4194 | |||
4195 | adev->gfx.priv_inst_irq.num_types = 1; | ||
4196 | adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs; | ||
4197 | } | ||
4198 | |||
4199 | static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) | ||
4200 | { | ||
4201 | /* init asic gds info */ | ||
4202 | adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE); | ||
4203 | adev->gds.gws.total_size = 64; | ||
4204 | adev->gds.oa.total_size = 16; | ||
4205 | |||
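/* with 64KB of GDS, a 4KB partition per VMID covers all 16 VMIDs
 * exactly (16 * 4096 == 64KB), hence the split below */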
4206 | if (adev->gds.mem.total_size == 64 * 1024) { | ||
4207 | adev->gds.mem.gfx_partition_size = 4096; | ||
4208 | adev->gds.mem.cs_partition_size = 4096; | ||
4209 | |||
4210 | adev->gds.gws.gfx_partition_size = 4; | ||
4211 | adev->gds.gws.cs_partition_size = 4; | ||
4212 | |||
4213 | adev->gds.oa.gfx_partition_size = 4; | ||
4214 | adev->gds.oa.cs_partition_size = 1; | ||
4215 | } else { | ||
4216 | adev->gds.mem.gfx_partition_size = 1024; | ||
4217 | adev->gds.mem.cs_partition_size = 1024; | ||
4218 | |||
4219 | adev->gds.gws.gfx_partition_size = 16; | ||
4220 | adev->gds.gws.cs_partition_size = 16; | ||
4221 | |||
4222 | adev->gds.oa.gfx_partition_size = 4; | ||
4223 | adev->gds.oa.cs_partition_size = 4; | ||
4224 | } | ||
4225 | } | ||
4226 | |||
4227 | static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev, | ||
4228 | u32 se, u32 sh) | ||
4229 | { | ||
4230 | u32 mask = 0, tmp, tmp1; | ||
4231 | int i; | ||
4232 | |||
4233 | gfx_v8_0_select_se_sh(adev, se, sh); | ||
4234 | tmp = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG); | ||
4235 | tmp1 = RREG32(mmGC_USER_SHADER_ARRAY_CONFIG); | ||
4236 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); | ||
4237 | |||
4238 | tmp &= 0xffff0000; | ||
4239 | |||
4240 | tmp |= tmp1; | ||
4241 | tmp >>= 16; | ||
4242 | |||
4243 | for (i = 0; i < adev->gfx.config.max_cu_per_sh; i ++) { | ||
4244 | mask <<= 1; | ||
4245 | mask |= 1; | ||
4246 | } | ||
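/* mask now has max_cu_per_sh low bits set (0xff for 8 CUs per SH);
 * inactive CUs read back as 1s, so (~tmp) & mask below yields the
 * active-CU bitmap, e.g. tmp = 0x03 -> 0xfc (CUs 2-7 active) */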
4247 | |||
4248 | return (~tmp) & mask; | ||
4249 | } | ||
4250 | |||
4251 | int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, | ||
4252 | struct amdgpu_cu_info *cu_info) | ||
4253 | { | ||
4254 | int i, j, k, counter, active_cu_number = 0; | ||
4255 | u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; | ||
4256 | |||
4257 | if (!adev || !cu_info) | ||
4258 | return -EINVAL; | ||
4259 | |||
4260 | mutex_lock(&adev->grbm_idx_mutex); | ||
4261 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { | ||
4262 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { | ||
4263 | mask = 1; | ||
4264 | ao_bitmap = 0; | ||
4265 | counter = 0; | ||
4266 | bitmap = gfx_v8_0_get_cu_active_bitmap(adev, i, j); | ||
4267 | cu_info->bitmap[i][j] = bitmap; | ||
4268 | |||
4269 | for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) { | ||
4270 | if (bitmap & mask) { | ||
4271 | if (counter < 2) | ||
4272 | ao_bitmap |= mask; | ||
4273 | counter ++; | ||
4274 | } | ||
4275 | mask <<= 1; | ||
4276 | } | ||
4277 | active_cu_number += counter; | ||
4278 | ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); | ||
4279 | } | ||
4280 | } | ||
4281 | |||
4282 | cu_info->number = active_cu_number; | ||
4283 | cu_info->ao_cu_mask = ao_cu_mask; | ||
4284 | mutex_unlock(&adev->grbm_idx_mutex); | ||
4285 | return 0; | ||
4286 | } | ||
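
For intuition on the always-on (ao) accounting above: only the first two active CUs in each SH contribute to ao_bitmap, and each SE/SH pair gets an 8-bit lane at bit offset i * 16 + j * 8. A worked standalone example with a made-up bitmap for SE0/SH1:

#include <stdio.h>
#include <stdint.h>

/* Assume SE0/SH1 (i = 0, j = 1) reports active bitmap 0b1011: CUs 0, 1
 * and 3 are active, but only the first two found are marked always-on.
 * The 8-bit lane then lands at bit 0 * 16 + 1 * 8 = 8. */
int main(void)
{
	uint32_t bitmap = 0xb, mask = 1, ao_bitmap = 0;
	int k, counter = 0;

	for (k = 0; k < 8; k++) {
		if (bitmap & mask) {
			if (counter < 2)
				ao_bitmap |= mask;
			counter++;
		}
		mask <<= 1;
	}
	printf("active=%d ao_bitmap=0x%x packed=0x%x\n",
	       counter, ao_bitmap, ao_bitmap << (0 * 16 + 1 * 8));
	/* prints: active=3 ao_bitmap=0x3 packed=0x300 */
	return 0;
}
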
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h new file mode 100644 index 000000000000..be8a5f8e176e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __GFX_V8_0_H__ | ||
25 | #define __GFX_V8_0_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs gfx_v8_0_ip_funcs; | ||
28 | |||
29 | uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev); | ||
30 | void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); | ||
31 | int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info); | ||
32 | |||
33 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c new file mode 100644 index 000000000000..ac8cff85cde3 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -0,0 +1,1271 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include <linux/firmware.h> | ||
24 | #include "drmP.h" | ||
25 | #include "amdgpu.h" | ||
26 | #include "gmc_v8_0.h" | ||
27 | #include "amdgpu_ucode.h" | ||
28 | |||
29 | #include "gmc/gmc_8_1_d.h" | ||
30 | #include "gmc/gmc_8_1_sh_mask.h" | ||
31 | |||
32 | #include "bif/bif_5_0_d.h" | ||
33 | #include "bif/bif_5_0_sh_mask.h" | ||
34 | |||
35 | #include "oss/oss_3_0_d.h" | ||
36 | #include "oss/oss_3_0_sh_mask.h" | ||
37 | |||
38 | #include "vid.h" | ||
39 | #include "vi.h" | ||
40 | |||
41 | static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); | ||
42 | static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); | ||
43 | |||
44 | MODULE_FIRMWARE("radeon/topaz_mc.bin"); | ||
45 | MODULE_FIRMWARE("radeon/tonga_mc.bin"); | ||
46 | |||
47 | static const u32 golden_settings_tonga_a11[] = | ||
48 | { | ||
49 | mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000, | ||
50 | mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028, | ||
51 | mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991, | ||
52 | mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, | ||
53 | mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, | ||
54 | mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, | ||
55 | mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff, | ||
56 | }; | ||
57 | |||
58 | static const u32 tonga_mgcg_cgcg_init[] = | ||
59 | { | ||
60 | mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 | ||
61 | }; | ||
62 | |||
63 | static const u32 golden_settings_iceland_a11[] = | ||
64 | { | ||
65 | mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, | ||
66 | mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, | ||
67 | mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, | ||
68 | mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff | ||
69 | }; | ||
70 | |||
71 | static const u32 iceland_mgcg_cgcg_init[] = | ||
72 | { | ||
73 | mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 | ||
74 | }; | ||
75 | |||
76 | static const u32 cz_mgcg_cgcg_init[] = | ||
77 | { | ||
78 | mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 | ||
79 | }; | ||
80 | |||
81 | static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) | ||
82 | { | ||
83 | switch (adev->asic_type) { | ||
84 | case CHIP_TOPAZ: | ||
85 | amdgpu_program_register_sequence(adev, | ||
86 | iceland_mgcg_cgcg_init, | ||
87 | (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); | ||
88 | amdgpu_program_register_sequence(adev, | ||
89 | golden_settings_iceland_a11, | ||
90 | (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); | ||
91 | break; | ||
92 | case CHIP_TONGA: | ||
93 | amdgpu_program_register_sequence(adev, | ||
94 | tonga_mgcg_cgcg_init, | ||
95 | (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); | ||
96 | amdgpu_program_register_sequence(adev, | ||
97 | golden_settings_tonga_a11, | ||
98 | (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); | ||
99 | break; | ||
100 | case CHIP_CARRIZO: | ||
101 | amdgpu_program_register_sequence(adev, | ||
102 | cz_mgcg_cgcg_init, | ||
103 | (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); | ||
104 | break; | ||
105 | default: | ||
106 | break; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback. | ||
112 | * | ||
113 | * @adev: amdgpu_device pointer | ||
114 | * | ||
115 | * Wait for the MC (memory controller) to be idle | ||
116 | * (VI). | ||
117 | * Returns 0 if the MC is idle, -1 if not. | ||
118 | */ | ||
119 | int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev) | ||
120 | { | ||
121 | unsigned i; | ||
122 | u32 tmp; | ||
123 | |||
124 | for (i = 0; i < adev->usec_timeout; i++) { | ||
125 | /* read MC_STATUS */ | ||
126 | tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK | | ||
127 | SRBM_STATUS__MCB_BUSY_MASK | | ||
128 | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | ||
129 | SRBM_STATUS__MCC_BUSY_MASK | | ||
130 | SRBM_STATUS__MCD_BUSY_MASK | | ||
131 | SRBM_STATUS__VMC1_BUSY_MASK); | ||
132 | if (!tmp) | ||
133 | return 0; | ||
134 | udelay(1); | ||
135 | } | ||
136 | return -1; | ||
137 | } | ||
138 | |||
139 | void gmc_v8_0_mc_stop(struct amdgpu_device *adev, | ||
140 | struct amdgpu_mode_mc_save *save) | ||
141 | { | ||
142 | u32 blackout; | ||
143 | |||
144 | if (adev->mode_info.num_crtc) | ||
145 | amdgpu_display_stop_mc_access(adev, save); | ||
146 | |||
147 | amdgpu_asic_wait_for_mc_idle(adev); | ||
148 | |||
149 | blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); | ||
150 | if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { | ||
151 | /* Block CPU access */ | ||
152 | WREG32(mmBIF_FB_EN, 0); | ||
153 | /* blackout the MC */ | ||
154 | blackout = REG_SET_FIELD(blackout, | ||
155 | MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1); | ||
156 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); | ||
157 | } | ||
158 | /* wait for the MC to settle */ | ||
159 | udelay(100); | ||
160 | } | ||
161 | |||
162 | void gmc_v8_0_mc_resume(struct amdgpu_device *adev, | ||
163 | struct amdgpu_mode_mc_save *save) | ||
164 | { | ||
165 | u32 tmp; | ||
166 | |||
167 | /* unblackout the MC */ | ||
168 | tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL); | ||
169 | tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); | ||
170 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp); | ||
171 | /* allow CPU access */ | ||
172 | tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); | ||
173 | tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); | ||
174 | WREG32(mmBIF_FB_EN, tmp); | ||
175 | |||
176 | if (adev->mode_info.num_crtc) | ||
177 | amdgpu_display_resume_mc_access(adev, save); | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * gmc_v8_0_init_microcode - load ucode images from disk | ||
182 | * | ||
183 | * @adev: amdgpu_device pointer | ||
184 | * | ||
185 | * Use the firmware interface to load the ucode images into | ||
186 | * the driver (not loaded into hw). | ||
187 | * Returns 0 on success, error on failure. | ||
188 | */ | ||
189 | static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) | ||
190 | { | ||
191 | const char *chip_name; | ||
192 | char fw_name[30]; | ||
193 | int err; | ||
194 | |||
195 | DRM_DEBUG("\n"); | ||
196 | |||
197 | switch (adev->asic_type) { | ||
198 | case CHIP_TOPAZ: | ||
199 | chip_name = "topaz"; | ||
200 | break; | ||
201 | case CHIP_TONGA: | ||
202 | chip_name = "tonga"; | ||
203 | break; | ||
204 | case CHIP_CARRIZO: | ||
205 | return 0; | ||
206 | default: BUG(); | ||
207 | } | ||
208 | |||
209 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | ||
210 | err = request_firmware(&adev->mc.fw, fw_name, adev->dev); | ||
211 | if (err) | ||
212 | goto out; | ||
213 | err = amdgpu_ucode_validate(adev->mc.fw); | ||
214 | |||
215 | out: | ||
216 | if (err) { | ||
217 | printk(KERN_ERR | ||
218 | "mc: Failed to load firmware \"%s\"\n", | ||
219 | fw_name); | ||
220 | release_firmware(adev->mc.fw); | ||
221 | adev->mc.fw = NULL; | ||
222 | } | ||
223 | return err; | ||
224 | } | ||
225 | |||
226 | /** | ||
227 | * gmc_v8_0_mc_load_microcode - load MC ucode into the hw | ||
228 | * | ||
229 | * @adev: amdgpu_device pointer | ||
230 | * | ||
231 | * Load the GDDR MC ucode into the hw (VI). | ||
232 | * Returns 0 on success, error on failure. | ||
233 | */ | ||
234 | static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev) | ||
235 | { | ||
236 | const struct mc_firmware_header_v1_0 *hdr; | ||
237 | const __le32 *fw_data = NULL; | ||
238 | const __le32 *io_mc_regs = NULL; | ||
239 | u32 running, blackout = 0; | ||
240 | int i, ucode_size, regs_size; | ||
241 | |||
242 | if (!adev->mc.fw) | ||
243 | return -EINVAL; | ||
244 | |||
245 | hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; | ||
246 | amdgpu_ucode_print_mc_hdr(&hdr->header); | ||
247 | |||
248 | adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); | ||
249 | regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); | ||
250 | io_mc_regs = (const __le32 *) | ||
251 | (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); | ||
252 | ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; | ||
253 | fw_data = (const __le32 *) | ||
254 | (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
255 | |||
256 | running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); | ||
257 | |||
258 | if (running == 0) { | ||
259 | 		if (running) { /* dead: running == 0 in this branch */ | ||
260 | blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); | ||
261 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); | ||
262 | } | ||
263 | |||
264 | /* reset the engine and set to writable */ | ||
265 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); | ||
266 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); | ||
267 | |||
268 | /* load mc io regs */ | ||
269 | for (i = 0; i < regs_size; i++) { | ||
270 | WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++)); | ||
271 | WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++)); | ||
272 | } | ||
273 | /* load the MC ucode */ | ||
274 | for (i = 0; i < ucode_size; i++) | ||
275 | WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++)); | ||
276 | |||
277 | /* put the engine back into the active state */ | ||
278 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); | ||
279 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004); | ||
280 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001); | ||
281 | |||
282 | /* wait for training to complete */ | ||
283 | for (i = 0; i < adev->usec_timeout; i++) { | ||
284 | if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), | ||
285 | MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0)) | ||
286 | break; | ||
287 | udelay(1); | ||
288 | } | ||
289 | for (i = 0; i < adev->usec_timeout; i++) { | ||
290 | if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), | ||
291 | MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1)) | ||
292 | break; | ||
293 | udelay(1); | ||
294 | } | ||
295 | |||
296 | if (running) | ||
297 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); | ||
298 | } | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev, | ||
304 | struct amdgpu_mc *mc) | ||
305 | { | ||
306 | if (mc->mc_vram_size > 0xFFC0000000ULL) { | ||
307 | /* leave room for at least 1024M GTT */ | ||
308 | dev_warn(adev->dev, "limiting VRAM\n"); | ||
309 | mc->real_vram_size = 0xFFC0000000ULL; | ||
310 | mc->mc_vram_size = 0xFFC0000000ULL; | ||
311 | } | ||
312 | amdgpu_vram_location(adev, &adev->mc, 0); | ||
313 | adev->mc.gtt_base_align = 0; | ||
314 | amdgpu_gtt_location(adev, mc); | ||
315 | } | ||
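
The 0xFFC0000000 cap above follows from the 40-bit MC address space: capping VRAM there leaves exactly 1 GiB of the 2^40-byte space for the GTT aperture, matching the "at least 1024M GTT" comment. A quick standalone check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mc_space = 1ULL << 40;           /* 40-bit MC address space */
	uint64_t vram_cap = 0xFFC0000000ULL;      /* cap from the code above */

	printf("room left for GTT: %llu MiB\n",
	       (unsigned long long)((mc_space - vram_cap) >> 20)); /* 1024 MiB */
	return 0;
}
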
316 | |||
317 | /** | ||
318 | * gmc_v8_0_mc_program - program the GPU memory controller | ||
319 | * | ||
320 | * @adev: amdgpu_device pointer | ||
321 | * | ||
322 | * Set the location of vram, gart, and AGP in the GPU's | ||
323 | * physical address space (VI). | ||
324 | */ | ||
325 | static void gmc_v8_0_mc_program(struct amdgpu_device *adev) | ||
326 | { | ||
327 | struct amdgpu_mode_mc_save save; | ||
328 | u32 tmp; | ||
329 | int i, j; | ||
330 | |||
331 | /* Initialize HDP */ | ||
332 | for (i = 0, j = 0; i < 32; i++, j += 0x6) { | ||
333 | WREG32((0xb05 + j), 0x00000000); | ||
334 | WREG32((0xb06 + j), 0x00000000); | ||
335 | WREG32((0xb07 + j), 0x00000000); | ||
336 | WREG32((0xb08 + j), 0x00000000); | ||
337 | WREG32((0xb09 + j), 0x00000000); | ||
338 | } | ||
339 | WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); | ||
340 | |||
341 | if (adev->mode_info.num_crtc) | ||
342 | amdgpu_display_set_vga_render_state(adev, false); | ||
343 | |||
344 | gmc_v8_0_mc_stop(adev, &save); | ||
345 | if (amdgpu_asic_wait_for_mc_idle(adev)) { | ||
346 | 		dev_warn(adev->dev, "Wait for MC idle timed out!\n"); | ||
347 | } | ||
348 | /* Update configuration */ | ||
349 | WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
350 | adev->mc.vram_start >> 12); | ||
351 | WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
352 | adev->mc.vram_end >> 12); | ||
353 | WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, | ||
354 | adev->vram_scratch.gpu_addr >> 12); | ||
355 | tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; | ||
356 | tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); | ||
357 | WREG32(mmMC_VM_FB_LOCATION, tmp); | ||
358 | /* XXX double check these! */ | ||
359 | WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); | ||
360 | WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); | ||
361 | WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); | ||
362 | WREG32(mmMC_VM_AGP_BASE, 0); | ||
363 | WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); | ||
364 | WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); | ||
365 | if (amdgpu_asic_wait_for_mc_idle(adev)) { | ||
366 | 		dev_warn(adev->dev, "Wait for MC idle timed out!\n"); | ||
367 | } | ||
368 | gmc_v8_0_mc_resume(adev, &save); | ||
369 | |||
370 | WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); | ||
371 | |||
372 | tmp = RREG32(mmHDP_MISC_CNTL); | ||
373 | tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); | ||
374 | WREG32(mmHDP_MISC_CNTL, tmp); | ||
375 | |||
376 | tmp = RREG32(mmHDP_HOST_PATH_CNTL); | ||
377 | WREG32(mmHDP_HOST_PATH_CNTL, tmp); | ||
378 | } | ||
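
MC_VM_FB_LOCATION above packs the aperture top into bits 31:16 and the base into bits 15:0, both in units of 16 MiB (address >> 24). A hedged sketch of that packing with example addresses (a 4 GiB aperture at offset 0, not values read from hardware):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vram_start = 0x0ULL;
	uint64_t vram_end   = 0xFFFFFFFFULL;  /* example: 4 GiB - 1 */

	uint32_t tmp = (uint32_t)(((vram_end >> 24) & 0xFFFF) << 16);
	tmp |= (uint32_t)((vram_start >> 24) & 0xFFFF);

	printf("MC_VM_FB_LOCATION = 0x%08x\n", tmp); /* prints 0x00ff0000 */
	return 0;
}
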
379 | |||
380 | /** | ||
381 | * gmc_v8_0_mc_init - initialize the memory controller driver params | ||
382 | * | ||
383 | * @adev: amdgpu_device pointer | ||
384 | * | ||
385 | * Look up the amount of vram, vram width, and decide how to place | ||
386 | * vram and gart within the GPU's physical address space (VI). | ||
387 | * Returns 0 for success. | ||
388 | */ | ||
389 | static int gmc_v8_0_mc_init(struct amdgpu_device *adev) | ||
390 | { | ||
391 | u32 tmp; | ||
392 | int chansize, numchan; | ||
393 | |||
394 | 	/* Get VRAM information */ | ||
395 | tmp = RREG32(mmMC_ARB_RAMCFG); | ||
396 | if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { | ||
397 | chansize = 64; | ||
398 | } else { | ||
399 | chansize = 32; | ||
400 | } | ||
401 | tmp = RREG32(mmMC_SHARED_CHMAP); | ||
402 | switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { | ||
403 | case 0: | ||
404 | default: | ||
405 | numchan = 1; | ||
406 | break; | ||
407 | case 1: | ||
408 | numchan = 2; | ||
409 | break; | ||
410 | case 2: | ||
411 | numchan = 4; | ||
412 | break; | ||
413 | case 3: | ||
414 | numchan = 8; | ||
415 | break; | ||
416 | case 4: | ||
417 | numchan = 3; | ||
418 | break; | ||
419 | case 5: | ||
420 | numchan = 6; | ||
421 | break; | ||
422 | case 6: | ||
423 | numchan = 10; | ||
424 | break; | ||
425 | case 7: | ||
426 | numchan = 12; | ||
427 | break; | ||
428 | case 8: | ||
429 | numchan = 16; | ||
430 | break; | ||
431 | } | ||
432 | adev->mc.vram_width = numchan * chansize; | ||
433 | 	/* Could aper size report 0? */ | ||
434 | adev->mc.aper_base = pci_resource_start(adev->pdev, 0); | ||
435 | adev->mc.aper_size = pci_resource_len(adev->pdev, 0); | ||
436 | 	/* size in MB on vi */ | ||
437 | adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; | ||
438 | adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; | ||
439 | adev->mc.visible_vram_size = adev->mc.aper_size; | ||
440 | |||
441 | 	/* unless the user has overridden it, set the gart | ||
442 | 	 * size equal to 1024 MB or the vram size, whichever is larger. | ||
443 | */ | ||
444 | if (amdgpu_gart_size == -1) | ||
445 | adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); | ||
446 | else | ||
447 | adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; | ||
448 | |||
449 | gmc_v8_0_vram_gtt_location(adev, &adev->mc); | ||
450 | |||
451 | return 0; | ||
452 | } | ||
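
The bus width computed above is channel count times channel size; e.g. NOOFCHAN = 2 decodes to 4 channels, which with 64-bit channels (CHANSIZE set) gives a 256-bit bus. A standalone sketch of just that decode, with the field values assumed for illustration:

#include <stdio.h>

/* Decode of the NOOFCHAN field as done in gmc_v8_0_mc_init(); the field
 * value used below is an assumption for illustration. */
static int numchan_from_field(int noofchan)
{
	switch (noofchan) {
	case 0: default: return 1;
	case 1: return 2;
	case 2: return 4;
	case 3: return 8;
	case 4: return 3;
	case 5: return 6;
	case 6: return 10;
	case 7: return 12;
	case 8: return 16;
	}
}

int main(void)
{
	int chansize = 64;                    /* CHANSIZE bit set -> 64-bit channels */
	int numchan = numchan_from_field(2);  /* assumed field value -> 4 channels */

	printf("vram_width = %d bits\n", numchan * chansize); /* prints 256 */
	return 0;
}
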
453 | |||
454 | /* | ||
455 | * GART | ||
456 | * VMID 0 maps the physical GPU addresses as used by the kernel. | ||
457 | * VMIDs 1-15 are used for userspace clients and are handled | ||
458 | * by the amdgpu vm/hsa code. | ||
459 | */ | ||
460 | |||
461 | /** | ||
462 | * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback | ||
463 | * | ||
464 | * @adev: amdgpu_device pointer | ||
465 | * @vmid: vm instance to flush | ||
466 | * | ||
467 | * Flush the TLB for the requested page table (VI). | ||
468 | */ | ||
469 | static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, | ||
470 | uint32_t vmid) | ||
471 | { | ||
472 | /* flush hdp cache */ | ||
473 | WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); | ||
474 | |||
475 | /* bits 0-15 are the VM contexts0-15 */ | ||
476 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); | ||
477 | } | ||
478 | |||
479 | /** | ||
480 | * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO | ||
481 | * | ||
482 | * @adev: amdgpu_device pointer | ||
483 | * @cpu_pt_addr: cpu address of the page table | ||
484 | * @gpu_page_idx: entry in the page table to update | ||
485 | * @addr: dst addr to write into pte/pde | ||
486 | * @flags: access flags | ||
487 | * | ||
488 | * Update the page tables using the CPU. | ||
489 | */ | ||
490 | static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev, | ||
491 | void *cpu_pt_addr, | ||
492 | uint32_t gpu_page_idx, | ||
493 | uint64_t addr, | ||
494 | uint32_t flags) | ||
495 | { | ||
496 | void __iomem *ptr = (void *)cpu_pt_addr; | ||
497 | uint64_t value; | ||
498 | |||
499 | /* | ||
500 | * PTE format on VI: | ||
501 | * 63:40 reserved | ||
502 | * 39:12 4k physical page base address | ||
503 | * 11:7 fragment | ||
504 | * 6 write | ||
505 | * 5 read | ||
506 | * 4 exe | ||
507 | * 3 reserved | ||
508 | * 2 snooped | ||
509 | * 1 system | ||
510 | * 0 valid | ||
511 | * | ||
512 | * PDE format on VI: | ||
513 | * 63:59 block fragment size | ||
514 | * 58:40 reserved | ||
515 | * 39:1 physical base address of PTE | ||
516 | * bits 5:1 must be 0. | ||
517 | * 0 valid | ||
518 | */ | ||
519 | value = addr & 0x000000FFFFFFF000ULL; | ||
520 | value |= flags; | ||
521 | writeq(value, ptr + (gpu_page_idx * 8)); | ||
522 | |||
523 | return 0; | ||
524 | } | ||
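
Given the VI PTE layout documented in the function above, encoding a page is a mask plus an OR of flag bits. A minimal sketch under that layout; the flag macros and the address are illustrative, not the driver's real AMDGPU_PTE_* definitions:

#include <stdio.h>
#include <stdint.h>

#define PTE_VALID  (1ULL << 0)   /* bit 0: valid */
#define PTE_SYSTEM (1ULL << 1)   /* bit 1: system */
#define PTE_READ   (1ULL << 5)   /* bit 5: read */
#define PTE_WRITE  (1ULL << 6)   /* bit 6: write */

int main(void)
{
	uint64_t addr = 0x123456000ULL;  /* example 4k-aligned page address */
	uint64_t value = (addr & 0x000000FFFFFFF000ULL) |   /* bits 39:12 */
			 PTE_VALID | PTE_SYSTEM | PTE_READ | PTE_WRITE;

	printf("pte = 0x%016llx\n", (unsigned long long)value);
	/* prints: pte = 0x0000000123456063 */
	return 0;
}
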
525 | |||
526 | /** | ||
527 | * gmc_v8_0_gart_enable - gart enable | ||
528 | * | ||
529 | * @adev: amdgpu_device pointer | ||
530 | * | ||
531 | * This sets up the TLBs, programs the page tables for VMID0, | ||
532 | * sets up the hw for VMIDs 1-15 which are allocated on | ||
533 | * demand, and sets up the global locations for the LDS, GDS, | ||
534 | * and GPUVM for FSA64 clients (VI). | ||
535 | * Returns 0 for success, errors for failure. | ||
536 | */ | ||
537 | static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) | ||
538 | { | ||
539 | int r, i; | ||
540 | u32 tmp; | ||
541 | |||
542 | if (adev->gart.robj == NULL) { | ||
543 | dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); | ||
544 | return -EINVAL; | ||
545 | } | ||
546 | r = amdgpu_gart_table_vram_pin(adev); | ||
547 | if (r) | ||
548 | return r; | ||
549 | /* Setup TLB control */ | ||
550 | tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); | ||
551 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); | ||
552 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1); | ||
553 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); | ||
554 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1); | ||
555 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); | ||
556 | WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); | ||
557 | /* Setup L2 cache */ | ||
558 | tmp = RREG32(mmVM_L2_CNTL); | ||
559 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); | ||
560 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); | ||
561 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1); | ||
562 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); | ||
563 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); | ||
564 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); | ||
565 | WREG32(mmVM_L2_CNTL, tmp); | ||
566 | tmp = RREG32(mmVM_L2_CNTL2); | ||
567 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); | ||
568 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); | ||
569 | WREG32(mmVM_L2_CNTL2, tmp); | ||
570 | tmp = RREG32(mmVM_L2_CNTL3); | ||
571 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); | ||
572 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); | ||
573 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); | ||
574 | WREG32(mmVM_L2_CNTL3, tmp); | ||
575 | /* XXX: set to enable PTE/PDE in system memory */ | ||
576 | tmp = RREG32(mmVM_L2_CNTL4); | ||
577 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0); | ||
578 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0); | ||
579 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0); | ||
580 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0); | ||
581 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0); | ||
582 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0); | ||
583 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0); | ||
584 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0); | ||
585 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0); | ||
586 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0); | ||
587 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0); | ||
588 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0); | ||
589 | WREG32(mmVM_L2_CNTL4, tmp); | ||
590 | /* setup context0 */ | ||
591 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); | ||
592 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); | ||
593 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); | ||
594 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | ||
595 | (u32)(adev->dummy_page.addr >> 12)); | ||
596 | WREG32(mmVM_CONTEXT0_CNTL2, 0); | ||
597 | tmp = RREG32(mmVM_CONTEXT0_CNTL); | ||
598 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); | ||
599 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); | ||
600 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
601 | WREG32(mmVM_CONTEXT0_CNTL, tmp); | ||
602 | |||
603 | WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0); | ||
604 | WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0); | ||
605 | WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0); | ||
606 | |||
607 | /* empty context1-15 */ | ||
608 | 	/* FIXME: start with 4GB; once using a 2-level page table, switch | ||
609 | 	 * to the full vm size space | ||
610 | */ | ||
611 | /* set vm size, must be a multiple of 4 */ | ||
612 | WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); | ||
613 | WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn); | ||
614 | for (i = 1; i < 16; i++) { | ||
615 | if (i < 8) | ||
616 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, | ||
617 | adev->gart.table_addr >> 12); | ||
618 | else | ||
619 | WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, | ||
620 | adev->gart.table_addr >> 12); | ||
621 | } | ||
622 | |||
623 | /* enable context1-15 */ | ||
624 | WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, | ||
625 | (u32)(adev->dummy_page.addr >> 12)); | ||
626 | WREG32(mmVM_CONTEXT1_CNTL2, 4); | ||
627 | tmp = RREG32(mmVM_CONTEXT1_CNTL); | ||
628 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); | ||
629 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); | ||
630 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
631 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
632 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
633 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
634 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
635 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
636 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
637 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
638 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
639 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
640 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
641 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
642 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
643 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
644 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, | ||
645 | amdgpu_vm_block_size - 9); | ||
646 | WREG32(mmVM_CONTEXT1_CNTL, tmp); | ||
647 | |||
648 | gmc_v8_0_gart_flush_gpu_tlb(adev, 0); | ||
649 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | ||
650 | (unsigned)(adev->mc.gtt_size >> 20), | ||
651 | (unsigned long long)adev->gart.table_addr); | ||
652 | adev->gart.ready = true; | ||
653 | return 0; | ||
654 | } | ||
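
The per-VMID page-table base registers are split into two banks, which is why the loop above uses VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i for VMIDs 1-7 and VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8 for VMIDs 8-15. A sketch of that selection with placeholder register offsets (the real values come from the gmc_8_1 register headers):

#include <stdio.h>

/* Placeholder offsets, assumed for illustration only. */
#define mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x054f /* assumed */
#define mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x056f /* assumed */

static unsigned int pt_base_reg(unsigned int vmid)
{
	return (vmid < 8) ? mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid
			  : mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
}

int main(void)
{
	unsigned int vmid;

	for (vmid = 1; vmid < 16; vmid++)
		printf("vmid %2u -> reg 0x%04x\n", vmid, pt_base_reg(vmid));
	return 0;
}
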
655 | |||
656 | static int gmc_v8_0_gart_init(struct amdgpu_device *adev) | ||
657 | { | ||
658 | int r; | ||
659 | |||
660 | if (adev->gart.robj) { | ||
661 | 		WARN(1, "VI PCIE GART already initialized\n"); | ||
662 | return 0; | ||
663 | } | ||
664 | /* Initialize common gart structure */ | ||
665 | r = amdgpu_gart_init(adev); | ||
666 | if (r) | ||
667 | return r; | ||
668 | adev->gart.table_size = adev->gart.num_gpu_pages * 8; | ||
669 | return amdgpu_gart_table_vram_alloc(adev); | ||
670 | } | ||
671 | |||
672 | /** | ||
673 | * gmc_v8_0_gart_disable - gart disable | ||
674 | * | ||
675 | * @adev: amdgpu_device pointer | ||
676 | * | ||
677 | * This disables all VM page tables (VI). | ||
678 | */ | ||
679 | static void gmc_v8_0_gart_disable(struct amdgpu_device *adev) | ||
680 | { | ||
681 | u32 tmp; | ||
682 | |||
683 | /* Disable all tables */ | ||
684 | WREG32(mmVM_CONTEXT0_CNTL, 0); | ||
685 | WREG32(mmVM_CONTEXT1_CNTL, 0); | ||
686 | /* Setup TLB control */ | ||
687 | tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); | ||
688 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); | ||
689 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0); | ||
690 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); | ||
691 | WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); | ||
692 | /* Setup L2 cache */ | ||
693 | tmp = RREG32(mmVM_L2_CNTL); | ||
694 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); | ||
695 | WREG32(mmVM_L2_CNTL, tmp); | ||
696 | WREG32(mmVM_L2_CNTL2, 0); | ||
697 | amdgpu_gart_table_vram_unpin(adev); | ||
698 | } | ||
699 | |||
700 | /** | ||
701 | * gmc_v8_0_gart_fini - vm fini callback | ||
702 | * | ||
703 | * @adev: amdgpu_device pointer | ||
704 | * | ||
705 | * Tears down the driver GART/VM setup (VI). | ||
706 | */ | ||
707 | static void gmc_v8_0_gart_fini(struct amdgpu_device *adev) | ||
708 | { | ||
709 | amdgpu_gart_table_vram_free(adev); | ||
710 | amdgpu_gart_fini(adev); | ||
711 | } | ||
712 | |||
713 | /* | ||
714 | * vm | ||
715 | * VMID 0 maps the physical GPU addresses as used by the kernel. | ||
716 | * VMIDs 1-15 are used for userspace clients and are handled | ||
717 | * by the amdgpu vm/hsa code. | ||
718 | */ | ||
719 | /** | ||
720 | * gmc_v8_0_vm_init - vi vm init callback | ||
721 | * | ||
722 | * @adev: amdgpu_device pointer | ||
723 | * | ||
724 | * Inits vi specific vm parameters (number of VMs, base of vram for | ||
725 | * VMIDs 1-15) (VI). | ||
726 | * Returns 0 for success. | ||
727 | */ | ||
728 | static int gmc_v8_0_vm_init(struct amdgpu_device *adev) | ||
729 | { | ||
730 | /* | ||
731 | * number of VMs | ||
732 | * VMID 0 is reserved for System | ||
733 | * amdgpu graphics/compute will use VMIDs 1-7 | ||
734 | * amdkfd will use VMIDs 8-15 | ||
735 | */ | ||
736 | adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; | ||
737 | |||
738 | /* base offset of vram pages */ | ||
739 | if (adev->flags & AMDGPU_IS_APU) { | ||
740 | u64 tmp = RREG32(mmMC_VM_FB_OFFSET); | ||
741 | tmp <<= 22; | ||
742 | adev->vm_manager.vram_base_offset = tmp; | ||
743 | } else | ||
744 | adev->vm_manager.vram_base_offset = 0; | ||
745 | |||
746 | return 0; | ||
747 | } | ||
748 | |||
749 | /** | ||
750 | * gmc_v8_0_vm_fini - vi vm fini callback | ||
751 | * | ||
752 | * @adev: amdgpu_device pointer | ||
753 | * | ||
754 | * Tear down any asic specific VM setup (VI). | ||
755 | */ | ||
756 | static void gmc_v8_0_vm_fini(struct amdgpu_device *adev) | ||
757 | { | ||
758 | } | ||
759 | |||
760 | /** | ||
761 | * gmc_v8_0_vm_decode_fault - print human readable fault info | ||
762 | * | ||
763 | * @adev: amdgpu_device pointer | ||
764 | * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value | ||
765 | * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value | ||
766 | * | ||
767 | * Print human-readable fault information (VI). | ||
768 | */ | ||
769 | static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, | ||
770 | u32 status, u32 addr, u32 mc_client) | ||
771 | { | ||
772 | u32 mc_id; | ||
773 | u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); | ||
774 | u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, | ||
775 | PROTECTIONS); | ||
776 | char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, | ||
777 | (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; | ||
778 | |||
779 | mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, | ||
780 | MEMORY_CLIENT_ID); | ||
781 | |||
782 | printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", | ||
783 | protections, vmid, addr, | ||
784 | REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, | ||
785 | MEMORY_CLIENT_RW) ? | ||
786 | "write" : "read", block, mc_client, mc_id); | ||
787 | } | ||
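
The mc_client value decoded above is four packed ASCII characters, unpacked most-significant byte first by the block[] initializer. A quick standalone check with an invented client value:

#include <stdio.h>

int main(void)
{
	unsigned int mc_client = 0x43425f30; /* invented example, not a real client ID */
	char block[5] = { (char)(mc_client >> 24), (char)((mc_client >> 16) & 0xff),
			  (char)((mc_client >> 8) & 0xff), (char)(mc_client & 0xff), 0 };

	printf("client '%s'\n", block); /* prints: client 'CB_0' */
	return 0;
}
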
788 | |||
789 | static int gmc_v8_0_early_init(struct amdgpu_device *adev) | ||
790 | { | ||
791 | gmc_v8_0_set_gart_funcs(adev); | ||
792 | gmc_v8_0_set_irq_funcs(adev); | ||
793 | |||
794 | if (adev->flags & AMDGPU_IS_APU) { | ||
795 | adev->mc.is_gddr5 = false; | ||
796 | } else { | ||
797 | u32 tmp = RREG32(mmMC_SEQ_MISC0); | ||
798 | |||
799 | if (((tmp & MC_SEQ_MISC0__GDDR5_MASK) >> | ||
800 | MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE) | ||
801 | adev->mc.is_gddr5 = true; | ||
802 | else | ||
803 | adev->mc.is_gddr5 = false; | ||
804 | } | ||
805 | |||
806 | return 0; | ||
807 | } | ||
808 | |||
809 | static int gmc_v8_0_sw_init(struct amdgpu_device *adev) | ||
810 | { | ||
811 | int r; | ||
812 | int dma_bits; | ||
813 | |||
814 | r = amdgpu_gem_init(adev); | ||
815 | if (r) | ||
816 | return r; | ||
817 | |||
818 | r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); | ||
819 | if (r) | ||
820 | return r; | ||
821 | |||
822 | r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault); | ||
823 | if (r) | ||
824 | return r; | ||
825 | |||
826 | /* Adjust VM size here. | ||
827 | * Currently set to 4GB ((1 << 20) 4k pages). | ||
828 | 	 * Max GPUVM size for VI is 40 bits. | ||
829 | */ | ||
830 | adev->vm_manager.max_pfn = amdgpu_vm_size << 18; | ||
831 | |||
832 | /* Set the internal MC address mask | ||
833 | * This is the max address of the GPU's | ||
834 | * internal address space. | ||
835 | */ | ||
836 | adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ | ||
837 | |||
838 | /* set DMA mask + need_dma32 flags. | ||
839 | * PCIE - can handle 40-bits. | ||
840 | * IGP - can handle 40-bits | ||
841 | * PCI - dma32 for legacy pci gart, 40 bits on newer asics | ||
842 | */ | ||
843 | adev->need_dma32 = false; | ||
844 | dma_bits = adev->need_dma32 ? 32 : 40; | ||
845 | r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); | ||
846 | if (r) { | ||
847 | adev->need_dma32 = true; | ||
848 | dma_bits = 32; | ||
849 | printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); | ||
850 | } | ||
851 | r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); | ||
852 | if (r) { | ||
853 | pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); | ||
854 | printk(KERN_WARNING "amdgpu: No coherent DMA available.\n"); | ||
855 | } | ||
856 | |||
857 | r = gmc_v8_0_init_microcode(adev); | ||
858 | if (r) { | ||
859 | DRM_ERROR("Failed to load mc firmware!\n"); | ||
860 | return r; | ||
861 | } | ||
862 | |||
863 | r = gmc_v8_0_mc_init(adev); | ||
864 | if (r) | ||
865 | return r; | ||
866 | |||
867 | /* Memory manager */ | ||
868 | r = amdgpu_bo_init(adev); | ||
869 | if (r) | ||
870 | return r; | ||
871 | |||
872 | r = gmc_v8_0_gart_init(adev); | ||
873 | if (r) | ||
874 | return r; | ||
875 | |||
876 | if (!adev->vm_manager.enabled) { | ||
877 | r = gmc_v8_0_vm_init(adev); | ||
878 | if (r) { | ||
879 | dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); | ||
880 | return r; | ||
881 | } | ||
882 | adev->vm_manager.enabled = true; | ||
883 | } | ||
884 | |||
885 | return r; | ||
886 | } | ||
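
The max_pfn computation above converts the VM size module parameter from GiB to 4 KiB pages: (size << 30) bytes divided by 2^12 bytes per page is size << 18. A worked check for the 4 GiB default mentioned in the comment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t amdgpu_vm_size = 4;               /* GiB, the default cited above */
	uint64_t max_pfn = amdgpu_vm_size << 18;   /* GiB -> 4 KiB pages */

	printf("max_pfn = %llu pages (%llu GiB)\n",
	       (unsigned long long)max_pfn,
	       (unsigned long long)(max_pfn >> 18));
	/* prints: max_pfn = 1048576 pages (4 GiB) */
	return 0;
}
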
887 | |||
888 | static int gmc_v8_0_sw_fini(struct amdgpu_device *adev) | ||
889 | { | ||
890 | int i; | ||
891 | |||
892 | if (adev->vm_manager.enabled) { | ||
893 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | ||
894 | amdgpu_fence_unref(&adev->vm_manager.active[i]); | ||
895 | gmc_v8_0_vm_fini(adev); | ||
896 | adev->vm_manager.enabled = false; | ||
897 | } | ||
898 | gmc_v8_0_gart_fini(adev); | ||
899 | amdgpu_gem_fini(adev); | ||
900 | amdgpu_bo_fini(adev); | ||
901 | |||
902 | return 0; | ||
903 | } | ||
904 | |||
905 | static int gmc_v8_0_hw_init(struct amdgpu_device *adev) | ||
906 | { | ||
907 | int r; | ||
908 | |||
909 | gmc_v8_0_init_golden_registers(adev); | ||
910 | |||
911 | gmc_v8_0_mc_program(adev); | ||
912 | |||
913 | if (!(adev->flags & AMDGPU_IS_APU)) { | ||
914 | r = gmc_v8_0_mc_load_microcode(adev); | ||
915 | if (r) { | ||
916 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
917 | return r; | ||
918 | } | ||
919 | } | ||
920 | |||
921 | r = gmc_v8_0_gart_enable(adev); | ||
922 | if (r) | ||
923 | return r; | ||
924 | |||
925 | return r; | ||
926 | } | ||
927 | |||
928 | static int gmc_v8_0_hw_fini(struct amdgpu_device *adev) | ||
929 | { | ||
930 | gmc_v8_0_gart_disable(adev); | ||
931 | |||
932 | return 0; | ||
933 | } | ||
934 | |||
935 | static int gmc_v8_0_suspend(struct amdgpu_device *adev) | ||
936 | { | ||
937 | int i; | ||
938 | |||
939 | if (adev->vm_manager.enabled) { | ||
940 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | ||
941 | amdgpu_fence_unref(&adev->vm_manager.active[i]); | ||
942 | gmc_v8_0_vm_fini(adev); | ||
943 | adev->vm_manager.enabled = false; | ||
944 | } | ||
945 | gmc_v8_0_hw_fini(adev); | ||
946 | |||
947 | return 0; | ||
948 | } | ||
949 | |||
950 | static int gmc_v8_0_resume(struct amdgpu_device *adev) | ||
951 | { | ||
952 | int r; | ||
953 | |||
954 | r = gmc_v8_0_hw_init(adev); | ||
955 | if (r) | ||
956 | return r; | ||
957 | |||
958 | if (!adev->vm_manager.enabled) { | ||
959 | r = gmc_v8_0_vm_init(adev); | ||
960 | if (r) { | ||
961 | dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); | ||
962 | return r; | ||
963 | } | ||
964 | adev->vm_manager.enabled = true; | ||
965 | } | ||
966 | |||
967 | return r; | ||
968 | } | ||
969 | |||
970 | static bool gmc_v8_0_is_idle(struct amdgpu_device *adev) | ||
971 | { | ||
972 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
973 | |||
974 | if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | ||
975 | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK)) | ||
976 | return false; | ||
977 | |||
978 | return true; | ||
979 | } | ||
980 | |||
981 | static int gmc_v8_0_wait_for_idle(struct amdgpu_device *adev) | ||
982 | { | ||
983 | unsigned i; | ||
984 | u32 tmp; | ||
985 | |||
986 | for (i = 0; i < adev->usec_timeout; i++) { | ||
987 | /* read MC_STATUS */ | ||
988 | tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK | | ||
989 | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | ||
990 | SRBM_STATUS__MCC_BUSY_MASK | | ||
991 | SRBM_STATUS__MCD_BUSY_MASK | | ||
992 | SRBM_STATUS__VMC_BUSY_MASK | | ||
993 | SRBM_STATUS__VMC1_BUSY_MASK); | ||
994 | if (!tmp) | ||
995 | return 0; | ||
996 | udelay(1); | ||
997 | } | ||
998 | return -ETIMEDOUT; | ||
999 | |||
1000 | } | ||
1001 | |||
1002 | static void gmc_v8_0_print_status(struct amdgpu_device *adev) | ||
1003 | { | ||
1004 | int i, j; | ||
1005 | |||
1006 | dev_info(adev->dev, "GMC 8.x registers\n"); | ||
1007 | dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", | ||
1008 | RREG32(mmSRBM_STATUS)); | ||
1009 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | ||
1010 | RREG32(mmSRBM_STATUS2)); | ||
1011 | |||
1012 | dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | ||
1013 | RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); | ||
1014 | dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | ||
1015 | RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); | ||
1016 | dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n", | ||
1017 | RREG32(mmMC_VM_MX_L1_TLB_CNTL)); | ||
1018 | dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n", | ||
1019 | RREG32(mmVM_L2_CNTL)); | ||
1020 | dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n", | ||
1021 | RREG32(mmVM_L2_CNTL2)); | ||
1022 | dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n", | ||
1023 | RREG32(mmVM_L2_CNTL3)); | ||
1024 | dev_info(adev->dev, " VM_L2_CNTL4=0x%08X\n", | ||
1025 | RREG32(mmVM_L2_CNTL4)); | ||
1026 | dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n", | ||
1027 | RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR)); | ||
1028 | dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n", | ||
1029 | RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR)); | ||
1030 | dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", | ||
1031 | RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR)); | ||
1032 | dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n", | ||
1033 | RREG32(mmVM_CONTEXT0_CNTL2)); | ||
1034 | dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n", | ||
1035 | RREG32(mmVM_CONTEXT0_CNTL)); | ||
1036 | dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n", | ||
1037 | RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR)); | ||
1038 | dev_info(adev->dev, " VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n", | ||
1039 | RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR)); | ||
1040 | dev_info(adev->dev, " mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n", | ||
1041 | RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET)); | ||
1042 | dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n", | ||
1043 | RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR)); | ||
1044 | dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n", | ||
1045 | RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR)); | ||
1046 | dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", | ||
1047 | RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR)); | ||
1048 | dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n", | ||
1049 | RREG32(mmVM_CONTEXT1_CNTL2)); | ||
1050 | dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n", | ||
1051 | RREG32(mmVM_CONTEXT1_CNTL)); | ||
1052 | for (i = 0; i < 16; i++) { | ||
1053 | if (i < 8) | ||
1054 | dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", | ||
1055 | i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i)); | ||
1056 | else | ||
1057 | dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", | ||
1058 | i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8)); | ||
1059 | } | ||
1060 | dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n", | ||
1061 | RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR)); | ||
1062 | dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n", | ||
1063 | RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR)); | ||
1064 | dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n", | ||
1065 | RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR)); | ||
1066 | dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n", | ||
1067 | RREG32(mmMC_VM_FB_LOCATION)); | ||
1068 | dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n", | ||
1069 | RREG32(mmMC_VM_AGP_BASE)); | ||
1070 | dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n", | ||
1071 | RREG32(mmMC_VM_AGP_TOP)); | ||
1072 | dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n", | ||
1073 | RREG32(mmMC_VM_AGP_BOT)); | ||
1074 | |||
1075 | dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n", | ||
1076 | RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL)); | ||
1077 | dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n", | ||
1078 | RREG32(mmHDP_NONSURFACE_BASE)); | ||
1079 | dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n", | ||
1080 | RREG32(mmHDP_NONSURFACE_INFO)); | ||
1081 | dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n", | ||
1082 | RREG32(mmHDP_NONSURFACE_SIZE)); | ||
1083 | dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n", | ||
1084 | RREG32(mmHDP_MISC_CNTL)); | ||
1085 | dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n", | ||
1086 | RREG32(mmHDP_HOST_PATH_CNTL)); | ||
1087 | |||
1088 | for (i = 0, j = 0; i < 32; i++, j += 0x6) { | ||
1089 | dev_info(adev->dev, " %d:\n", i); | ||
1090 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1091 | 0xb05 + j, RREG32(0xb05 + j)); | ||
1092 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1093 | 0xb06 + j, RREG32(0xb06 + j)); | ||
1094 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1095 | 0xb07 + j, RREG32(0xb07 + j)); | ||
1096 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1097 | 0xb08 + j, RREG32(0xb08 + j)); | ||
1098 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1099 | 0xb09 + j, RREG32(0xb09 + j)); | ||
1100 | } | ||
1101 | |||
1102 | dev_info(adev->dev, " BIF_FB_EN=0x%08X\n", | ||
1103 | RREG32(mmBIF_FB_EN)); | ||
1104 | } | ||
1105 | |||
1106 | static int gmc_v8_0_soft_reset(struct amdgpu_device *adev) | ||
1107 | { | ||
1108 | struct amdgpu_mode_mc_save save; | ||
1109 | u32 srbm_soft_reset = 0; | ||
1110 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
1111 | |||
1112 | if (tmp & SRBM_STATUS__VMC_BUSY_MASK) | ||
1113 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, | ||
1114 | SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); | ||
1115 | |||
1116 | if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | ||
1117 | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { | ||
1118 | if (!(adev->flags & AMDGPU_IS_APU)) | ||
1119 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, | ||
1120 | SRBM_SOFT_RESET, SOFT_RESET_MC, 1); | ||
1121 | } | ||
1122 | |||
1123 | if (srbm_soft_reset) { | ||
1124 | gmc_v8_0_print_status(adev); | ||
1125 | |||
1126 | gmc_v8_0_mc_stop(adev, &save); | ||
1127 | if (gmc_v8_0_wait_for_idle(adev)) { | ||
1128 | dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); | ||
1129 | } | ||
1130 | |||
1131 | |||
1132 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1133 | tmp |= srbm_soft_reset; | ||
1134 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
1135 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
1136 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1137 | |||
1138 | udelay(50); | ||
1139 | |||
1140 | tmp &= ~srbm_soft_reset; | ||
1141 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
1142 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1143 | |||
1144 | /* Wait a little for things to settle down */ | ||
1145 | udelay(50); | ||
1146 | |||
1147 | gmc_v8_0_mc_resume(adev, &save); | ||
1148 | udelay(50); | ||
1149 | |||
1150 | gmc_v8_0_print_status(adev); | ||
1151 | } | ||
1152 | |||
1153 | return 0; | ||
1154 | } | ||
1155 | |||
1156 | static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev, | ||
1157 | struct amdgpu_irq_src *src, | ||
1158 | unsigned type, | ||
1159 | enum amdgpu_interrupt_state state) | ||
1160 | { | ||
1161 | u32 tmp; | ||
1162 | u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1163 | VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1164 | VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1165 | VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1166 | VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1167 | VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1168 | VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK); | ||
1169 | |||
1170 | switch (state) { | ||
1171 | case AMDGPU_IRQ_STATE_DISABLE: | ||
1172 | /* system context */ | ||
1173 | tmp = RREG32(mmVM_CONTEXT0_CNTL); | ||
1174 | tmp &= ~bits; | ||
1175 | WREG32(mmVM_CONTEXT0_CNTL, tmp); | ||
1176 | /* VMs */ | ||
1177 | tmp = RREG32(mmVM_CONTEXT1_CNTL); | ||
1178 | tmp &= ~bits; | ||
1179 | WREG32(mmVM_CONTEXT1_CNTL, tmp); | ||
1180 | break; | ||
1181 | case AMDGPU_IRQ_STATE_ENABLE: | ||
1182 | /* system context */ | ||
1183 | tmp = RREG32(mmVM_CONTEXT0_CNTL); | ||
1184 | tmp |= bits; | ||
1185 | WREG32(mmVM_CONTEXT0_CNTL, tmp); | ||
1186 | /* VMs */ | ||
1187 | tmp = RREG32(mmVM_CONTEXT1_CNTL); | ||
1188 | tmp |= bits; | ||
1189 | WREG32(mmVM_CONTEXT1_CNTL, tmp); | ||
1190 | break; | ||
1191 | default: | ||
1192 | break; | ||
1193 | } | ||
1194 | |||
1195 | return 0; | ||
1196 | } | ||
1197 | |||
1198 | static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev, | ||
1199 | struct amdgpu_irq_src *source, | ||
1200 | struct amdgpu_iv_entry *entry) | ||
1201 | { | ||
1202 | u32 addr, status, mc_client; | ||
1203 | |||
1204 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); | ||
1205 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); | ||
1206 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); | ||
1207 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", | ||
1208 | entry->src_id, entry->src_data); | ||
1209 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | ||
1210 | addr); | ||
1211 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | ||
1212 | status); | ||
1213 | gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client); | ||
1214 | /* reset addr and status */ | ||
1215 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1216 | |||
1217 | return 0; | ||
1218 | } | ||
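
WREG32_P at the end of the handler is the driver's masked read-modify-write helper, so WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1) sets only bit 0 (the fault-clear bit) and preserves the rest of the register. A minimal model of that pattern, assuming the usual (old & mask) | (val & ~mask) semantics:

#include <stdio.h>

static unsigned int masked_write(unsigned int old, unsigned int val,
				 unsigned int mask)
{
	/* keep the bits in mask, OR in the new value outside it */
	return (old & mask) | (val & ~mask);
}

int main(void)
{
	unsigned int cntl2 = 0x00000004;  /* invented current register value */

	printf("0x%08x\n", masked_write(cntl2, 1, ~1u)); /* prints 0x00000005 */
	return 0;
}
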
1219 | |||
1220 | static int gmc_v8_0_set_clockgating_state(struct amdgpu_device *adev, | ||
1221 | enum amdgpu_clockgating_state state) | ||
1222 | { | ||
1223 | /* XXX handled via the smc on VI */ | ||
1224 | |||
1225 | return 0; | ||
1226 | } | ||
1227 | |||
1228 | static int gmc_v8_0_set_powergating_state(struct amdgpu_device *adev, | ||
1229 | enum amdgpu_powergating_state state) | ||
1230 | { | ||
1231 | return 0; | ||
1232 | } | ||
1233 | |||
1234 | const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs = { | ||
1235 | .early_init = gmc_v8_0_early_init, | ||
1236 | .late_init = NULL, | ||
1237 | .sw_init = gmc_v8_0_sw_init, | ||
1238 | .sw_fini = gmc_v8_0_sw_fini, | ||
1239 | .hw_init = gmc_v8_0_hw_init, | ||
1240 | .hw_fini = gmc_v8_0_hw_fini, | ||
1241 | .suspend = gmc_v8_0_suspend, | ||
1242 | .resume = gmc_v8_0_resume, | ||
1243 | .is_idle = gmc_v8_0_is_idle, | ||
1244 | .wait_for_idle = gmc_v8_0_wait_for_idle, | ||
1245 | .soft_reset = gmc_v8_0_soft_reset, | ||
1246 | .print_status = gmc_v8_0_print_status, | ||
1247 | .set_clockgating_state = gmc_v8_0_set_clockgating_state, | ||
1248 | .set_powergating_state = gmc_v8_0_set_powergating_state, | ||
1249 | }; | ||
1250 | |||
1251 | static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = { | ||
1252 | .flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb, | ||
1253 | .set_pte_pde = gmc_v8_0_gart_set_pte_pde, | ||
1254 | }; | ||
1255 | |||
1256 | static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = { | ||
1257 | .set = gmc_v8_0_vm_fault_interrupt_state, | ||
1258 | .process = gmc_v8_0_process_interrupt, | ||
1259 | }; | ||
1260 | |||
1261 | static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev) | ||
1262 | { | ||
1263 | if (adev->gart.gart_funcs == NULL) | ||
1264 | adev->gart.gart_funcs = &gmc_v8_0_gart_funcs; | ||
1265 | } | ||
1266 | |||
1267 | static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) | ||
1268 | { | ||
1269 | adev->mc.vm_fault.num_types = 1; | ||
1270 | adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs; | ||
1271 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h new file mode 100644 index 000000000000..2dd7f809d4e1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __GMC_V8_0_H__ | ||
25 | #define __GMC_V8_0_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs; | ||
28 | |||
29 | /* XXX these shouldn't be exported */ | ||
30 | void gmc_v8_0_mc_stop(struct amdgpu_device *adev, | ||
31 | struct amdgpu_mode_mc_save *save); | ||
32 | void gmc_v8_0_mc_resume(struct amdgpu_device *adev, | ||
33 | struct amdgpu_mode_mc_save *save); | ||
34 | int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev); | ||
35 | |||
36 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c new file mode 100644 index 000000000000..8f5c54be70b0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/firmware.h> | ||
25 | #include "drmP.h" | ||
26 | #include "amdgpu.h" | ||
27 | #include "iceland_smumgr.h" | ||
28 | |||
29 | MODULE_FIRMWARE("radeon/topaz_smc.bin"); | ||
30 | |||
31 | static void iceland_dpm_set_funcs(struct amdgpu_device *adev); | ||
32 | |||
33 | static int iceland_dpm_early_init(struct amdgpu_device *adev) | ||
34 | { | ||
35 | iceland_dpm_set_funcs(adev); | ||
36 | |||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | static int iceland_dpm_init_microcode(struct amdgpu_device *adev) | ||
41 | { | ||
42 | char fw_name[30] = "radeon/topaz_smc.bin"; | ||
43 | int err; | ||
44 | |||
45 | err = request_firmware(&adev->pm.fw, fw_name, adev->dev); | ||
46 | if (err) | ||
47 | goto out; | ||
48 | err = amdgpu_ucode_validate(adev->pm.fw); | ||
49 | |||
50 | out: | ||
51 | if (err) { | ||
52 | 		DRM_ERROR("Failed to load firmware \"%s\"\n", fw_name); | ||
53 | release_firmware(adev->pm.fw); | ||
54 | adev->pm.fw = NULL; | ||
55 | } | ||
56 | return err; | ||
57 | } | ||
58 | |||
59 | static int iceland_dpm_sw_init(struct amdgpu_device *adev) | ||
60 | { | ||
61 | int ret; | ||
62 | |||
63 | ret = iceland_dpm_init_microcode(adev); | ||
64 | if (ret) | ||
65 | return ret; | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static int iceland_dpm_sw_fini(struct amdgpu_device *adev) | ||
71 | { | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static int iceland_dpm_hw_init(struct amdgpu_device *adev) | ||
76 | { | ||
77 | int ret; | ||
78 | |||
79 | mutex_lock(&adev->pm.mutex); | ||
80 | |||
81 | ret = iceland_smu_init(adev); | ||
82 | if (ret) { | ||
83 | DRM_ERROR("SMU initialization failed\n"); | ||
84 | goto fail; | ||
85 | } | ||
86 | |||
87 | ret = iceland_smu_start(adev); | ||
88 | if (ret) { | ||
89 | DRM_ERROR("SMU start failed\n"); | ||
90 | goto fail; | ||
91 | } | ||
92 | |||
93 | mutex_unlock(&adev->pm.mutex); | ||
94 | return 0; | ||
95 | |||
96 | fail: | ||
97 | adev->firmware.smu_load = false; | ||
98 | mutex_unlock(&adev->pm.mutex); | ||
99 | return -EINVAL; | ||
100 | } | ||
101 | |||
102 | static int iceland_dpm_hw_fini(struct amdgpu_device *adev) | ||
103 | { | ||
104 | mutex_lock(&adev->pm.mutex); | ||
105 | iceland_smu_fini(adev); | ||
106 | mutex_unlock(&adev->pm.mutex); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int iceland_dpm_suspend(struct amdgpu_device *adev) | ||
111 | { | ||
112 | iceland_dpm_hw_fini(adev); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static int iceland_dpm_resume(struct amdgpu_device *adev) | ||
118 | { | ||
119 | iceland_dpm_hw_init(adev); | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static int iceland_dpm_set_clockgating_state(struct amdgpu_device *adev, | ||
125 | enum amdgpu_clockgating_state state) | ||
126 | { | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static int iceland_dpm_set_powergating_state(struct amdgpu_device *adev, | ||
131 | enum amdgpu_powergating_state state) | ||
132 | { | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | const struct amdgpu_ip_funcs iceland_dpm_ip_funcs = { | ||
137 | .early_init = iceland_dpm_early_init, | ||
138 | .late_init = NULL, | ||
139 | .sw_init = iceland_dpm_sw_init, | ||
140 | .sw_fini = iceland_dpm_sw_fini, | ||
141 | .hw_init = iceland_dpm_hw_init, | ||
142 | .hw_fini = iceland_dpm_hw_fini, | ||
143 | .suspend = iceland_dpm_suspend, | ||
144 | .resume = iceland_dpm_resume, | ||
145 | .is_idle = NULL, | ||
146 | .wait_for_idle = NULL, | ||
147 | .soft_reset = NULL, | ||
148 | .print_status = NULL, | ||
149 | .set_clockgating_state = iceland_dpm_set_clockgating_state, | ||
150 | .set_powergating_state = iceland_dpm_set_powergating_state, | ||
151 | }; | ||
152 | |||
153 | static const struct amdgpu_dpm_funcs iceland_dpm_funcs = { | ||
154 | .get_temperature = NULL, | ||
155 | .pre_set_power_state = NULL, | ||
156 | .set_power_state = NULL, | ||
157 | .post_set_power_state = NULL, | ||
158 | .display_configuration_changed = NULL, | ||
159 | .get_sclk = NULL, | ||
160 | .get_mclk = NULL, | ||
161 | .print_power_state = NULL, | ||
162 | .debugfs_print_current_performance_level = NULL, | ||
163 | .force_performance_level = NULL, | ||
164 | .vblank_too_short = NULL, | ||
165 | .powergate_uvd = NULL, | ||
166 | }; | ||
167 | |||
168 | static void iceland_dpm_set_funcs(struct amdgpu_device *adev) | ||
169 | { | ||
170 | if (adev->pm.funcs == NULL) | ||
171 | adev->pm.funcs = &iceland_dpm_funcs; | ||
172 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c new file mode 100644 index 000000000000..2de8adfac471 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c | |||
@@ -0,0 +1,435 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include "drmP.h" | ||
24 | #include "amdgpu.h" | ||
25 | #include "amdgpu_ih.h" | ||
26 | #include "vid.h" | ||
27 | |||
28 | #include "oss/oss_2_4_d.h" | ||
29 | #include "oss/oss_2_4_sh_mask.h" | ||
30 | |||
31 | #include "bif/bif_5_1_d.h" | ||
32 | #include "bif/bif_5_1_sh_mask.h" | ||
33 | |||
34 | /* | ||
35 | * Interrupts | ||
36 | * Starting with r6xx, interrupts are handled via a ring buffer. | ||
37 | * Ring buffers are areas of GPU accessible memory that the GPU | ||
38 | * writes interrupt vectors into and the host reads vectors out of. | ||
39 | * There is a rptr (read pointer) that determines where the | ||
40 | * host is currently reading, and a wptr (write pointer) | ||
41 | * which determines where the GPU has written. When the | ||
42 | * pointers are equal, the ring is idle. When the GPU | ||
43 | * writes vectors to the ring buffer, it increments the | ||
44 | * wptr. When there is an interrupt, the host then starts | ||
45 | * fetching vectors and processing them until the pointers are | ||
46 | * equal again at which point it updates the rptr. | ||
47 | */ | ||
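Editorial note: the rptr/wptr protocol described above boils down to a simple consumption loop on the host side. A minimal sketch, assuming the iceland_ih_* hooks defined later in this file and a hypothetical process_vector() dispatch helper:

	u32 wptr = iceland_ih_get_wptr(adev);	/* last position written by the GPU */

	while (adev->irq.ih.rptr != wptr) {
		struct amdgpu_iv_entry entry;

		iceland_ih_decode_iv(adev, &entry);	/* reads one vector, advances rptr */
		process_vector(adev, &entry);		/* hypothetical dispatch helper */
	}
	iceland_ih_set_rptr(adev);	/* publish the new rptr so the ring drains */

In the driver itself this loop lives in the shared amdgpu IRQ code, which reaches these functions through the adev->irq.ih_funcs hooks registered at the bottom of this file.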
48 | |||
49 | static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev); | ||
50 | |||
51 | /** | ||
52 | * iceland_ih_enable_interrupts - Enable the interrupt ring buffer | ||
53 | * | ||
54 | * @adev: amdgpu_device pointer | ||
55 | * | ||
56 | * Enable the interrupt ring buffer (VI). | ||
57 | */ | ||
58 | static void iceland_ih_enable_interrupts(struct amdgpu_device *adev) | ||
59 | { | ||
60 | u32 ih_cntl = RREG32(mmIH_CNTL); | ||
61 | u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); | ||
62 | |||
63 | ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 1); | ||
64 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); | ||
65 | WREG32(mmIH_CNTL, ih_cntl); | ||
66 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
67 | adev->irq.ih.enabled = true; | ||
68 | } | ||
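For reference, REG_SET_FIELD() is a read-modify-write helper built from the per-field __SHIFT/_MASK constants in the *_sh_mask.h headers. Roughly, the ENABLE_INTR update above expands to:

	ih_cntl = (ih_cntl & ~IH_CNTL__ENABLE_INTR_MASK) |
		  (IH_CNTL__ENABLE_INTR_MASK & (1 << IH_CNTL__ENABLE_INTR__SHIFT));

i.e. clear the field, then OR in the new value shifted into place.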
69 | |||
70 | /** | ||
71 | * iceland_ih_disable_interrupts - Disable the interrupt ring buffer | ||
72 | * | ||
73 | * @adev: amdgpu_device pointer | ||
74 | * | ||
75 | * Disable the interrupt ring buffer (VI). | ||
76 | */ | ||
77 | static void iceland_ih_disable_interrupts(struct amdgpu_device *adev) | ||
78 | { | ||
79 | u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); | ||
80 | u32 ih_cntl = RREG32(mmIH_CNTL); | ||
81 | |||
82 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); | ||
83 | ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, ENABLE_INTR, 0); | ||
84 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
85 | WREG32(mmIH_CNTL, ih_cntl); | ||
86 | /* set rptr, wptr to 0 */ | ||
87 | WREG32(mmIH_RB_RPTR, 0); | ||
88 | WREG32(mmIH_RB_WPTR, 0); | ||
89 | adev->irq.ih.enabled = false; | ||
90 | adev->irq.ih.rptr = 0; | ||
91 | } | ||
92 | |||
93 | /** | ||
94 | * iceland_ih_irq_init - init and enable the interrupt ring | ||
95 | * | ||
96 | * @adev: amdgpu_device pointer | ||
97 | * | ||
98 | * Disable interrupts, program the interrupt ring buffer | ||
99 | * registers (base, size, wptr writeback address) and | ||
100 | * enable the IH ring buffer (VI). | ||
101 | * Called at device load and resume. | ||
102 | * Returns 0 for success, errors for failure. | ||
103 | */ | ||
104 | static int iceland_ih_irq_init(struct amdgpu_device *adev) | ||
105 | { | ||
106 | int ret = 0; | ||
107 | int rb_bufsz; | ||
108 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | ||
109 | u64 wptr_off; | ||
110 | |||
111 | /* disable irqs */ | ||
112 | iceland_ih_disable_interrupts(adev); | ||
113 | |||
114 | /* setup interrupt control */ | ||
115 | WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); | ||
116 | interrupt_cntl = RREG32(mmINTERRUPT_CNTL); | ||
117 | /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi | ||
118 | * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN | ||
119 | */ | ||
120 | interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0); | ||
121 | /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ | ||
122 | interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0); | ||
123 | WREG32(mmINTERRUPT_CNTL, interrupt_cntl); | ||
124 | |||
125 | /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ | ||
126 | WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8); | ||
127 | |||
128 | rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); | ||
129 | ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1); | ||
130 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); | ||
131 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); | ||
132 | |||
133 | /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */ | ||
134 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1); | ||
135 | |||
136 | /* set the writeback address whether it's enabled or not */ | ||
137 | wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); | ||
138 | WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); | ||
139 | WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); | ||
140 | |||
141 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
142 | |||
143 | /* set rptr, wptr to 0 */ | ||
144 | WREG32(mmIH_RB_RPTR, 0); | ||
145 | WREG32(mmIH_RB_WPTR, 0); | ||
146 | |||
147 | /* Default settings for IH_CNTL (disabled at first) */ | ||
148 | ih_cntl = RREG32(mmIH_CNTL); | ||
149 | ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, MC_VMID, 0); | ||
150 | |||
151 | if (adev->irq.msi_enabled) | ||
152 | ih_cntl = REG_SET_FIELD(ih_cntl, IH_CNTL, RPTR_REARM, 1); | ||
153 | WREG32(mmIH_CNTL, ih_cntl); | ||
154 | |||
155 | pci_set_master(adev->pdev); | ||
156 | |||
157 | /* enable interrupts */ | ||
158 | iceland_ih_enable_interrupts(adev); | ||
159 | |||
160 | return ret; | ||
161 | } | ||
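To make the RB_SIZE programming above concrete, a worked example for the 64 KB ring that iceland_ih_sw_init() below allocates:

	ring_size / 4 = 65536 / 4 = 16384 dwords
	rb_bufsz      = order_base_2(16384) = 14
	16384 dwords  = 4096 four-dword interrupt vectors

so IH_RB_CNTL.RB_SIZE is programmed with 14, and the ring holds 4096 vectors before it can overflow.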
162 | |||
163 | /** | ||
164 | * iceland_ih_irq_disable - disable interrupts | ||
165 | * | ||
166 | * @adev: amdgpu_device pointer | ||
167 | * | ||
168 | * Disable interrupts on the hw (VI). | ||
169 | */ | ||
170 | static void iceland_ih_irq_disable(struct amdgpu_device *adev) | ||
171 | { | ||
172 | iceland_ih_disable_interrupts(adev); | ||
173 | |||
174 | /* Wait and acknowledge irq */ | ||
175 | mdelay(1); | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * iceland_ih_get_wptr - get the IH ring buffer wptr | ||
180 | * | ||
181 | * @adev: amdgpu_device pointer | ||
182 | * | ||
183 | * Get the IH ring buffer wptr from either the register | ||
184 | * or the writeback memory buffer (VI). Also check for | ||
185 | * ring buffer overflow and deal with it. | ||
186 | * Used by the interrupt handler (VI). | ||
187 | * Returns the value of the wptr. | ||
188 | */ | ||
189 | static u32 iceland_ih_get_wptr(struct amdgpu_device *adev) | ||
190 | { | ||
191 | u32 wptr, tmp; | ||
192 | |||
193 | wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); | ||
194 | |||
195 | if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { | ||
196 | wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); | ||
197 | /* When a ring buffer overflow happens, start parsing interrupts | ||
198 | * from the last vector that was not overwritten (wptr + 16). | ||
199 | * Hopefully this allows us to catch up. | ||
200 | */ | ||
201 | dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", | ||
202 | wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); | ||
203 | adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; | ||
204 | tmp = RREG32(mmIH_RB_CNTL); | ||
205 | tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); | ||
206 | WREG32(mmIH_RB_CNTL, tmp); | ||
207 | } | ||
208 | return (wptr & adev->irq.ih.ptr_mask); | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * iceland_ih_decode_iv - decode an interrupt vector | ||
213 | * | ||
214 | * @adev: amdgpu_device pointer | ||
215 | * | ||
216 | * Decodes the interrupt vector at the current rptr | ||
217 | * position and also advances the position. | ||
218 | */ | ||
219 | static void iceland_ih_decode_iv(struct amdgpu_device *adev, | ||
220 | struct amdgpu_iv_entry *entry) | ||
221 | { | ||
222 | /* wptr/rptr are in bytes! */ | ||
223 | u32 ring_index = adev->irq.ih.rptr >> 2; | ||
224 | uint32_t dw[4]; | ||
225 | |||
226 | dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); | ||
227 | dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); | ||
228 | dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); | ||
229 | dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); | ||
230 | |||
231 | entry->src_id = dw[0] & 0xff; | ||
232 | entry->src_data = dw[1] & 0xfffffff; | ||
233 | entry->ring_id = dw[2] & 0xff; | ||
234 | entry->vm_id = (dw[2] >> 8) & 0xff; | ||
235 | entry->pas_id = (dw[2] >> 16) & 0xffff; | ||
236 | |||
237 | /* wptr/rptr are in bytes! */ | ||
238 | adev->irq.ih.rptr += 16; | ||
239 | } | ||
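A worked example of the decode above (values hypothetical), for a vector containing:

	dw[0] = 0x000000e0  ->  src_id   = 0xe0
	dw[1] = 0x00000001  ->  src_data = 0x1
	dw[2] = 0x00010500  ->  ring_id  = 0x00, vm_id = 0x05, pas_id = 0x0001

after which rptr has advanced 16 bytes to the start of the next 4-dword vector.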
240 | |||
241 | /** | ||
242 | * iceland_ih_set_rptr - set the IH ring buffer rptr | ||
243 | * | ||
244 | * @adev: amdgpu_device pointer | ||
245 | * | ||
246 | * Set the IH ring buffer rptr. | ||
247 | */ | ||
248 | static void iceland_ih_set_rptr(struct amdgpu_device *adev) | ||
249 | { | ||
250 | WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); | ||
251 | } | ||
252 | |||
253 | static int iceland_ih_early_init(struct amdgpu_device *adev) | ||
254 | { | ||
255 | iceland_ih_set_interrupt_funcs(adev); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static int iceland_ih_sw_init(struct amdgpu_device *adev) | ||
260 | { | ||
261 | int r; | ||
262 | |||
263 | r = amdgpu_ih_ring_init(adev, 64 * 1024, false); | ||
264 | if (r) | ||
265 | return r; | ||
266 | |||
267 | r = amdgpu_irq_init(adev); | ||
268 | |||
269 | return r; | ||
270 | } | ||
271 | |||
272 | static int iceland_ih_sw_fini(struct amdgpu_device *adev) | ||
273 | { | ||
274 | amdgpu_irq_fini(adev); | ||
275 | amdgpu_ih_ring_fini(adev); | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | static int iceland_ih_hw_init(struct amdgpu_device *adev) | ||
281 | { | ||
282 | int r; | ||
283 | |||
284 | r = iceland_ih_irq_init(adev); | ||
285 | if (r) | ||
286 | return r; | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static int iceland_ih_hw_fini(struct amdgpu_device *adev) | ||
292 | { | ||
293 | iceland_ih_irq_disable(adev); | ||
294 | |||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static int iceland_ih_suspend(struct amdgpu_device *adev) | ||
299 | { | ||
300 | return iceland_ih_hw_fini(adev); | ||
301 | } | ||
302 | |||
303 | static int iceland_ih_resume(struct amdgpu_device *adev) | ||
304 | { | ||
305 | return iceland_ih_hw_init(adev); | ||
306 | } | ||
307 | |||
308 | static bool iceland_ih_is_idle(struct amdgpu_device *adev) | ||
309 | { | ||
310 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
311 | |||
312 | if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) | ||
313 | return false; | ||
314 | |||
315 | return true; | ||
316 | } | ||
317 | |||
318 | static int iceland_ih_wait_for_idle(struct amdgpu_device *adev) | ||
319 | { | ||
320 | unsigned i; | ||
321 | u32 tmp; | ||
322 | |||
323 | for (i = 0; i < adev->usec_timeout; i++) { | ||
324 | /* read SRBM_STATUS */ | ||
325 | tmp = RREG32(mmSRBM_STATUS); | ||
326 | if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) | ||
327 | return 0; | ||
328 | udelay(1); | ||
329 | } | ||
330 | return -ETIMEDOUT; | ||
331 | } | ||
332 | |||
333 | static void iceland_ih_print_status(struct amdgpu_device *adev) | ||
334 | { | ||
335 | dev_info(adev->dev, "ICELAND IH registers\n"); | ||
336 | dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", | ||
337 | RREG32(mmSRBM_STATUS)); | ||
338 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | ||
339 | RREG32(mmSRBM_STATUS2)); | ||
340 | dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", | ||
341 | RREG32(mmINTERRUPT_CNTL)); | ||
342 | dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", | ||
343 | RREG32(mmINTERRUPT_CNTL2)); | ||
344 | dev_info(adev->dev, " IH_CNTL=0x%08X\n", | ||
345 | RREG32(mmIH_CNTL)); | ||
346 | dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", | ||
347 | RREG32(mmIH_RB_CNTL)); | ||
348 | dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", | ||
349 | RREG32(mmIH_RB_BASE)); | ||
350 | dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", | ||
351 | RREG32(mmIH_RB_WPTR_ADDR_LO)); | ||
352 | dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", | ||
353 | RREG32(mmIH_RB_WPTR_ADDR_HI)); | ||
354 | dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", | ||
355 | RREG32(mmIH_RB_RPTR)); | ||
356 | dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", | ||
357 | RREG32(mmIH_RB_WPTR)); | ||
358 | } | ||
359 | |||
360 | static int iceland_ih_soft_reset(struct amdgpu_device *adev) | ||
361 | { | ||
362 | u32 srbm_soft_reset = 0; | ||
363 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
364 | |||
365 | if (tmp & SRBM_STATUS__IH_BUSY_MASK) | ||
366 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, | ||
367 | SOFT_RESET_IH, 1); | ||
368 | |||
369 | if (srbm_soft_reset) { | ||
370 | iceland_ih_print_status(adev); | ||
371 | |||
372 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
373 | tmp |= srbm_soft_reset; | ||
374 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
375 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
376 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
377 | |||
378 | udelay(50); | ||
379 | |||
380 | tmp &= ~srbm_soft_reset; | ||
381 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
382 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
383 | |||
384 | /* Wait a little for things to settle down */ | ||
385 | udelay(50); | ||
386 | |||
387 | iceland_ih_print_status(adev); | ||
388 | } | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static int iceland_ih_set_clockgating_state(struct amdgpu_device *adev, | ||
394 | enum amdgpu_clockgating_state state) | ||
395 | { | ||
396 | /* TODO */ | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | static int iceland_ih_set_powergating_state(struct amdgpu_device *adev, | ||
401 | enum amdgpu_powergating_state state) | ||
402 | { | ||
403 | /* TODO */ | ||
404 | return 0; | ||
405 | } | ||
406 | |||
407 | const struct amdgpu_ip_funcs iceland_ih_ip_funcs = { | ||
408 | .early_init = iceland_ih_early_init, | ||
409 | .late_init = NULL, | ||
410 | .sw_init = iceland_ih_sw_init, | ||
411 | .sw_fini = iceland_ih_sw_fini, | ||
412 | .hw_init = iceland_ih_hw_init, | ||
413 | .hw_fini = iceland_ih_hw_fini, | ||
414 | .suspend = iceland_ih_suspend, | ||
415 | .resume = iceland_ih_resume, | ||
416 | .is_idle = iceland_ih_is_idle, | ||
417 | .wait_for_idle = iceland_ih_wait_for_idle, | ||
418 | .soft_reset = iceland_ih_soft_reset, | ||
419 | .print_status = iceland_ih_print_status, | ||
420 | .set_clockgating_state = iceland_ih_set_clockgating_state, | ||
421 | .set_powergating_state = iceland_ih_set_powergating_state, | ||
422 | }; | ||
423 | |||
424 | static const struct amdgpu_ih_funcs iceland_ih_funcs = { | ||
425 | .get_wptr = iceland_ih_get_wptr, | ||
426 | .decode_iv = iceland_ih_decode_iv, | ||
427 | .set_rptr = iceland_ih_set_rptr | ||
428 | }; | ||
429 | |||
430 | static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev) | ||
431 | { | ||
432 | if (adev->irq.ih_funcs == NULL) | ||
433 | adev->irq.ih_funcs = &iceland_ih_funcs; | ||
434 | } | ||
435 | |||
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.h b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h new file mode 100644 index 000000000000..d001895eb93b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __ICELAND_IH_H__ | ||
25 | #define __ICELAND_IH_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs iceland_ih_ip_funcs; | ||
28 | |||
29 | #endif /* __ICELAND_IH_H__ */ | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h new file mode 100644 index 000000000000..c723602c7b0c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_sdma_pkt_open.h | |||
@@ -0,0 +1,2167 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included | ||
12 | * in all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
15 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN | ||
18 | * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
19 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __ICELAND_SDMA_PKT_OPEN_H_ | ||
24 | #define __ICELAND_SDMA_PKT_OPEN_H_ | ||
25 | |||
26 | #define SDMA_OP_NOP 0 | ||
27 | #define SDMA_OP_COPY 1 | ||
28 | #define SDMA_OP_WRITE 2 | ||
29 | #define SDMA_OP_INDIRECT 4 | ||
30 | #define SDMA_OP_FENCE 5 | ||
31 | #define SDMA_OP_TRAP 6 | ||
32 | #define SDMA_OP_SEM 7 | ||
33 | #define SDMA_OP_POLL_REGMEM 8 | ||
34 | #define SDMA_OP_COND_EXE 9 | ||
35 | #define SDMA_OP_ATOMIC 10 | ||
36 | #define SDMA_OP_CONST_FILL 11 | ||
37 | #define SDMA_OP_GEN_PTEPDE 12 | ||
38 | #define SDMA_OP_TIMESTAMP 13 | ||
39 | #define SDMA_OP_SRBM_WRITE 14 | ||
40 | #define SDMA_OP_PRE_EXE 15 | ||
41 | #define SDMA_SUBOP_TIMESTAMP_SET 0 | ||
42 | #define SDMA_SUBOP_TIMESTAMP_GET 1 | ||
43 | #define SDMA_SUBOP_TIMESTAMP_GET_GLOBAL 2 | ||
44 | #define SDMA_SUBOP_COPY_LINEAR 0 | ||
45 | #define SDMA_SUBOP_COPY_LINEAR_SUB_WIND 4 | ||
46 | #define SDMA_SUBOP_COPY_TILED 1 | ||
47 | #define SDMA_SUBOP_COPY_TILED_SUB_WIND 5 | ||
48 | #define SDMA_SUBOP_COPY_T2T_SUB_WIND 6 | ||
49 | #define SDMA_SUBOP_COPY_SOA 3 | ||
50 | #define SDMA_SUBOP_WRITE_LINEAR 0 | ||
51 | #define SDMA_SUBOP_WRITE_TILED 1 | ||
52 | |||
53 | /*define for op field*/ | ||
54 | #define SDMA_PKT_HEADER_op_offset 0 | ||
55 | #define SDMA_PKT_HEADER_op_mask 0x000000FF | ||
56 | #define SDMA_PKT_HEADER_op_shift 0 | ||
57 | #define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift) | ||
58 | |||
59 | /*define for sub_op field*/ | ||
60 | #define SDMA_PKT_HEADER_sub_op_offset 0 | ||
61 | #define SDMA_PKT_HEADER_sub_op_mask 0x000000FF | ||
62 | #define SDMA_PKT_HEADER_sub_op_shift 8 | ||
63 | #define SDMA_PKT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_HEADER_sub_op_mask) << SDMA_PKT_HEADER_sub_op_shift) | ||
64 | |||
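The generated *_OP()/*_SUB_OP() helpers compose a packet dword by masking and shifting each field into place. A minimal sketch of building the first dword of a linear copy from the opcodes above:

	u32 header = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		     SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	/* header == 0x00000001: op = 1 in bits 7:0, sub_op = 0 in bits 15:8 */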
65 | /* | ||
66 | ** Definitions for SDMA_PKT_COPY_LINEAR packet | ||
67 | */ | ||
68 | |||
69 | /*define for HEADER word*/ | ||
70 | /*define for op field*/ | ||
71 | #define SDMA_PKT_COPY_LINEAR_HEADER_op_offset 0 | ||
72 | #define SDMA_PKT_COPY_LINEAR_HEADER_op_mask 0x000000FF | ||
73 | #define SDMA_PKT_COPY_LINEAR_HEADER_op_shift 0 | ||
74 | #define SDMA_PKT_COPY_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_op_shift) | ||
75 | |||
76 | /*define for sub_op field*/ | ||
77 | #define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_offset 0 | ||
78 | #define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask 0x000000FF | ||
79 | #define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift 8 | ||
80 | #define SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift) | ||
81 | |||
82 | /*define for broadcast field*/ | ||
83 | #define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_offset 0 | ||
84 | #define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask 0x00000001 | ||
85 | #define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift 27 | ||
86 | #define SDMA_PKT_COPY_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift) | ||
87 | |||
88 | /*define for COUNT word*/ | ||
89 | /*define for count field*/ | ||
90 | #define SDMA_PKT_COPY_LINEAR_COUNT_count_offset 1 | ||
91 | #define SDMA_PKT_COPY_LINEAR_COUNT_count_mask 0x003FFFFF | ||
92 | #define SDMA_PKT_COPY_LINEAR_COUNT_count_shift 0 | ||
93 | #define SDMA_PKT_COPY_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_LINEAR_COUNT_count_shift) | ||
94 | |||
95 | /*define for PARAMETER word*/ | ||
96 | /*define for dst_sw field*/ | ||
97 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_offset 2 | ||
98 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask 0x00000003 | ||
99 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift 16 | ||
100 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift) | ||
101 | |||
102 | /*define for dst_ha field*/ | ||
103 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_offset 2 | ||
104 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask 0x00000001 | ||
105 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift 22 | ||
106 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift) | ||
107 | |||
108 | /*define for src_sw field*/ | ||
109 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_offset 2 | ||
110 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask 0x00000003 | ||
111 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift 24 | ||
112 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift) | ||
113 | |||
114 | /*define for src_ha field*/ | ||
115 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_offset 2 | ||
116 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask 0x00000001 | ||
117 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift 30 | ||
118 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift) | ||
119 | |||
120 | /*define for SRC_ADDR_LO word*/ | ||
121 | /*define for src_addr_31_0 field*/ | ||
122 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3 | ||
123 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF | ||
124 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift 0 | ||
125 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift) | ||
126 | |||
127 | /*define for SRC_ADDR_HI word*/ | ||
128 | /*define for src_addr_63_32 field*/ | ||
129 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4 | ||
130 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF | ||
131 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift 0 | ||
132 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift) | ||
133 | |||
134 | /*define for DST_ADDR_LO word*/ | ||
135 | /*define for dst_addr_31_0 field*/ | ||
136 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_offset 5 | ||
137 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
138 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
139 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift) | ||
140 | |||
141 | /*define for DST_ADDR_HI word*/ | ||
142 | /*define for dst_addr_63_32 field*/ | ||
143 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_offset 6 | ||
144 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
145 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
146 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift) | ||
147 | |||
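Putting the COPY_LINEAR definitions together: the _offset values above describe a 7-dword packet (header, count, parameter, then the two 64-bit addresses split low/high). An editorial sketch of emitting one such packet into a caller-provided dword buffer; emit_copy_linear() is hypothetical, while lower_32_bits()/upper_32_bits() are the standard kernel helpers:

	static void emit_copy_linear(u32 *ib, u64 src_addr, u64 dst_addr,
				     u32 byte_count)
	{
		ib[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
			SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib[1] = SDMA_PKT_COPY_LINEAR_COUNT_COUNT(byte_count); /* <= 0x3FFFFF */
		ib[2] = 0;	/* default swap/hash parameters */
		ib[3] = lower_32_bits(src_addr);
		ib[4] = upper_32_bits(src_addr);
		ib[5] = lower_32_bits(dst_addr);
		ib[6] = upper_32_bits(dst_addr);
	}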
148 | |||
149 | /* | ||
150 | ** Definitions for SDMA_PKT_COPY_BROADCAST_LINEAR packet | ||
151 | */ | ||
152 | |||
153 | /*define for HEADER word*/ | ||
154 | /*define for op field*/ | ||
155 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_offset 0 | ||
156 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask 0x000000FF | ||
157 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift 0 | ||
158 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift) | ||
159 | |||
160 | /*define for sub_op field*/ | ||
161 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_offset 0 | ||
162 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask 0x000000FF | ||
163 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift 8 | ||
164 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift) | ||
165 | |||
166 | /*define for broadcast field*/ | ||
167 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_offset 0 | ||
168 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask 0x00000001 | ||
169 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift 27 | ||
170 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift) | ||
171 | |||
172 | /*define for COUNT word*/ | ||
173 | /*define for count field*/ | ||
174 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_offset 1 | ||
175 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask 0x003FFFFF | ||
176 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift 0 | ||
177 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift) | ||
178 | |||
179 | /*define for PARAMETER word*/ | ||
180 | /*define for dst2_sw field*/ | ||
181 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_offset 2 | ||
182 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask 0x00000003 | ||
183 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift 8 | ||
184 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift) | ||
185 | |||
186 | /*define for dst2_ha field*/ | ||
187 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_offset 2 | ||
188 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask 0x00000001 | ||
189 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift 14 | ||
190 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift) | ||
191 | |||
192 | /*define for dst1_sw field*/ | ||
193 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_offset 2 | ||
194 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask 0x00000003 | ||
195 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift 16 | ||
196 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift) | ||
197 | |||
198 | /*define for dst1_ha field*/ | ||
199 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_offset 2 | ||
200 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask 0x00000001 | ||
201 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift 22 | ||
202 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift) | ||
203 | |||
204 | /*define for src_sw field*/ | ||
205 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_offset 2 | ||
206 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask 0x00000003 | ||
207 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift 24 | ||
208 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift) | ||
209 | |||
210 | /*define for src_ha field*/ | ||
211 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_offset 2 | ||
212 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask 0x00000001 | ||
213 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift 30 | ||
214 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift) | ||
215 | |||
216 | /*define for SRC_ADDR_LO word*/ | ||
217 | /*define for src_addr_31_0 field*/ | ||
218 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3 | ||
219 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF | ||
220 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift 0 | ||
221 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift) | ||
222 | |||
223 | /*define for SRC_ADDR_HI word*/ | ||
224 | /*define for src_addr_63_32 field*/ | ||
225 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4 | ||
226 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF | ||
227 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift 0 | ||
228 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift) | ||
229 | |||
230 | /*define for DST1_ADDR_LO word*/ | ||
231 | /*define for dst1_addr_31_0 field*/ | ||
232 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_offset 5 | ||
233 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask 0xFFFFFFFF | ||
234 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift 0 | ||
235 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_DST1_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift) | ||
236 | |||
237 | /*define for DST1_ADDR_HI word*/ | ||
238 | /*define for dst1_addr_63_32 field*/ | ||
239 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_offset 6 | ||
240 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask 0xFFFFFFFF | ||
241 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift 0 | ||
242 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_DST1_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift) | ||
243 | |||
244 | /*define for DST2_ADDR_LO word*/ | ||
245 | /*define for dst2_addr_31_0 field*/ | ||
246 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_offset 7 | ||
247 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask 0xFFFFFFFF | ||
248 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift 0 | ||
249 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_DST2_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift) | ||
250 | |||
251 | /*define for DST2_ADDR_HI word*/ | ||
252 | /*define for dst2_addr_63_32 field*/ | ||
253 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_offset 8 | ||
254 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask 0xFFFFFFFF | ||
255 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift 0 | ||
256 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_DST2_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift) | ||
257 | |||
258 | |||
259 | /* | ||
260 | ** Definitions for SDMA_PKT_COPY_LINEAR_SUBWIN packet | ||
261 | */ | ||
262 | |||
263 | /*define for HEADER word*/ | ||
264 | /*define for op field*/ | ||
265 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_offset 0 | ||
266 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask 0x000000FF | ||
267 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift 0 | ||
268 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift) | ||
269 | |||
270 | /*define for sub_op field*/ | ||
271 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_offset 0 | ||
272 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask 0x000000FF | ||
273 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift 8 | ||
274 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift) | ||
275 | |||
276 | /*define for elementsize field*/ | ||
277 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_offset 0 | ||
278 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask 0x00000007 | ||
279 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift 29 | ||
280 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_ELEMENTSIZE(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift) | ||
281 | |||
282 | /*define for SRC_ADDR_LO word*/ | ||
283 | /*define for src_addr_31_0 field*/ | ||
284 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_offset 1 | ||
285 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF | ||
286 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift 0 | ||
287 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift) | ||
288 | |||
289 | /*define for SRC_ADDR_HI word*/ | ||
290 | /*define for src_addr_63_32 field*/ | ||
291 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_offset 2 | ||
292 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF | ||
293 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift 0 | ||
294 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift) | ||
295 | |||
296 | /*define for DW_3 word*/ | ||
297 | /*define for src_x field*/ | ||
298 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_offset 3 | ||
299 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask 0x00003FFF | ||
300 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift 0 | ||
301 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift) | ||
302 | |||
303 | /*define for src_y field*/ | ||
304 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_offset 3 | ||
305 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask 0x00003FFF | ||
306 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift 16 | ||
307 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift) | ||
308 | |||
309 | /*define for DW_4 word*/ | ||
310 | /*define for src_z field*/ | ||
311 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_offset 4 | ||
312 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask 0x000007FF | ||
313 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift 0 | ||
314 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift) | ||
315 | |||
316 | /*define for src_pitch field*/ | ||
317 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_offset 4 | ||
318 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask 0x00003FFF | ||
319 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift 16 | ||
320 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift) | ||
321 | |||
322 | /*define for DW_5 word*/ | ||
323 | /*define for src_slice_pitch field*/ | ||
324 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_offset 5 | ||
325 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask 0x0FFFFFFF | ||
326 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift 0 | ||
327 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift) | ||
328 | |||
329 | /*define for DST_ADDR_LO word*/ | ||
330 | /*define for dst_addr_31_0 field*/ | ||
331 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_offset 6 | ||
332 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
333 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
334 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift) | ||
335 | |||
336 | /*define for DST_ADDR_HI word*/ | ||
337 | /*define for dst_addr_63_32 field*/ | ||
338 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_offset 7 | ||
339 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
340 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
341 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift) | ||
342 | |||
343 | /*define for DW_8 word*/ | ||
344 | /*define for dst_x field*/ | ||
345 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_offset 8 | ||
346 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask 0x00003FFF | ||
347 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift 0 | ||
348 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift) | ||
349 | |||
350 | /*define for dst_y field*/ | ||
351 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_offset 8 | ||
352 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask 0x00003FFF | ||
353 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift 16 | ||
354 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift) | ||
355 | |||
356 | /*define for DW_9 word*/ | ||
357 | /*define for dst_z field*/ | ||
358 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_offset 9 | ||
359 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask 0x000007FF | ||
360 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift 0 | ||
361 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift) | ||
362 | |||
363 | /*define for dst_pitch field*/ | ||
364 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_offset 9 | ||
365 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask 0x00003FFF | ||
366 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift 16 | ||
367 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift) | ||
368 | |||
369 | /*define for DW_10 word*/ | ||
370 | /*define for dst_slice_pitch field*/ | ||
371 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_offset 10 | ||
372 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask 0x0FFFFFFF | ||
373 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift 0 | ||
374 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift) | ||
375 | |||
376 | /*define for DW_11 word*/ | ||
377 | /*define for rect_x field*/ | ||
378 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_offset 11 | ||
379 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask 0x00003FFF | ||
380 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift 0 | ||
381 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift) | ||
382 | |||
383 | /*define for rect_y field*/ | ||
384 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_offset 11 | ||
385 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask 0x00003FFF | ||
386 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift 16 | ||
387 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift) | ||
388 | |||
389 | /*define for DW_12 word*/ | ||
390 | /*define for rect_z field*/ | ||
391 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_offset 12 | ||
392 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask 0x000007FF | ||
393 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift 0 | ||
394 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_RECT_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift) | ||
395 | |||
396 | /*define for dst_sw field*/ | ||
397 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_offset 12 | ||
398 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask 0x00000003 | ||
399 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift 16 | ||
400 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift) | ||
401 | |||
402 | /*define for dst_ha field*/ | ||
403 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_offset 12 | ||
404 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask 0x00000001 | ||
405 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift 22 | ||
406 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift) | ||
407 | |||
408 | /*define for src_sw field*/ | ||
409 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_offset 12 | ||
410 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask 0x00000003 | ||
411 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift 24 | ||
412 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift) | ||
413 | |||
414 | /*define for src_ha field*/ | ||
415 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_offset 12 | ||
416 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask 0x00000001 | ||
417 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift 30 | ||
418 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift) | ||
419 | |||
420 | |||
421 | /* | ||
422 | ** Definitions for SDMA_PKT_COPY_TILED packet | ||
423 | */ | ||
424 | |||
425 | /*define for HEADER word*/ | ||
426 | /*define for op field*/ | ||
427 | #define SDMA_PKT_COPY_TILED_HEADER_op_offset 0 | ||
428 | #define SDMA_PKT_COPY_TILED_HEADER_op_mask 0x000000FF | ||
429 | #define SDMA_PKT_COPY_TILED_HEADER_op_shift 0 | ||
430 | #define SDMA_PKT_COPY_TILED_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_op_mask) << SDMA_PKT_COPY_TILED_HEADER_op_shift) | ||
431 | |||
432 | /*define for sub_op field*/ | ||
433 | #define SDMA_PKT_COPY_TILED_HEADER_sub_op_offset 0 | ||
434 | #define SDMA_PKT_COPY_TILED_HEADER_sub_op_mask 0x000000FF | ||
435 | #define SDMA_PKT_COPY_TILED_HEADER_sub_op_shift 8 | ||
436 | #define SDMA_PKT_COPY_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_HEADER_sub_op_shift) | ||
437 | |||
438 | /*define for detile field*/ | ||
439 | #define SDMA_PKT_COPY_TILED_HEADER_detile_offset 0 | ||
440 | #define SDMA_PKT_COPY_TILED_HEADER_detile_mask 0x00000001 | ||
441 | #define SDMA_PKT_COPY_TILED_HEADER_detile_shift 31 | ||
442 | #define SDMA_PKT_COPY_TILED_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_HEADER_detile_shift) | ||
443 | |||
444 | /*define for TILED_ADDR_LO word*/ | ||
445 | /*define for tiled_addr_31_0 field*/ | ||
446 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_offset 1 | ||
447 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask 0xFFFFFFFF | ||
448 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift 0 | ||
449 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift) | ||
450 | |||
451 | /*define for TILED_ADDR_HI word*/ | ||
452 | /*define for tiled_addr_63_32 field*/ | ||
453 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_offset 2 | ||
454 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask 0xFFFFFFFF | ||
455 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift 0 | ||
456 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift) | ||
457 | |||
458 | /*define for DW_3 word*/ | ||
459 | /*define for pitch_in_tile field*/ | ||
460 | #define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_offset 3 | ||
461 | #define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask 0x000007FF | ||
462 | #define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift 0 | ||
463 | #define SDMA_PKT_COPY_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift) | ||
464 | |||
465 | /*define for height field*/ | ||
466 | #define SDMA_PKT_COPY_TILED_DW_3_height_offset 3 | ||
467 | #define SDMA_PKT_COPY_TILED_DW_3_height_mask 0x00003FFF | ||
468 | #define SDMA_PKT_COPY_TILED_DW_3_height_shift 16 | ||
469 | #define SDMA_PKT_COPY_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_height_mask) << SDMA_PKT_COPY_TILED_DW_3_height_shift) | ||
470 | |||
471 | /*define for DW_4 word*/ | ||
472 | /*define for slice_pitch field*/ | ||
473 | #define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_offset 4 | ||
474 | #define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask 0x003FFFFF | ||
475 | #define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift 0 | ||
476 | #define SDMA_PKT_COPY_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift) | ||
477 | |||
478 | /*define for DW_5 word*/ | ||
479 | /*define for element_size field*/ | ||
480 | #define SDMA_PKT_COPY_TILED_DW_5_element_size_offset 5 | ||
481 | #define SDMA_PKT_COPY_TILED_DW_5_element_size_mask 0x00000007 | ||
482 | #define SDMA_PKT_COPY_TILED_DW_5_element_size_shift 0 | ||
483 | #define SDMA_PKT_COPY_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_element_size_mask) << SDMA_PKT_COPY_TILED_DW_5_element_size_shift) | ||
484 | |||
485 | /*define for array_mode field*/ | ||
486 | #define SDMA_PKT_COPY_TILED_DW_5_array_mode_offset 5 | ||
487 | #define SDMA_PKT_COPY_TILED_DW_5_array_mode_mask 0x0000000F | ||
488 | #define SDMA_PKT_COPY_TILED_DW_5_array_mode_shift 3 | ||
489 | #define SDMA_PKT_COPY_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_array_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_array_mode_shift) | ||
490 | |||
491 | /*define for mit_mode field*/ | ||
492 | #define SDMA_PKT_COPY_TILED_DW_5_mit_mode_offset 5 | ||
493 | #define SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask 0x00000007 | ||
494 | #define SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift 8 | ||
495 | #define SDMA_PKT_COPY_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift) | ||
496 | |||
497 | /*define for tilesplit_size field*/ | ||
498 | #define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_offset 5 | ||
499 | #define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask 0x00000007 | ||
500 | #define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift 11 | ||
501 | #define SDMA_PKT_COPY_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift) | ||
502 | |||
503 | /*define for bank_w field*/ | ||
504 | #define SDMA_PKT_COPY_TILED_DW_5_bank_w_offset 5 | ||
505 | #define SDMA_PKT_COPY_TILED_DW_5_bank_w_mask 0x00000003 | ||
506 | #define SDMA_PKT_COPY_TILED_DW_5_bank_w_shift 15 | ||
507 | #define SDMA_PKT_COPY_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_w_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_w_shift) | ||
508 | |||
509 | /*define for bank_h field*/ | ||
510 | #define SDMA_PKT_COPY_TILED_DW_5_bank_h_offset 5 | ||
511 | #define SDMA_PKT_COPY_TILED_DW_5_bank_h_mask 0x00000003 | ||
512 | #define SDMA_PKT_COPY_TILED_DW_5_bank_h_shift 18 | ||
513 | #define SDMA_PKT_COPY_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_h_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_h_shift) | ||
514 | |||
515 | /*define for num_bank field*/ | ||
516 | #define SDMA_PKT_COPY_TILED_DW_5_num_bank_offset 5 | ||
517 | #define SDMA_PKT_COPY_TILED_DW_5_num_bank_mask 0x00000003 | ||
518 | #define SDMA_PKT_COPY_TILED_DW_5_num_bank_shift 21 | ||
519 | #define SDMA_PKT_COPY_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_num_bank_mask) << SDMA_PKT_COPY_TILED_DW_5_num_bank_shift) | ||
520 | |||
521 | /*define for mat_aspt field*/ | ||
522 | #define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_offset 5 | ||
523 | #define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask 0x00000003 | ||
524 | #define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift 24 | ||
525 | #define SDMA_PKT_COPY_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift) | ||
526 | |||
527 | /*define for pipe_config field*/ | ||
528 | #define SDMA_PKT_COPY_TILED_DW_5_pipe_config_offset 5 | ||
529 | #define SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask 0x0000001F | ||
530 | #define SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift 26 | ||
531 | #define SDMA_PKT_COPY_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask) << SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift) | ||
532 | |||
533 | /*define for DW_6 word*/ | ||
534 | /*define for x field*/ | ||
535 | #define SDMA_PKT_COPY_TILED_DW_6_x_offset 6 | ||
536 | #define SDMA_PKT_COPY_TILED_DW_6_x_mask 0x00003FFF | ||
537 | #define SDMA_PKT_COPY_TILED_DW_6_x_shift 0 | ||
538 | #define SDMA_PKT_COPY_TILED_DW_6_X(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_x_mask) << SDMA_PKT_COPY_TILED_DW_6_x_shift) | ||
539 | |||
540 | /*define for y field*/ | ||
541 | #define SDMA_PKT_COPY_TILED_DW_6_y_offset 6 | ||
542 | #define SDMA_PKT_COPY_TILED_DW_6_y_mask 0x00003FFF | ||
543 | #define SDMA_PKT_COPY_TILED_DW_6_y_shift 16 | ||
544 | #define SDMA_PKT_COPY_TILED_DW_6_Y(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_y_mask) << SDMA_PKT_COPY_TILED_DW_6_y_shift) | ||
545 | |||
546 | /*define for DW_7 word*/ | ||
547 | /*define for z field*/ | ||
548 | #define SDMA_PKT_COPY_TILED_DW_7_z_offset 7 | ||
549 | #define SDMA_PKT_COPY_TILED_DW_7_z_mask 0x00000FFF | ||
550 | #define SDMA_PKT_COPY_TILED_DW_7_z_shift 0 | ||
551 | #define SDMA_PKT_COPY_TILED_DW_7_Z(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_z_mask) << SDMA_PKT_COPY_TILED_DW_7_z_shift) | ||
552 | |||
553 | /*define for linear_sw field*/ | ||
554 | #define SDMA_PKT_COPY_TILED_DW_7_linear_sw_offset 7 | ||
555 | #define SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask 0x00000003 | ||
556 | #define SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift 16 | ||
557 | #define SDMA_PKT_COPY_TILED_DW_7_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift) | ||
558 | |||
559 | /*define for tile_sw field*/ | ||
560 | #define SDMA_PKT_COPY_TILED_DW_7_tile_sw_offset 7 | ||
561 | #define SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask 0x00000003 | ||
562 | #define SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift 24 | ||
563 | #define SDMA_PKT_COPY_TILED_DW_7_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift) | ||
564 | |||
565 | /*define for LINEAR_ADDR_LO word*/ | ||
566 | /*define for linear_addr_31_0 field*/ | ||
567 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_offset 8 | ||
568 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF | ||
569 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 | ||
570 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift) | ||
571 | |||
572 | /*define for LINEAR_ADDR_HI word*/ | ||
573 | /*define for linear_addr_63_32 field*/ | ||
574 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_offset 9 | ||
575 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF | ||
576 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 | ||
577 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift) | ||
578 | |||
579 | /*define for LINEAR_PITCH word*/ | ||
580 | /*define for linear_pitch field*/ | ||
581 | #define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_offset 10 | ||
582 | #define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask 0x0007FFFF | ||
583 | #define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift 0 | ||
584 | #define SDMA_PKT_COPY_TILED_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift) | ||
585 | |||
586 | /*define for COUNT word*/ | ||
587 | /*define for count field*/ | ||
588 | #define SDMA_PKT_COPY_TILED_COUNT_count_offset 11 | ||
589 | #define SDMA_PKT_COPY_TILED_COUNT_count_mask 0x000FFFFF | ||
590 | #define SDMA_PKT_COPY_TILED_COUNT_count_shift 0 | ||
591 | #define SDMA_PKT_COPY_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_TILED_COUNT_count_mask) << SDMA_PKT_COPY_TILED_COUNT_count_shift) | ||
592 | |||
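Every field in this header follows the same triple: _offset is the index of the 32-bit word holding the field within the packet, _mask is the field width with the value right-aligned, and _shift is its bit position; the upper-case macro masks and shifts a caller value into place, and fields sharing a word are OR-ed together. A minimal C sketch of filling the linear-side words of the COPY_TILED packet above (the COUNT word at offset 11 suggests a 12-dword packet; the header word and tiled-side words, defined earlier in this file, are elided, and the units of the count field are not stated in this excerpt):

#include <stdint.h>

/* Hypothetical helper, a sketch only: fill the linear-side words of a
 * COPY_TILED packet. pkt[] must hold at least 12 dwords. */
static void sdma_copy_tiled_set_linear(uint32_t *pkt, uint32_t x, uint32_t y,
					uint64_t linear_addr,
					uint32_t linear_pitch, uint32_t count)
{
	/* x and y share DW_6, so their packed values are OR-ed together */
	pkt[SDMA_PKT_COPY_TILED_DW_6_x_offset] =
		SDMA_PKT_COPY_TILED_DW_6_X(x) |
		SDMA_PKT_COPY_TILED_DW_6_Y(y);
	pkt[SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_offset] =
		SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_LINEAR_ADDR_31_0((uint32_t)linear_addr);
	pkt[SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_offset] =
		SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_LINEAR_ADDR_63_32((uint32_t)(linear_addr >> 32));
	/* linear_pitch is only 19 bits wide (mask 0x0007FFFF) and count 20
	 * bits (0x000FFFFF); oversized values are silently truncated by the
	 * masks, so callers should range-check first. */
	pkt[SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_offset] =
		SDMA_PKT_COPY_TILED_LINEAR_PITCH_LINEAR_PITCH(linear_pitch);
	pkt[SDMA_PKT_COPY_TILED_COUNT_count_offset] =
		SDMA_PKT_COPY_TILED_COUNT_COUNT(count);
}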
593 | |||
594 | /* | ||
595 | ** Definitions for SDMA_PKT_COPY_L2T_BROADCAST packet | ||
596 | */ | ||
597 | |||
598 | /*define for HEADER word*/ | ||
599 | /*define for op field*/ | ||
600 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_offset 0 | ||
601 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask 0x000000FF | ||
602 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift 0 | ||
603 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift) | ||
604 | |||
605 | /*define for sub_op field*/ | ||
606 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_offset 0 | ||
607 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask 0x000000FF | ||
608 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift 8 | ||
609 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift) | ||
610 | |||
611 | /*define for videocopy field*/ | ||
612 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_offset 0 | ||
613 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask 0x00000001 | ||
614 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift 26 | ||
615 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_VIDEOCOPY(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift) | ||
616 | |||
617 | /*define for broadcast field*/ | ||
618 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_offset 0 | ||
619 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask 0x00000001 | ||
620 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift 27 | ||
621 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift) | ||
622 | |||
623 | /*define for TILED_ADDR_LO_0 word*/ | ||
624 | /*define for tiled_addr0_31_0 field*/ | ||
625 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_offset 1 | ||
626 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask 0xFFFFFFFF | ||
627 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift 0 | ||
628 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_TILED_ADDR0_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift) | ||
629 | |||
630 | /*define for TILED_ADDR_HI_0 word*/ | ||
631 | /*define for tiled_addr0_63_32 field*/ | ||
632 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_offset 2 | ||
633 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask 0xFFFFFFFF | ||
634 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift 0 | ||
635 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_TILED_ADDR0_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift) | ||
636 | |||
637 | /*define for TILED_ADDR_LO_1 word*/ | ||
638 | /*define for tiled_addr1_31_0 field*/ | ||
639 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_offset 3 | ||
640 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask 0xFFFFFFFF | ||
641 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift 0 | ||
642 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_TILED_ADDR1_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift) | ||
643 | |||
644 | /*define for TILED_ADDR_HI_1 word*/ | ||
645 | /*define for tiled_addr1_63_32 field*/ | ||
646 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_offset 4 | ||
647 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask 0xFFFFFFFF | ||
648 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift 0 | ||
649 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_TILED_ADDR1_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift) | ||
650 | |||
651 | /*define for DW_5 word*/ | ||
652 | /*define for pitch_in_tile field*/ | ||
653 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_offset 5 | ||
654 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask 0x000007FF | ||
655 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift 0 | ||
656 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift) | ||
657 | |||
658 | /*define for height field*/ | ||
659 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_offset 5 | ||
660 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask 0x00003FFF | ||
661 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift 16 | ||
662 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_HEIGHT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift) | ||
663 | |||
664 | /*define for DW_6 word*/ | ||
665 | /*define for slice_pitch field*/ | ||
666 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_offset 6 | ||
667 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask 0x003FFFFF | ||
668 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift 0 | ||
669 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift) | ||
670 | |||
671 | /*define for DW_7 word*/ | ||
672 | /*define for element_size field*/ | ||
673 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_offset 7 | ||
674 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask 0x00000007 | ||
675 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift 0 | ||
676 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift) | ||
677 | |||
678 | /*define for array_mode field*/ | ||
679 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_offset 7 | ||
680 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask 0x0000000F | ||
681 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift 3 | ||
682 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift) | ||
683 | |||
684 | /*define for mit_mode field*/ | ||
685 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_offset 7 | ||
686 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask 0x00000007 | ||
687 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift 8 | ||
688 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MIT_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift) | ||
689 | |||
690 | /*define for tilesplit_size field*/ | ||
691 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_offset 7 | ||
692 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask 0x00000007 | ||
693 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift 11 | ||
694 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift) | ||
695 | |||
696 | /*define for bank_w field*/ | ||
697 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_offset 7 | ||
698 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask 0x00000003 | ||
699 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift 15 | ||
700 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_W(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift) | ||
701 | |||
702 | /*define for bank_h field*/ | ||
703 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_offset 7 | ||
704 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask 0x00000003 | ||
705 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift 18 | ||
706 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_H(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift) | ||
707 | |||
708 | /*define for num_bank field*/ | ||
709 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_offset 7 | ||
710 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask 0x00000003 | ||
711 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift 21 | ||
712 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_NUM_BANK(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift) | ||
713 | |||
714 | /*define for mat_aspt field*/ | ||
715 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_offset 7 | ||
716 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask 0x00000003 | ||
717 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift 24 | ||
718 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift) | ||
719 | |||
720 | /*define for pipe_config field*/ | ||
721 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_offset 7 | ||
722 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask 0x0000001F | ||
723 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift 26 | ||
724 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift) | ||
725 | |||
726 | /*define for DW_8 word*/ | ||
727 | /*define for x field*/ | ||
728 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_offset 8 | ||
729 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask 0x00003FFF | ||
730 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift 0 | ||
731 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_X(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift) | ||
732 | |||
733 | /*define for y field*/ | ||
734 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_offset 8 | ||
735 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask 0x00003FFF | ||
736 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift 16 | ||
737 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_Y(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift) | ||
738 | |||
739 | /*define for DW_9 word*/ | ||
740 | /*define for z field*/ | ||
741 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_offset 9 | ||
742 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask 0x00000FFF | ||
743 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift 0 | ||
744 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_Z(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift) | ||
745 | |||
746 | /*define for DW_10 word*/ | ||
747 | /*define for dst2_sw field*/ | ||
748 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_offset 10 | ||
749 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask 0x00000003 | ||
750 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift 8 | ||
751 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift) | ||
752 | |||
753 | /*define for dst2_ha field*/ | ||
754 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_offset 10 | ||
755 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask 0x00000001 | ||
756 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift 14 | ||
757 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_HA(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift) | ||
758 | |||
759 | /*define for linear_sw field*/ | ||
760 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_offset 10 | ||
761 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask 0x00000003 | ||
762 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift 16 | ||
763 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift) | ||
764 | |||
765 | /*define for tile_sw field*/ | ||
766 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_offset 10 | ||
767 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask 0x00000003 | ||
768 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift 24 | ||
769 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_TILE_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift) | ||
770 | |||
771 | /*define for LINEAR_ADDR_LO word*/ | ||
772 | /*define for linear_addr_31_0 field*/ | ||
773 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_offset 11 | ||
774 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF | ||
775 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 | ||
776 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift) | ||
777 | |||
778 | /*define for LINEAR_ADDR_HI word*/ | ||
779 | /*define for linear_addr_63_32 field*/ | ||
780 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_offset 12 | ||
781 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF | ||
782 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 | ||
783 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift) | ||
784 | |||
785 | /*define for LINEAR_PITCH word*/ | ||
786 | /*define for linear_pitch field*/ | ||
787 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_offset 13 | ||
788 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask 0x0007FFFF | ||
789 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift 0 | ||
790 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift) | ||
791 | |||
792 | /*define for COUNT word*/ | ||
793 | /*define for count field*/ | ||
794 | #define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_offset 14 | ||
795 | #define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask 0x000FFFFF | ||
796 | #define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift 0 | ||
797 | #define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask) << SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift) | ||
798 | |||
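The L2T_BROADCAST header packs two flag bits (videocopy, broadcast) next to the op/sub_op pair in word 0, and, judging by the tiled_addr0/tiled_addr1 field names, a broadcast writes the same source data to two tiled destinations. A sketch of composing the header word and the two destination addresses; the SDMA_OP_COPY/SDMA_SUBOP_COPY_TILED values below are assumed stand-ins for the real opcode constants defined elsewhere in this header:

#include <stdint.h>

#define SDMA_OP_COPY		1	/* assumed value */
#define SDMA_SUBOP_COPY_TILED	1	/* assumed value */

static void sdma_l2t_broadcast_head(uint32_t *pkt,
				    uint64_t tiled_addr0, uint64_t tiled_addr1)
{
	/* op, sub_op and the broadcast flag all live in word 0 */
	pkt[SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_offset] =
		SDMA_PKT_COPY_L2T_BROADCAST_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_COPY_L2T_BROADCAST_HEADER_SUB_OP(SDMA_SUBOP_COPY_TILED) |
		SDMA_PKT_COPY_L2T_BROADCAST_HEADER_BROADCAST(1);
	/* the two tiled destinations of the broadcast */
	pkt[SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_offset] =
		SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_TILED_ADDR0_31_0((uint32_t)tiled_addr0);
	pkt[SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_offset] =
		SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_TILED_ADDR0_63_32((uint32_t)(tiled_addr0 >> 32));
	pkt[SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_offset] =
		SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_TILED_ADDR1_31_0((uint32_t)tiled_addr1);
	pkt[SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_offset] =
		SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_TILED_ADDR1_63_32((uint32_t)(tiled_addr1 >> 32));
}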
799 | |||
800 | /* | ||
801 | ** Definitions for SDMA_PKT_COPY_T2T packet | ||
802 | */ | ||
803 | |||
804 | /*define for HEADER word*/ | ||
805 | /*define for op field*/ | ||
806 | #define SDMA_PKT_COPY_T2T_HEADER_op_offset 0 | ||
807 | #define SDMA_PKT_COPY_T2T_HEADER_op_mask 0x000000FF | ||
808 | #define SDMA_PKT_COPY_T2T_HEADER_op_shift 0 | ||
809 | #define SDMA_PKT_COPY_T2T_HEADER_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_op_mask) << SDMA_PKT_COPY_T2T_HEADER_op_shift) | ||
810 | |||
811 | /*define for sub_op field*/ | ||
812 | #define SDMA_PKT_COPY_T2T_HEADER_sub_op_offset 0 | ||
813 | #define SDMA_PKT_COPY_T2T_HEADER_sub_op_mask 0x000000FF | ||
814 | #define SDMA_PKT_COPY_T2T_HEADER_sub_op_shift 8 | ||
815 | #define SDMA_PKT_COPY_T2T_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_sub_op_mask) << SDMA_PKT_COPY_T2T_HEADER_sub_op_shift) | ||
816 | |||
817 | /*define for SRC_ADDR_LO word*/ | ||
818 | /*define for src_addr_31_0 field*/ | ||
819 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_offset 1 | ||
820 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF | ||
821 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift 0 | ||
822 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift) | ||
823 | |||
824 | /*define for SRC_ADDR_HI word*/ | ||
825 | /*define for src_addr_63_32 field*/ | ||
826 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_offset 2 | ||
827 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF | ||
828 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift 0 | ||
829 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift) | ||
830 | |||
831 | /*define for DW_3 word*/ | ||
832 | /*define for src_x field*/ | ||
833 | #define SDMA_PKT_COPY_T2T_DW_3_src_x_offset 3 | ||
834 | #define SDMA_PKT_COPY_T2T_DW_3_src_x_mask 0x00003FFF | ||
835 | #define SDMA_PKT_COPY_T2T_DW_3_src_x_shift 0 | ||
836 | #define SDMA_PKT_COPY_T2T_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_x_mask) << SDMA_PKT_COPY_T2T_DW_3_src_x_shift) | ||
837 | |||
838 | /*define for src_y field*/ | ||
839 | #define SDMA_PKT_COPY_T2T_DW_3_src_y_offset 3 | ||
840 | #define SDMA_PKT_COPY_T2T_DW_3_src_y_mask 0x00003FFF | ||
841 | #define SDMA_PKT_COPY_T2T_DW_3_src_y_shift 16 | ||
842 | #define SDMA_PKT_COPY_T2T_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_y_mask) << SDMA_PKT_COPY_T2T_DW_3_src_y_shift) | ||
843 | |||
844 | /*define for DW_4 word*/ | ||
845 | /*define for src_z field*/ | ||
846 | #define SDMA_PKT_COPY_T2T_DW_4_src_z_offset 4 | ||
847 | #define SDMA_PKT_COPY_T2T_DW_4_src_z_mask 0x000007FF | ||
848 | #define SDMA_PKT_COPY_T2T_DW_4_src_z_shift 0 | ||
849 | #define SDMA_PKT_COPY_T2T_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_z_mask) << SDMA_PKT_COPY_T2T_DW_4_src_z_shift) | ||
850 | |||
851 | /*define for src_pitch_in_tile field*/ | ||
852 | #define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_offset 4 | ||
853 | #define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask 0x00000FFF | ||
854 | #define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift 16 | ||
855 | #define SDMA_PKT_COPY_T2T_DW_4_SRC_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift) | ||
856 | |||
857 | /*define for DW_5 word*/ | ||
858 | /*define for src_slice_pitch field*/ | ||
859 | #define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_offset 5 | ||
860 | #define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask 0x003FFFFF | ||
861 | #define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift 0 | ||
862 | #define SDMA_PKT_COPY_T2T_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift) | ||
863 | |||
864 | /*define for DW_6 word*/ | ||
865 | /*define for src_element_size field*/ | ||
866 | #define SDMA_PKT_COPY_T2T_DW_6_src_element_size_offset 6 | ||
867 | #define SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask 0x00000007 | ||
868 | #define SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift 0 | ||
869 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift) | ||
870 | |||
871 | /*define for src_array_mode field*/ | ||
872 | #define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_offset 6 | ||
873 | #define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask 0x0000000F | ||
874 | #define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift 3 | ||
875 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift) | ||
876 | |||
877 | /*define for src_mit_mode field*/ | ||
878 | #define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_offset 6 | ||
879 | #define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask 0x00000007 | ||
880 | #define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift 8 | ||
881 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift) | ||
882 | |||
883 | /*define for src_tilesplit_size field*/ | ||
884 | #define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_offset 6 | ||
885 | #define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask 0x00000007 | ||
886 | #define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift 11 | ||
887 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift) | ||
888 | |||
889 | /*define for src_bank_w field*/ | ||
890 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_offset 6 | ||
891 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask 0x00000003 | ||
892 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift 15 | ||
893 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift) | ||
894 | |||
895 | /*define for src_bank_h field*/ | ||
896 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_offset 6 | ||
897 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask 0x00000003 | ||
898 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift 18 | ||
899 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift) | ||
900 | |||
901 | /*define for src_num_bank field*/ | ||
902 | #define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_offset 6 | ||
903 | #define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask 0x00000003 | ||
904 | #define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift 21 | ||
905 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift) | ||
906 | |||
907 | /*define for src_mat_aspt field*/ | ||
908 | #define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_offset 6 | ||
909 | #define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask 0x00000003 | ||
910 | #define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift 24 | ||
911 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift) | ||
912 | |||
913 | /*define for src_pipe_config field*/ | ||
914 | #define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_offset 6 | ||
915 | #define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask 0x0000001F | ||
916 | #define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift 26 | ||
917 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift) | ||
918 | |||
919 | /*define for DST_ADDR_LO word*/ | ||
920 | /*define for dst_addr_31_0 field*/ | ||
921 | #define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_offset 7 | ||
922 | #define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
923 | #define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
924 | #define SDMA_PKT_COPY_T2T_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift) | ||
925 | |||
926 | /*define for DST_ADDR_HI word*/ | ||
927 | /*define for dst_addr_63_32 field*/ | ||
928 | #define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_offset 8 | ||
929 | #define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
930 | #define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
931 | #define SDMA_PKT_COPY_T2T_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift) | ||
932 | |||
933 | /*define for DW_9 word*/ | ||
934 | /*define for dst_x field*/ | ||
935 | #define SDMA_PKT_COPY_T2T_DW_9_dst_x_offset 9 | ||
936 | #define SDMA_PKT_COPY_T2T_DW_9_dst_x_mask 0x00003FFF | ||
937 | #define SDMA_PKT_COPY_T2T_DW_9_dst_x_shift 0 | ||
938 | #define SDMA_PKT_COPY_T2T_DW_9_DST_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_x_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_x_shift) | ||
939 | |||
940 | /*define for dst_y field*/ | ||
941 | #define SDMA_PKT_COPY_T2T_DW_9_dst_y_offset 9 | ||
942 | #define SDMA_PKT_COPY_T2T_DW_9_dst_y_mask 0x00003FFF | ||
943 | #define SDMA_PKT_COPY_T2T_DW_9_dst_y_shift 16 | ||
944 | #define SDMA_PKT_COPY_T2T_DW_9_DST_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_y_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_y_shift) | ||
945 | |||
946 | /*define for DW_10 word*/ | ||
947 | /*define for dst_z field*/ | ||
948 | #define SDMA_PKT_COPY_T2T_DW_10_dst_z_offset 10 | ||
949 | #define SDMA_PKT_COPY_T2T_DW_10_dst_z_mask 0x000007FF | ||
950 | #define SDMA_PKT_COPY_T2T_DW_10_dst_z_shift 0 | ||
951 | #define SDMA_PKT_COPY_T2T_DW_10_DST_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_z_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_z_shift) | ||
952 | |||
953 | /*define for dst_pitch_in_tile field*/ | ||
954 | #define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_offset 10 | ||
955 | #define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask 0x00000FFF | ||
956 | #define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift 16 | ||
957 | #define SDMA_PKT_COPY_T2T_DW_10_DST_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift) | ||
958 | |||
959 | /*define for DW_11 word*/ | ||
960 | /*define for dst_slice_pitch field*/ | ||
961 | #define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_offset 11 | ||
962 | #define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask 0x003FFFFF | ||
963 | #define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift 0 | ||
964 | #define SDMA_PKT_COPY_T2T_DW_11_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift) | ||
965 | |||
966 | /*define for DW_12 word*/ | ||
967 | /*define for dst_array_mode field*/ | ||
968 | #define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_offset 12 | ||
969 | #define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask 0x0000000F | ||
970 | #define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift 3 | ||
971 | #define SDMA_PKT_COPY_T2T_DW_12_DST_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift) | ||
972 | |||
973 | /*define for dst_mit_mode field*/ | ||
974 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_offset 12 | ||
975 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask 0x00000007 | ||
976 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift 8 | ||
977 | #define SDMA_PKT_COPY_T2T_DW_12_DST_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift) | ||
978 | |||
979 | /*define for dst_tilesplit_size field*/ | ||
980 | #define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_offset 12 | ||
981 | #define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask 0x00000007 | ||
982 | #define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift 11 | ||
983 | #define SDMA_PKT_COPY_T2T_DW_12_DST_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift) | ||
984 | |||
985 | /*define for dst_bank_w field*/ | ||
986 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_offset 12 | ||
987 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask 0x00000003 | ||
988 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift 15 | ||
989 | #define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift) | ||
990 | |||
991 | /*define for dst_bank_h field*/ | ||
992 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_offset 12 | ||
993 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask 0x00000003 | ||
994 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift 18 | ||
995 | #define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift) | ||
996 | |||
997 | /*define for dst_num_bank field*/ | ||
998 | #define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_offset 12 | ||
999 | #define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask 0x00000003 | ||
1000 | #define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift 21 | ||
1001 | #define SDMA_PKT_COPY_T2T_DW_12_DST_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift) | ||
1002 | |||
1003 | /*define for dst_mat_aspt field*/ | ||
1004 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_offset 12 | ||
1005 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask 0x00000003 | ||
1006 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift 24 | ||
1007 | #define SDMA_PKT_COPY_T2T_DW_12_DST_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift) | ||
1008 | |||
1009 | /*define for dst_pipe_config field*/ | ||
1010 | #define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_offset 12 | ||
1011 | #define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask 0x0000001F | ||
1012 | #define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift 26 | ||
1013 | #define SDMA_PKT_COPY_T2T_DW_12_DST_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift) | ||
1014 | |||
1015 | /*define for DW_13 word*/ | ||
1016 | /*define for rect_x field*/ | ||
1017 | #define SDMA_PKT_COPY_T2T_DW_13_rect_x_offset 13 | ||
1018 | #define SDMA_PKT_COPY_T2T_DW_13_rect_x_mask 0x00003FFF | ||
1019 | #define SDMA_PKT_COPY_T2T_DW_13_rect_x_shift 0 | ||
1020 | #define SDMA_PKT_COPY_T2T_DW_13_RECT_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_x_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_x_shift) | ||
1021 | |||
1022 | /*define for rect_y field*/ | ||
1023 | #define SDMA_PKT_COPY_T2T_DW_13_rect_y_offset 13 | ||
1024 | #define SDMA_PKT_COPY_T2T_DW_13_rect_y_mask 0x00003FFF | ||
1025 | #define SDMA_PKT_COPY_T2T_DW_13_rect_y_shift 16 | ||
1026 | #define SDMA_PKT_COPY_T2T_DW_13_RECT_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_y_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_y_shift) | ||
1027 | |||
1028 | /*define for DW_14 word*/ | ||
1029 | /*define for rect_z field*/ | ||
1030 | #define SDMA_PKT_COPY_T2T_DW_14_rect_z_offset 14 | ||
1031 | #define SDMA_PKT_COPY_T2T_DW_14_rect_z_mask 0x000007FF | ||
1032 | #define SDMA_PKT_COPY_T2T_DW_14_rect_z_shift 0 | ||
1033 | #define SDMA_PKT_COPY_T2T_DW_14_RECT_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_rect_z_mask) << SDMA_PKT_COPY_T2T_DW_14_rect_z_shift) | ||
1034 | |||
1035 | /*define for dst_sw field*/ | ||
1036 | #define SDMA_PKT_COPY_T2T_DW_14_dst_sw_offset 14 | ||
1037 | #define SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask 0x00000003 | ||
1038 | #define SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift 16 | ||
1039 | #define SDMA_PKT_COPY_T2T_DW_14_DST_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift) | ||
1040 | |||
1041 | /*define for src_sw field*/ | ||
1042 | #define SDMA_PKT_COPY_T2T_DW_14_src_sw_offset 14 | ||
1043 | #define SDMA_PKT_COPY_T2T_DW_14_src_sw_mask 0x00000003 | ||
1044 | #define SDMA_PKT_COPY_T2T_DW_14_src_sw_shift 24 | ||
1045 | #define SDMA_PKT_COPY_T2T_DW_14_SRC_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_src_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_src_sw_shift) | ||
1046 | |||
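These macros only pack; going the other way, for example when dumping a captured ring buffer, is the same triple applied in reverse, since the masks are stored right-aligned (pre-shift). A small generic decoder built from the mask/shift pairs above; sdma_get_field and dump_t2t_rect are hypothetical helpers, not something this header provides:

#include <stdint.h>
#include <stdio.h>

/* Extract a field given its right-aligned mask and its shift:
 * the inverse of ((x) & mask) << shift. */
static inline uint32_t sdma_get_field(uint32_t word, uint32_t mask,
				      uint32_t shift)
{
	return (word >> shift) & mask;
}

/* Print the copy rectangle of a COPY_T2T packet (rect_x/rect_y share
 * DW_13, rect_z sits in DW_14). */
static void dump_t2t_rect(const uint32_t *pkt)
{
	uint32_t dw13 = pkt[SDMA_PKT_COPY_T2T_DW_13_rect_x_offset];
	uint32_t dw14 = pkt[SDMA_PKT_COPY_T2T_DW_14_rect_z_offset];

	printf("rect %ux%ux%u\n",
	       (unsigned)sdma_get_field(dw13, SDMA_PKT_COPY_T2T_DW_13_rect_x_mask,
					SDMA_PKT_COPY_T2T_DW_13_rect_x_shift),
	       (unsigned)sdma_get_field(dw13, SDMA_PKT_COPY_T2T_DW_13_rect_y_mask,
					SDMA_PKT_COPY_T2T_DW_13_rect_y_shift),
	       (unsigned)sdma_get_field(dw14, SDMA_PKT_COPY_T2T_DW_14_rect_z_mask,
					SDMA_PKT_COPY_T2T_DW_14_rect_z_shift));
}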
1047 | |||
1048 | /* | ||
1049 | ** Definitions for SDMA_PKT_COPY_TILED_SUBWIN packet | ||
1050 | */ | ||
1051 | |||
1052 | /*define for HEADER word*/ | ||
1053 | /*define for op field*/ | ||
1054 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_offset 0 | ||
1055 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask 0x000000FF | ||
1056 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift 0 | ||
1057 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift) | ||
1058 | |||
1059 | /*define for sub_op field*/ | ||
1060 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_offset 0 | ||
1061 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask 0x000000FF | ||
1062 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift 8 | ||
1063 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift) | ||
1064 | |||
1065 | /*define for detile field*/ | ||
1066 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_offset 0 | ||
1067 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask 0x00000001 | ||
1068 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift 31 | ||
1069 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift) | ||
1070 | |||
1071 | /*define for TILED_ADDR_LO word*/ | ||
1072 | /*define for tiled_addr_31_0 field*/ | ||
1073 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_offset 1 | ||
1074 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask 0xFFFFFFFF | ||
1075 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift 0 | ||
1076 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift) | ||
1077 | |||
1078 | /*define for TILED_ADDR_HI word*/ | ||
1079 | /*define for tiled_addr_63_32 field*/ | ||
1080 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_offset 2 | ||
1081 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask 0xFFFFFFFF | ||
1082 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift 0 | ||
1083 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift) | ||
1084 | |||
1085 | /*define for DW_3 word*/ | ||
1086 | /*define for tiled_x field*/ | ||
1087 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_offset 3 | ||
1088 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask 0x00003FFF | ||
1089 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift 0 | ||
1090 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift) | ||
1091 | |||
1092 | /*define for tiled_y field*/ | ||
1093 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_offset 3 | ||
1094 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask 0x00003FFF | ||
1095 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift 16 | ||
1096 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift) | ||
1097 | |||
1098 | /*define for DW_4 word*/ | ||
1099 | /*define for tiled_z field*/ | ||
1100 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_offset 4 | ||
1101 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask 0x000007FF | ||
1102 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift 0 | ||
1103 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_TILED_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift) | ||
1104 | |||
1105 | /*define for pitch_in_tile field*/ | ||
1106 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_offset 4 | ||
1107 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask 0x00000FFF | ||
1108 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift 16 | ||
1109 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift) | ||
1110 | |||
1111 | /*define for DW_5 word*/ | ||
1112 | /*define for slice_pitch field*/ | ||
1113 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_offset 5 | ||
1114 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask 0x003FFFFF | ||
1115 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift 0 | ||
1116 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift) | ||
1117 | |||
1118 | /*define for DW_6 word*/ | ||
1119 | /*define for element_size field*/ | ||
1120 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_offset 6 | ||
1121 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask 0x00000007 | ||
1122 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift 0 | ||
1123 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift) | ||
1124 | |||
1125 | /*define for array_mode field*/ | ||
1126 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_offset 6 | ||
1127 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask 0x0000000F | ||
1128 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift 3 | ||
1129 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift) | ||
1130 | |||
1131 | /*define for mit_mode field*/ | ||
1132 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_offset 6 | ||
1133 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask 0x00000007 | ||
1134 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift 8 | ||
1135 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift) | ||
1136 | |||
1137 | /*define for tilesplit_size field*/ | ||
1138 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_offset 6 | ||
1139 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask 0x00000007 | ||
1140 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift 11 | ||
1141 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift) | ||
1142 | |||
1143 | /*define for bank_w field*/ | ||
1144 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_offset 6 | ||
1145 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask 0x00000003 | ||
1146 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift 15 | ||
1147 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift) | ||
1148 | |||
1149 | /*define for bank_h field*/ | ||
1150 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_offset 6 | ||
1151 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask 0x00000003 | ||
1152 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift 18 | ||
1153 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift) | ||
1154 | |||
1155 | /*define for num_bank field*/ | ||
1156 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_offset 6 | ||
1157 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask 0x00000003 | ||
1158 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift 21 | ||
1159 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift) | ||
1160 | |||
1161 | /*define for mat_aspt field*/ | ||
1162 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_offset 6 | ||
1163 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask 0x00000003 | ||
1164 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift 24 | ||
1165 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift) | ||
1166 | |||
1167 | /*define for pipe_config field*/ | ||
1168 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_offset 6 | ||
1169 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask 0x0000001F | ||
1170 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift 26 | ||
1171 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift) | ||
1172 | |||
1173 | /*define for LINEAR_ADDR_LO word*/ | ||
1174 | /*define for linear_addr_31_0 field*/ | ||
1175 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_offset 7 | ||
1176 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF | ||
1177 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 | ||
1178 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift) | ||
1179 | |||
1180 | /*define for LINEAR_ADDR_HI word*/ | ||
1181 | /*define for linear_addr_63_32 field*/ | ||
1182 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_offset 8 | ||
1183 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF | ||
1184 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 | ||
1185 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift) | ||
1186 | |||
1187 | /*define for DW_9 word*/ | ||
1188 | /*define for linear_x field*/ | ||
1189 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_offset 9 | ||
1190 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask 0x00003FFF | ||
1191 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift 0 | ||
1192 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift) | ||
1193 | |||
1194 | /*define for linear_y field*/ | ||
1195 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_offset 9 | ||
1196 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask 0x00003FFF | ||
1197 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift 16 | ||
1198 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift) | ||
1199 | |||
1200 | /*define for DW_10 word*/ | ||
1201 | /*define for linear_z field*/ | ||
1202 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_offset 10 | ||
1203 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask 0x000007FF | ||
1204 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift 0 | ||
1205 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift) | ||
1206 | |||
1207 | /*define for linear_pitch field*/ | ||
1208 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_offset 10 | ||
1209 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask 0x00003FFF | ||
1210 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift 16 | ||
1211 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift) | ||
1212 | |||
1213 | /*define for DW_11 word*/ | ||
1214 | /*define for linear_slice_pitch field*/ | ||
1215 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_offset 11 | ||
1216 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask 0x0FFFFFFF | ||
1217 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift 0 | ||
1218 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_LINEAR_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift) | ||
1219 | |||
1220 | /*define for DW_12 word*/ | ||
1221 | /*define for rect_x field*/ | ||
1222 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_offset 12 | ||
1223 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask 0x00003FFF | ||
1224 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift 0 | ||
1225 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift) | ||
1226 | |||
1227 | /*define for rect_y field*/ | ||
1228 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_offset 12 | ||
1229 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask 0x00003FFF | ||
1230 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift 16 | ||
1231 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift) | ||
1232 | |||
1233 | /*define for DW_13 word*/ | ||
1234 | /*define for rect_z field*/ | ||
1235 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_offset 13 | ||
1236 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask 0x000007FF | ||
1237 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift 0 | ||
1238 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_RECT_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift) | ||
1239 | |||
1240 | /*define for linear_sw field*/ | ||
1241 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_offset 13 | ||
1242 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask 0x00000003 | ||
1243 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift 16 | ||
1244 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift) | ||
1245 | |||
1246 | /*define for tile_sw field*/ | ||
1247 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_offset 13 | ||
1248 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask 0x00000003 | ||
1249 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift 24 | ||
1250 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift) | ||
1251 | |||
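COPY_TILED_SUBWIN describes both a tiled and a linear surface in one packet; judging by its name alone, the detile bit (word 0, bit 31) presumably selects the copy direction. A sketch of building the header for either direction, treating that reading and the opcode values as assumptions not confirmed by this excerpt:

#include <stdint.h>
#include <stdbool.h>

#define SDMA_OP_COPY			1	/* assumed value */
#define SDMA_SUBOP_COPY_TILED_SUB_WIND	5	/* assumed value */

/* Hypothetical helper: detile set is read as tiled -> linear,
 * clear as linear -> tiled. */
static uint32_t sdma_tiled_subwin_header(bool tiled_to_linear)
{
	return SDMA_PKT_COPY_TILED_SUBWIN_HEADER_OP(SDMA_OP_COPY) |
	       SDMA_PKT_COPY_TILED_SUBWIN_HEADER_SUB_OP(SDMA_SUBOP_COPY_TILED_SUB_WIND) |
	       SDMA_PKT_COPY_TILED_SUBWIN_HEADER_DETILE(tiled_to_linear ? 1 : 0);
}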
1252 | |||
1253 | /* | ||
1254 | ** Definitions for SDMA_PKT_COPY_STRUCT packet | ||
1255 | */ | ||
1256 | |||
1257 | /*define for HEADER word*/ | ||
1258 | /*define for op field*/ | ||
1259 | #define SDMA_PKT_COPY_STRUCT_HEADER_op_offset 0 | ||
1260 | #define SDMA_PKT_COPY_STRUCT_HEADER_op_mask 0x000000FF | ||
1261 | #define SDMA_PKT_COPY_STRUCT_HEADER_op_shift 0 | ||
1262 | #define SDMA_PKT_COPY_STRUCT_HEADER_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_op_shift) | ||
1263 | |||
1264 | /*define for sub_op field*/ | ||
1265 | #define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_offset 0 | ||
1266 | #define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask 0x000000FF | ||
1267 | #define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift 8 | ||
1268 | #define SDMA_PKT_COPY_STRUCT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift) | ||
1269 | |||
1270 | /*define for detile field*/ | ||
1271 | #define SDMA_PKT_COPY_STRUCT_HEADER_detile_offset 0 | ||
1272 | #define SDMA_PKT_COPY_STRUCT_HEADER_detile_mask 0x00000001 | ||
1273 | #define SDMA_PKT_COPY_STRUCT_HEADER_detile_shift 31 | ||
1274 | #define SDMA_PKT_COPY_STRUCT_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_detile_mask) << SDMA_PKT_COPY_STRUCT_HEADER_detile_shift) | ||
1275 | |||
1276 | /*define for SB_ADDR_LO word*/ | ||
1277 | /*define for sb_addr_31_0 field*/ | ||
1278 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_offset 1 | ||
1279 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask 0xFFFFFFFF | ||
1280 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift 0 | ||
1281 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_SB_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift) | ||
1282 | |||
1283 | /*define for SB_ADDR_HI word*/ | ||
1284 | /*define for sb_addr_63_32 field*/ | ||
1285 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_offset 2 | ||
1286 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask 0xFFFFFFFF | ||
1287 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift 0 | ||
1288 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_SB_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift) | ||
1289 | |||
1290 | /*define for START_INDEX word*/ | ||
1291 | /*define for start_index field*/ | ||
1292 | #define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_offset 3 | ||
1293 | #define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask 0xFFFFFFFF | ||
1294 | #define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift 0 | ||
1295 | #define SDMA_PKT_COPY_STRUCT_START_INDEX_START_INDEX(x) (((x) & SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask) << SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift) | ||
1296 | |||
1297 | /*define for COUNT word*/ | ||
1298 | /*define for count field*/ | ||
1299 | #define SDMA_PKT_COPY_STRUCT_COUNT_count_offset 4 | ||
1300 | #define SDMA_PKT_COPY_STRUCT_COUNT_count_mask 0xFFFFFFFF | ||
1301 | #define SDMA_PKT_COPY_STRUCT_COUNT_count_shift 0 | ||
1302 | #define SDMA_PKT_COPY_STRUCT_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_STRUCT_COUNT_count_mask) << SDMA_PKT_COPY_STRUCT_COUNT_count_shift) | ||
1303 | |||
1304 | /*define for DW_5 word*/ | ||
1305 | /*define for stride field*/ | ||
1306 | #define SDMA_PKT_COPY_STRUCT_DW_5_stride_offset 5 | ||
1307 | #define SDMA_PKT_COPY_STRUCT_DW_5_stride_mask 0x000007FF | ||
1308 | #define SDMA_PKT_COPY_STRUCT_DW_5_stride_shift 0 | ||
1309 | #define SDMA_PKT_COPY_STRUCT_DW_5_STRIDE(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_stride_mask) << SDMA_PKT_COPY_STRUCT_DW_5_stride_shift) | ||
1310 | |||
1311 | /*define for struct_sw field*/ | ||
1312 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_offset 5 | ||
1313 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask 0x00000003 | ||
1314 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift 16 | ||
1315 | #define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift) | ||
1316 | |||
1317 | /*define for struct_ha field*/ | ||
1318 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_offset 5 | ||
1319 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask 0x00000001 | ||
1320 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift 22 | ||
1321 | #define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift) | ||
1322 | |||
1323 | /*define for linear_sw field*/ | ||
1324 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_offset 5 | ||
1325 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask 0x00000003 | ||
1326 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift 24 | ||
1327 | #define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift) | ||
1328 | |||
1329 | /*define for linear_ha field*/ | ||
1330 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_offset 5 | ||
1331 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask 0x00000001 | ||
1332 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift 30 | ||
1333 | #define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift) | ||
1334 | |||
1335 | /*define for LINEAR_ADDR_LO word*/ | ||
1336 | /*define for linear_addr_31_0 field*/ | ||
1337 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_offset 6 | ||
1338 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF | ||
1339 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 | ||
1340 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift) | ||
1341 | |||
1342 | /*define for LINEAR_ADDR_HI word*/ | ||
1343 | /*define for linear_addr_63_32 field*/ | ||
1344 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_offset 7 | ||
1345 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF | ||
1346 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 | ||
1347 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift) | ||
1348 | |||
1349 | |||
1350 | /* | ||
1351 | ** Definitions for SDMA_PKT_WRITE_UNTILED packet | ||
1352 | */ | ||
1353 | |||
1354 | /*define for HEADER word*/ | ||
1355 | /*define for op field*/ | ||
1356 | #define SDMA_PKT_WRITE_UNTILED_HEADER_op_offset 0 | ||
1357 | #define SDMA_PKT_WRITE_UNTILED_HEADER_op_mask 0x000000FF | ||
1358 | #define SDMA_PKT_WRITE_UNTILED_HEADER_op_shift 0 | ||
1359 | #define SDMA_PKT_WRITE_UNTILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_op_shift) | ||
1360 | |||
1361 | /*define for sub_op field*/ | ||
1362 | #define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_offset 0 | ||
1363 | #define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask 0x000000FF | ||
1364 | #define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift 8 | ||
1365 | #define SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift) | ||
1366 | |||
1367 | /*define for DST_ADDR_LO word*/ | ||
1368 | /*define for dst_addr_31_0 field*/ | ||
1369 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_offset 1 | ||
1370 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
1371 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
1372 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift) | ||
1373 | |||
1374 | /*define for DST_ADDR_HI word*/ | ||
1375 | /*define for dst_addr_63_32 field*/ | ||
1376 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_offset 2 | ||
1377 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
1378 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
1379 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift) | ||
1380 | |||
1381 | /*define for DW_3 word*/ | ||
1382 | /*define for count field*/ | ||
1383 | #define SDMA_PKT_WRITE_UNTILED_DW_3_count_offset 3 | ||
1384 | #define SDMA_PKT_WRITE_UNTILED_DW_3_count_mask 0x003FFFFF | ||
1385 | #define SDMA_PKT_WRITE_UNTILED_DW_3_count_shift 0 | ||
1386 | #define SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_count_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_count_shift) | ||
1387 | |||
1388 | /*define for sw field*/ | ||
1389 | #define SDMA_PKT_WRITE_UNTILED_DW_3_sw_offset 3 | ||
1390 | #define SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask 0x00000003 | ||
1391 | #define SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift 24 | ||
1392 | #define SDMA_PKT_WRITE_UNTILED_DW_3_SW(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift) | ||
1393 | |||
1394 | /*define for DATA0 word*/ | ||
1395 | /*define for data0 field*/ | ||
1396 | #define SDMA_PKT_WRITE_UNTILED_DATA0_data0_offset 4 | ||
1397 | #define SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask 0xFFFFFFFF | ||
1398 | #define SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift 0 | ||
1399 | #define SDMA_PKT_WRITE_UNTILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask) << SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift) | ||
1400 | |||
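Editor's note: the pattern throughout this header is uniform — each *_offset constant is the dword index of the field inside the packet, the matching mask/shift pair places the field's value within that dword, and fields sharing a dword are simply ORed together. A minimal sketch of packing a WRITE_UNTILED packet this way (SDMA_OP_WRITE and SDMA_SUBOP_WRITE_LINEAR are assumed to be the opcode constants defined near the top of this header, not taken from the lines above):

/* Sketch only: pack a WRITE_UNTILED packet that writes 'ndw' dwords
 * to GPU address 'dst'; opcode constants are assumptions.
 */
static void sketch_write_untiled(uint32_t *ib, uint64_t dst, uint32_t ndw)
{
	ib[0] = SDMA_PKT_WRITE_UNTILED_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib[1] = SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(lower_32_bits(dst));
	ib[2] = SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(upper_32_bits(dst));
	ib[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(ndw) |	/* 22-bit dword count */
		SDMA_PKT_WRITE_UNTILED_DW_3_SW(0);
	/* the ndw payload dwords follow, starting at the DATA0 slot (ib[4]) */
}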
1401 | |||
1402 | /* | ||
1403 | ** Definitions for SDMA_PKT_WRITE_TILED packet | ||
1404 | */ | ||
1405 | |||
1406 | /*define for HEADER word*/ | ||
1407 | /*define for op field*/ | ||
1408 | #define SDMA_PKT_WRITE_TILED_HEADER_op_offset 0 | ||
1409 | #define SDMA_PKT_WRITE_TILED_HEADER_op_mask 0x000000FF | ||
1410 | #define SDMA_PKT_WRITE_TILED_HEADER_op_shift 0 | ||
1411 | #define SDMA_PKT_WRITE_TILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_op_shift) | ||
1412 | |||
1413 | /*define for sub_op field*/ | ||
1414 | #define SDMA_PKT_WRITE_TILED_HEADER_sub_op_offset 0 | ||
1415 | #define SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask 0x000000FF | ||
1416 | #define SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift 8 | ||
1417 | #define SDMA_PKT_WRITE_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift) | ||
1418 | |||
1419 | /*define for DST_ADDR_LO word*/ | ||
1420 | /*define for dst_addr_31_0 field*/ | ||
1421 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_offset 1 | ||
1422 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
1423 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
1424 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift) | ||
1425 | |||
1426 | /*define for DST_ADDR_HI word*/ | ||
1427 | /*define for dst_addr_63_32 field*/ | ||
1428 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_offset 2 | ||
1429 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
1430 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
1431 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift) | ||
1432 | |||
1433 | /*define for DW_3 word*/ | ||
1434 | /*define for pitch_in_tile field*/ | ||
1435 | #define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_offset 3 | ||
1436 | #define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask 0x000007FF | ||
1437 | #define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift 0 | ||
1438 | #define SDMA_PKT_WRITE_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift) | ||
1439 | |||
1440 | /*define for height field*/ | ||
1441 | #define SDMA_PKT_WRITE_TILED_DW_3_height_offset 3 | ||
1442 | #define SDMA_PKT_WRITE_TILED_DW_3_height_mask 0x00003FFF | ||
1443 | #define SDMA_PKT_WRITE_TILED_DW_3_height_shift 16 | ||
1444 | #define SDMA_PKT_WRITE_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_height_mask) << SDMA_PKT_WRITE_TILED_DW_3_height_shift) | ||
1445 | |||
1446 | /*define for DW_4 word*/ | ||
1447 | /*define for slice_pitch field*/ | ||
1448 | #define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_offset 4 | ||
1449 | #define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask 0x003FFFFF | ||
1450 | #define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift 0 | ||
1451 | #define SDMA_PKT_WRITE_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift) | ||
1452 | |||
1453 | /*define for DW_5 word*/ | ||
1454 | /*define for element_size field*/ | ||
1455 | #define SDMA_PKT_WRITE_TILED_DW_5_element_size_offset 5 | ||
1456 | #define SDMA_PKT_WRITE_TILED_DW_5_element_size_mask 0x00000007 | ||
1457 | #define SDMA_PKT_WRITE_TILED_DW_5_element_size_shift 0 | ||
1458 | #define SDMA_PKT_WRITE_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_element_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_element_size_shift) | ||
1459 | |||
1460 | /*define for array_mode field*/ | ||
1461 | #define SDMA_PKT_WRITE_TILED_DW_5_array_mode_offset 5 | ||
1462 | #define SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask 0x0000000F | ||
1463 | #define SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift 3 | ||
1464 | #define SDMA_PKT_WRITE_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift) | ||
1465 | |||
1466 | /*define for mit_mode field*/ | ||
1467 | #define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_offset 5 | ||
1468 | #define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask 0x00000007 | ||
1469 | #define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift 8 | ||
1470 | #define SDMA_PKT_WRITE_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift) | ||
1471 | |||
1472 | /*define for tilesplit_size field*/ | ||
1473 | #define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_offset 5 | ||
1474 | #define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask 0x00000007 | ||
1475 | #define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift 11 | ||
1476 | #define SDMA_PKT_WRITE_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift) | ||
1477 | |||
1478 | /*define for bank_w field*/ | ||
1479 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_w_offset 5 | ||
1480 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask 0x00000003 | ||
1481 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift 15 | ||
1482 | #define SDMA_PKT_WRITE_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift) | ||
1483 | |||
1484 | /*define for bank_h field*/ | ||
1485 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_h_offset 5 | ||
1486 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask 0x00000003 | ||
1487 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift 18 | ||
1488 | #define SDMA_PKT_WRITE_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift) | ||
1489 | |||
1490 | /*define for num_bank field*/ | ||
1491 | #define SDMA_PKT_WRITE_TILED_DW_5_num_bank_offset 5 | ||
1492 | #define SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask 0x00000003 | ||
1493 | #define SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift 21 | ||
1494 | #define SDMA_PKT_WRITE_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask) << SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift) | ||
1495 | |||
1496 | /*define for mat_aspt field*/ | ||
1497 | #define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_offset 5 | ||
1498 | #define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask 0x00000003 | ||
1499 | #define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift 24 | ||
1500 | #define SDMA_PKT_WRITE_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift) | ||
1501 | |||
1502 | /*define for pipe_config field*/ | ||
1503 | #define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_offset 5 | ||
1504 | #define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask 0x0000001F | ||
1505 | #define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift 26 | ||
1506 | #define SDMA_PKT_WRITE_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask) << SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift) | ||
1507 | |||
1508 | /*define for DW_6 word*/ | ||
1509 | /*define for x field*/ | ||
1510 | #define SDMA_PKT_WRITE_TILED_DW_6_x_offset 6 | ||
1511 | #define SDMA_PKT_WRITE_TILED_DW_6_x_mask 0x00003FFF | ||
1512 | #define SDMA_PKT_WRITE_TILED_DW_6_x_shift 0 | ||
1513 | #define SDMA_PKT_WRITE_TILED_DW_6_X(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_x_mask) << SDMA_PKT_WRITE_TILED_DW_6_x_shift) | ||
1514 | |||
1515 | /*define for y field*/ | ||
1516 | #define SDMA_PKT_WRITE_TILED_DW_6_y_offset 6 | ||
1517 | #define SDMA_PKT_WRITE_TILED_DW_6_y_mask 0x00003FFF | ||
1518 | #define SDMA_PKT_WRITE_TILED_DW_6_y_shift 16 | ||
1519 | #define SDMA_PKT_WRITE_TILED_DW_6_Y(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_y_mask) << SDMA_PKT_WRITE_TILED_DW_6_y_shift) | ||
1520 | |||
1521 | /*define for DW_7 word*/ | ||
1522 | /*define for z field*/ | ||
1523 | #define SDMA_PKT_WRITE_TILED_DW_7_z_offset 7 | ||
1524 | #define SDMA_PKT_WRITE_TILED_DW_7_z_mask 0x00000FFF | ||
1525 | #define SDMA_PKT_WRITE_TILED_DW_7_z_shift 0 | ||
1526 | #define SDMA_PKT_WRITE_TILED_DW_7_Z(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_z_mask) << SDMA_PKT_WRITE_TILED_DW_7_z_shift) | ||
1527 | |||
1528 | /*define for sw field*/ | ||
1529 | #define SDMA_PKT_WRITE_TILED_DW_7_sw_offset 7 | ||
1530 | #define SDMA_PKT_WRITE_TILED_DW_7_sw_mask 0x00000003 | ||
1531 | #define SDMA_PKT_WRITE_TILED_DW_7_sw_shift 24 | ||
1532 | #define SDMA_PKT_WRITE_TILED_DW_7_SW(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_sw_mask) << SDMA_PKT_WRITE_TILED_DW_7_sw_shift) | ||
1533 | |||
1534 | /*define for COUNT word*/ | ||
1535 | /*define for count field*/ | ||
1536 | #define SDMA_PKT_WRITE_TILED_COUNT_count_offset 8 | ||
1537 | #define SDMA_PKT_WRITE_TILED_COUNT_count_mask 0x003FFFFF | ||
1538 | #define SDMA_PKT_WRITE_TILED_COUNT_count_shift 0 | ||
1539 | #define SDMA_PKT_WRITE_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_TILED_COUNT_count_mask) << SDMA_PKT_WRITE_TILED_COUNT_count_shift) | ||
1540 | |||
1541 | /*define for DATA0 word*/ | ||
1542 | /*define for data0 field*/ | ||
1543 | #define SDMA_PKT_WRITE_TILED_DATA0_data0_offset 9 | ||
1544 | #define SDMA_PKT_WRITE_TILED_DATA0_data0_mask 0xFFFFFFFF | ||
1545 | #define SDMA_PKT_WRITE_TILED_DATA0_data0_shift 0 | ||
1546 | #define SDMA_PKT_WRITE_TILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_TILED_DATA0_data0_mask) << SDMA_PKT_WRITE_TILED_DATA0_data0_shift) | ||
1547 | |||
1548 | |||
1549 | /* | ||
1550 | ** Definitions for SDMA_PKT_WRITE_INCR packet | ||
1551 | */ | ||
1552 | |||
1553 | /*define for HEADER word*/ | ||
1554 | /*define for op field*/ | ||
1555 | #define SDMA_PKT_WRITE_INCR_HEADER_op_offset 0 | ||
1556 | #define SDMA_PKT_WRITE_INCR_HEADER_op_mask 0x000000FF | ||
1557 | #define SDMA_PKT_WRITE_INCR_HEADER_op_shift 0 | ||
1558 | #define SDMA_PKT_WRITE_INCR_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_op_shift) | ||
1559 | |||
1560 | /*define for sub_op field*/ | ||
1561 | #define SDMA_PKT_WRITE_INCR_HEADER_sub_op_offset 0 | ||
1562 | #define SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask 0x000000FF | ||
1563 | #define SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift 8 | ||
1564 | #define SDMA_PKT_WRITE_INCR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift) | ||
1565 | |||
1566 | /*define for DST_ADDR_LO word*/ | ||
1567 | /*define for dst_addr_31_0 field*/ | ||
1568 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_offset 1 | ||
1569 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
1570 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
1571 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift) | ||
1572 | |||
1573 | /*define for DST_ADDR_HI word*/ | ||
1574 | /*define for dst_addr_63_32 field*/ | ||
1575 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_offset 2 | ||
1576 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
1577 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
1578 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift) | ||
1579 | |||
1580 | /*define for MASK_DW0 word*/ | ||
1581 | /*define for mask_dw0 field*/ | ||
1582 | #define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_offset 3 | ||
1583 | #define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask 0xFFFFFFFF | ||
1584 | #define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift 0 | ||
1585 | #define SDMA_PKT_WRITE_INCR_MASK_DW0_MASK_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask) << SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift) | ||
1586 | |||
1587 | /*define for MASK_DW1 word*/ | ||
1588 | /*define for mask_dw1 field*/ | ||
1589 | #define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_offset 4 | ||
1590 | #define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask 0xFFFFFFFF | ||
1591 | #define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift 0 | ||
1592 | #define SDMA_PKT_WRITE_INCR_MASK_DW1_MASK_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask) << SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift) | ||
1593 | |||
1594 | /*define for INIT_DW0 word*/ | ||
1595 | /*define for init_dw0 field*/ | ||
1596 | #define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_offset 5 | ||
1597 | #define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask 0xFFFFFFFF | ||
1598 | #define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift 0 | ||
1599 | #define SDMA_PKT_WRITE_INCR_INIT_DW0_INIT_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask) << SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift) | ||
1600 | |||
1601 | /*define for INIT_DW1 word*/ | ||
1602 | /*define for init_dw1 field*/ | ||
1603 | #define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_offset 6 | ||
1604 | #define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask 0xFFFFFFFF | ||
1605 | #define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift 0 | ||
1606 | #define SDMA_PKT_WRITE_INCR_INIT_DW1_INIT_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask) << SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift) | ||
1607 | |||
1608 | /*define for INCR_DW0 word*/ | ||
1609 | /*define for incr_dw0 field*/ | ||
1610 | #define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_offset 7 | ||
1611 | #define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask 0xFFFFFFFF | ||
1612 | #define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift 0 | ||
1613 | #define SDMA_PKT_WRITE_INCR_INCR_DW0_INCR_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask) << SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift) | ||
1614 | |||
1615 | /*define for INCR_DW1 word*/ | ||
1616 | /*define for incr_dw1 field*/ | ||
1617 | #define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_offset 8 | ||
1618 | #define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask 0xFFFFFFFF | ||
1619 | #define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift 0 | ||
1620 | #define SDMA_PKT_WRITE_INCR_INCR_DW1_INCR_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask) << SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift) | ||
1621 | |||
1622 | /*define for COUNT word*/ | ||
1623 | /*define for count field*/ | ||
1624 | #define SDMA_PKT_WRITE_INCR_COUNT_count_offset 9 | ||
1625 | #define SDMA_PKT_WRITE_INCR_COUNT_count_mask 0x0007FFFF | ||
1626 | #define SDMA_PKT_WRITE_INCR_COUNT_count_shift 0 | ||
1627 | #define SDMA_PKT_WRITE_INCR_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_INCR_COUNT_count_mask) << SDMA_PKT_WRITE_INCR_COUNT_count_shift) | ||
1628 | |||
1629 | |||
1630 | /* | ||
1631 | ** Definitions for SDMA_PKT_INDIRECT packet | ||
1632 | */ | ||
1633 | |||
1634 | /*define for HEADER word*/ | ||
1635 | /*define for op field*/ | ||
1636 | #define SDMA_PKT_INDIRECT_HEADER_op_offset 0 | ||
1637 | #define SDMA_PKT_INDIRECT_HEADER_op_mask 0x000000FF | ||
1638 | #define SDMA_PKT_INDIRECT_HEADER_op_shift 0 | ||
1639 | #define SDMA_PKT_INDIRECT_HEADER_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_op_mask) << SDMA_PKT_INDIRECT_HEADER_op_shift) | ||
1640 | |||
1641 | /*define for sub_op field*/ | ||
1642 | #define SDMA_PKT_INDIRECT_HEADER_sub_op_offset 0 | ||
1643 | #define SDMA_PKT_INDIRECT_HEADER_sub_op_mask 0x000000FF | ||
1644 | #define SDMA_PKT_INDIRECT_HEADER_sub_op_shift 8 | ||
1645 | #define SDMA_PKT_INDIRECT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_sub_op_mask) << SDMA_PKT_INDIRECT_HEADER_sub_op_shift) | ||
1646 | |||
1647 | /*define for vmid field*/ | ||
1648 | #define SDMA_PKT_INDIRECT_HEADER_vmid_offset 0 | ||
1649 | #define SDMA_PKT_INDIRECT_HEADER_vmid_mask 0x0000000F | ||
1650 | #define SDMA_PKT_INDIRECT_HEADER_vmid_shift 16 | ||
1651 | #define SDMA_PKT_INDIRECT_HEADER_VMID(x) (((x) & SDMA_PKT_INDIRECT_HEADER_vmid_mask) << SDMA_PKT_INDIRECT_HEADER_vmid_shift) | ||
1652 | |||
1653 | /*define for BASE_LO word*/ | ||
1654 | /*define for ib_base_31_0 field*/ | ||
1655 | #define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_offset 1 | ||
1656 | #define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask 0xFFFFFFFF | ||
1657 | #define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift 0 | ||
1658 | #define SDMA_PKT_INDIRECT_BASE_LO_IB_BASE_31_0(x) (((x) & SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask) << SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift) | ||
1659 | |||
1660 | /*define for BASE_HI word*/ | ||
1661 | /*define for ib_base_63_32 field*/ | ||
1662 | #define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_offset 2 | ||
1663 | #define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask 0xFFFFFFFF | ||
1664 | #define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift 0 | ||
1665 | #define SDMA_PKT_INDIRECT_BASE_HI_IB_BASE_63_32(x) (((x) & SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask) << SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift) | ||
1666 | |||
1667 | /*define for IB_SIZE word*/ | ||
1668 | /*define for ib_size field*/ | ||
1669 | #define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_offset 3 | ||
1670 | #define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask 0x000FFFFF | ||
1671 | #define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift 0 | ||
1672 | #define SDMA_PKT_INDIRECT_IB_SIZE_IB_SIZE(x) (((x) & SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask) << SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift) | ||
1673 | |||
1674 | /*define for CSA_ADDR_LO word*/ | ||
1675 | /*define for csa_addr_31_0 field*/ | ||
1676 | #define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_offset 4 | ||
1677 | #define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask 0xFFFFFFFF | ||
1678 | #define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift 0 | ||
1679 | #define SDMA_PKT_INDIRECT_CSA_ADDR_LO_CSA_ADDR_31_0(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift) | ||
1680 | |||
1681 | /*define for CSA_ADDR_HI word*/ | ||
1682 | /*define for csa_addr_63_32 field*/ | ||
1683 | #define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_offset 5 | ||
1684 | #define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask 0xFFFFFFFF | ||
1685 | #define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift 0 | ||
1686 | #define SDMA_PKT_INDIRECT_CSA_ADDR_HI_CSA_ADDR_63_32(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift) | ||
1687 | |||
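Editor's note: the INDIRECT packet chains execution into a separate command buffer — BASE is the IB's GPU address, IB_SIZE its length in dwords, vmid the address space it is fetched through, and CSA the context-save area. A hedged sketch of launching an IB with these macros (SDMA_OP_INDIRECT is an assumed opcode constant; the CSA address is left at zero for brevity):

/* Sketch only: launch an indirect buffer of 'ndw' dwords at 'ib_base'. */
static void sketch_emit_ib(uint32_t *pkt, uint64_t ib_base,
			   uint32_t ndw, uint32_t vmid)
{
	pkt[0] = SDMA_PKT_INDIRECT_HEADER_OP(SDMA_OP_INDIRECT) |
		 SDMA_PKT_INDIRECT_HEADER_VMID(vmid);
	pkt[1] = SDMA_PKT_INDIRECT_BASE_LO_IB_BASE_31_0(lower_32_bits(ib_base));
	pkt[2] = SDMA_PKT_INDIRECT_BASE_HI_IB_BASE_63_32(upper_32_bits(ib_base));
	pkt[3] = SDMA_PKT_INDIRECT_IB_SIZE_IB_SIZE(ndw);
	pkt[4] = 0;	/* CSA_ADDR_LO, unused in this sketch */
	pkt[5] = 0;	/* CSA_ADDR_HI, unused in this sketch */
}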
1688 | |||
1689 | /* | ||
1690 | ** Definitions for SDMA_PKT_SEMAPHORE packet | ||
1691 | */ | ||
1692 | |||
1693 | /*define for HEADER word*/ | ||
1694 | /*define for op field*/ | ||
1695 | #define SDMA_PKT_SEMAPHORE_HEADER_op_offset 0 | ||
1696 | #define SDMA_PKT_SEMAPHORE_HEADER_op_mask 0x000000FF | ||
1697 | #define SDMA_PKT_SEMAPHORE_HEADER_op_shift 0 | ||
1698 | #define SDMA_PKT_SEMAPHORE_HEADER_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_op_shift) | ||
1699 | |||
1700 | /*define for sub_op field*/ | ||
1701 | #define SDMA_PKT_SEMAPHORE_HEADER_sub_op_offset 0 | ||
1702 | #define SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask 0x000000FF | ||
1703 | #define SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift 8 | ||
1704 | #define SDMA_PKT_SEMAPHORE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift) | ||
1705 | |||
1706 | /*define for write_one field*/ | ||
1707 | #define SDMA_PKT_SEMAPHORE_HEADER_write_one_offset 0 | ||
1708 | #define SDMA_PKT_SEMAPHORE_HEADER_write_one_mask 0x00000001 | ||
1709 | #define SDMA_PKT_SEMAPHORE_HEADER_write_one_shift 29 | ||
1710 | #define SDMA_PKT_SEMAPHORE_HEADER_WRITE_ONE(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_write_one_mask) << SDMA_PKT_SEMAPHORE_HEADER_write_one_shift) | ||
1711 | |||
1712 | /*define for signal field*/ | ||
1713 | #define SDMA_PKT_SEMAPHORE_HEADER_signal_offset 0 | ||
1714 | #define SDMA_PKT_SEMAPHORE_HEADER_signal_mask 0x00000001 | ||
1715 | #define SDMA_PKT_SEMAPHORE_HEADER_signal_shift 30 | ||
1716 | #define SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_signal_mask) << SDMA_PKT_SEMAPHORE_HEADER_signal_shift) | ||
1717 | |||
1718 | /*define for mailbox field*/ | ||
1719 | #define SDMA_PKT_SEMAPHORE_HEADER_mailbox_offset 0 | ||
1720 | #define SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask 0x00000001 | ||
1721 | #define SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift 31 | ||
1722 | #define SDMA_PKT_SEMAPHORE_HEADER_MAILBOX(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask) << SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift) | ||
1723 | |||
1724 | /*define for ADDR_LO word*/ | ||
1725 | /*define for addr_31_0 field*/ | ||
1726 | #define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_offset 1 | ||
1727 | #define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
1728 | #define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift 0 | ||
1729 | #define SDMA_PKT_SEMAPHORE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift) | ||
1730 | |||
1731 | /*define for ADDR_HI word*/ | ||
1732 | /*define for addr_63_32 field*/ | ||
1733 | #define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_offset 2 | ||
1734 | #define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
1735 | #define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift 0 | ||
1736 | #define SDMA_PKT_SEMAPHORE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift) | ||
1737 | |||
1738 | |||
1739 | /* | ||
1740 | ** Definitions for SDMA_PKT_FENCE packet | ||
1741 | */ | ||
1742 | |||
1743 | /*define for HEADER word*/ | ||
1744 | /*define for op field*/ | ||
1745 | #define SDMA_PKT_FENCE_HEADER_op_offset 0 | ||
1746 | #define SDMA_PKT_FENCE_HEADER_op_mask 0x000000FF | ||
1747 | #define SDMA_PKT_FENCE_HEADER_op_shift 0 | ||
1748 | #define SDMA_PKT_FENCE_HEADER_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_op_mask) << SDMA_PKT_FENCE_HEADER_op_shift) | ||
1749 | |||
1750 | /*define for sub_op field*/ | ||
1751 | #define SDMA_PKT_FENCE_HEADER_sub_op_offset 0 | ||
1752 | #define SDMA_PKT_FENCE_HEADER_sub_op_mask 0x000000FF | ||
1753 | #define SDMA_PKT_FENCE_HEADER_sub_op_shift 8 | ||
1754 | #define SDMA_PKT_FENCE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_sub_op_mask) << SDMA_PKT_FENCE_HEADER_sub_op_shift) | ||
1755 | |||
1756 | /*define for ADDR_LO word*/ | ||
1757 | /*define for addr_31_0 field*/ | ||
1758 | #define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_offset 1 | ||
1759 | #define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
1760 | #define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift 0 | ||
1761 | #define SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift) | ||
1762 | |||
1763 | /*define for ADDR_HI word*/ | ||
1764 | /*define for addr_63_32 field*/ | ||
1765 | #define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_offset 2 | ||
1766 | #define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
1767 | #define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift 0 | ||
1768 | #define SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift) | ||
1769 | |||
1770 | /*define for DATA word*/ | ||
1771 | /*define for data field*/ | ||
1772 | #define SDMA_PKT_FENCE_DATA_data_offset 3 | ||
1773 | #define SDMA_PKT_FENCE_DATA_data_mask 0xFFFFFFFF | ||
1774 | #define SDMA_PKT_FENCE_DATA_data_shift 0 | ||
1775 | #define SDMA_PKT_FENCE_DATA_DATA(x) (((x) & SDMA_PKT_FENCE_DATA_data_mask) << SDMA_PKT_FENCE_DATA_data_shift) | ||
1776 | |||
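Editor's note: the FENCE packet is the simplest consumer of these macros — four dwords that write one 32-bit value (typically a sequence number) to an address when the preceding work completes. A hedged sketch of assembling one (SDMA_OP_FENCE is an assumed opcode constant from earlier in this header):

/* Sketch only: build the 4-dword fence packet that writes 'seq' to 'addr'. */
static void sketch_emit_fence(uint32_t *ib, uint64_t addr, uint32_t seq)
{
	ib[0] = SDMA_PKT_FENCE_HEADER_OP(SDMA_OP_FENCE);
	ib[1] = SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(lower_32_bits(addr));
	ib[2] = SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(upper_32_bits(addr));
	ib[3] = SDMA_PKT_FENCE_DATA_DATA(seq);
}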
1777 | |||
1778 | /* | ||
1779 | ** Definitions for SDMA_PKT_SRBM_WRITE packet | ||
1780 | */ | ||
1781 | |||
1782 | /*define for HEADER word*/ | ||
1783 | /*define for op field*/ | ||
1784 | #define SDMA_PKT_SRBM_WRITE_HEADER_op_offset 0 | ||
1785 | #define SDMA_PKT_SRBM_WRITE_HEADER_op_mask 0x000000FF | ||
1786 | #define SDMA_PKT_SRBM_WRITE_HEADER_op_shift 0 | ||
1787 | #define SDMA_PKT_SRBM_WRITE_HEADER_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_op_shift) | ||
1788 | |||
1789 | /*define for sub_op field*/ | ||
1790 | #define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_offset 0 | ||
1791 | #define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask 0x000000FF | ||
1792 | #define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift 8 | ||
1793 | #define SDMA_PKT_SRBM_WRITE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift) | ||
1794 | |||
1795 | /*define for byte_en field*/ | ||
1796 | #define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_offset 0 | ||
1797 | #define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask 0x0000000F | ||
1798 | #define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift 28 | ||
1799 | #define SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask) << SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift) | ||
1800 | |||
1801 | /*define for ADDR word*/ | ||
1802 | /*define for addr field*/ | ||
1803 | #define SDMA_PKT_SRBM_WRITE_ADDR_addr_offset 1 | ||
1804 | #define SDMA_PKT_SRBM_WRITE_ADDR_addr_mask 0x0000FFFF | ||
1805 | #define SDMA_PKT_SRBM_WRITE_ADDR_addr_shift 0 | ||
1806 | #define SDMA_PKT_SRBM_WRITE_ADDR_ADDR(x) (((x) & SDMA_PKT_SRBM_WRITE_ADDR_addr_mask) << SDMA_PKT_SRBM_WRITE_ADDR_addr_shift) | ||
1807 | |||
1808 | /*define for DATA word*/ | ||
1809 | /*define for data field*/ | ||
1810 | #define SDMA_PKT_SRBM_WRITE_DATA_data_offset 2 | ||
1811 | #define SDMA_PKT_SRBM_WRITE_DATA_data_mask 0xFFFFFFFF | ||
1812 | #define SDMA_PKT_SRBM_WRITE_DATA_data_shift 0 | ||
1813 | #define SDMA_PKT_SRBM_WRITE_DATA_DATA(x) (((x) & SDMA_PKT_SRBM_WRITE_DATA_data_mask) << SDMA_PKT_SRBM_WRITE_DATA_data_shift) | ||
1814 | |||
1815 | |||
1816 | /* | ||
1817 | ** Definitions for SDMA_PKT_PRE_EXE packet | ||
1818 | */ | ||
1819 | |||
1820 | /*define for HEADER word*/ | ||
1821 | /*define for op field*/ | ||
1822 | #define SDMA_PKT_PRE_EXE_HEADER_op_offset 0 | ||
1823 | #define SDMA_PKT_PRE_EXE_HEADER_op_mask 0x000000FF | ||
1824 | #define SDMA_PKT_PRE_EXE_HEADER_op_shift 0 | ||
1825 | #define SDMA_PKT_PRE_EXE_HEADER_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_op_mask) << SDMA_PKT_PRE_EXE_HEADER_op_shift) | ||
1826 | |||
1827 | /*define for sub_op field*/ | ||
1828 | #define SDMA_PKT_PRE_EXE_HEADER_sub_op_offset 0 | ||
1829 | #define SDMA_PKT_PRE_EXE_HEADER_sub_op_mask 0x000000FF | ||
1830 | #define SDMA_PKT_PRE_EXE_HEADER_sub_op_shift 8 | ||
1831 | #define SDMA_PKT_PRE_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_sub_op_mask) << SDMA_PKT_PRE_EXE_HEADER_sub_op_shift) | ||
1832 | |||
1833 | /*define for dev_sel field*/ | ||
1834 | #define SDMA_PKT_PRE_EXE_HEADER_dev_sel_offset 0 | ||
1835 | #define SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask 0x000000FF | ||
1836 | #define SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift 16 | ||
1837 | #define SDMA_PKT_PRE_EXE_HEADER_DEV_SEL(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask) << SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift) | ||
1838 | |||
1839 | /*define for EXEC_COUNT word*/ | ||
1840 | /*define for exec_count field*/ | ||
1841 | #define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_offset 1 | ||
1842 | #define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask 0x00003FFF | ||
1843 | #define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift 0 | ||
1844 | #define SDMA_PKT_PRE_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift) | ||
1845 | |||
1846 | |||
1847 | /* | ||
1848 | ** Definitions for SDMA_PKT_COND_EXE packet | ||
1849 | */ | ||
1850 | |||
1851 | /*define for HEADER word*/ | ||
1852 | /*define for op field*/ | ||
1853 | #define SDMA_PKT_COND_EXE_HEADER_op_offset 0 | ||
1854 | #define SDMA_PKT_COND_EXE_HEADER_op_mask 0x000000FF | ||
1855 | #define SDMA_PKT_COND_EXE_HEADER_op_shift 0 | ||
1856 | #define SDMA_PKT_COND_EXE_HEADER_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_op_mask) << SDMA_PKT_COND_EXE_HEADER_op_shift) | ||
1857 | |||
1858 | /*define for sub_op field*/ | ||
1859 | #define SDMA_PKT_COND_EXE_HEADER_sub_op_offset 0 | ||
1860 | #define SDMA_PKT_COND_EXE_HEADER_sub_op_mask 0x000000FF | ||
1861 | #define SDMA_PKT_COND_EXE_HEADER_sub_op_shift 8 | ||
1862 | #define SDMA_PKT_COND_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_sub_op_mask) << SDMA_PKT_COND_EXE_HEADER_sub_op_shift) | ||
1863 | |||
1864 | /*define for ADDR_LO word*/ | ||
1865 | /*define for addr_31_0 field*/ | ||
1866 | #define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_offset 1 | ||
1867 | #define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
1868 | #define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift 0 | ||
1869 | #define SDMA_PKT_COND_EXE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift) | ||
1870 | |||
1871 | /*define for ADDR_HI word*/ | ||
1872 | /*define for addr_63_32 field*/ | ||
1873 | #define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_offset 2 | ||
1874 | #define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
1875 | #define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift 0 | ||
1876 | #define SDMA_PKT_COND_EXE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift) | ||
1877 | |||
1878 | /*define for REFERENCE word*/ | ||
1879 | /*define for reference field*/ | ||
1880 | #define SDMA_PKT_COND_EXE_REFERENCE_reference_offset 3 | ||
1881 | #define SDMA_PKT_COND_EXE_REFERENCE_reference_mask 0xFFFFFFFF | ||
1882 | #define SDMA_PKT_COND_EXE_REFERENCE_reference_shift 0 | ||
1883 | #define SDMA_PKT_COND_EXE_REFERENCE_REFERENCE(x) (((x) & SDMA_PKT_COND_EXE_REFERENCE_reference_mask) << SDMA_PKT_COND_EXE_REFERENCE_reference_shift) | ||
1884 | |||
1885 | /*define for EXEC_COUNT word*/ | ||
1886 | /*define for exec_count field*/ | ||
1887 | #define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_offset 4 | ||
1888 | #define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask 0x00003FFF | ||
1889 | #define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift 0 | ||
1890 | #define SDMA_PKT_COND_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift) | ||
1891 | |||
1892 | |||
1893 | /* | ||
1894 | ** Definitions for SDMA_PKT_CONSTANT_FILL packet | ||
1895 | */ | ||
1896 | |||
1897 | /*define for HEADER word*/ | ||
1898 | /*define for op field*/ | ||
1899 | #define SDMA_PKT_CONSTANT_FILL_HEADER_op_offset 0 | ||
1900 | #define SDMA_PKT_CONSTANT_FILL_HEADER_op_mask 0x000000FF | ||
1901 | #define SDMA_PKT_CONSTANT_FILL_HEADER_op_shift 0 | ||
1902 | #define SDMA_PKT_CONSTANT_FILL_HEADER_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_op_shift) | ||
1903 | |||
1904 | /*define for sub_op field*/ | ||
1905 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_offset 0 | ||
1906 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask 0x000000FF | ||
1907 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift 8 | ||
1908 | #define SDMA_PKT_CONSTANT_FILL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift) | ||
1909 | |||
1910 | /*define for sw field*/ | ||
1911 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sw_offset 0 | ||
1912 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask 0x00000003 | ||
1913 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift 16 | ||
1914 | #define SDMA_PKT_CONSTANT_FILL_HEADER_SW(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift) | ||
1915 | |||
1916 | /*define for fillsize field*/ | ||
1917 | #define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_offset 0 | ||
1918 | #define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask 0x00000003 | ||
1919 | #define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift 30 | ||
1920 | #define SDMA_PKT_CONSTANT_FILL_HEADER_FILLSIZE(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift) | ||
1921 | |||
1922 | /*define for DST_ADDR_LO word*/ | ||
1923 | /*define for dst_addr_31_0 field*/ | ||
1924 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_offset 1 | ||
1925 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
1926 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
1927 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift) | ||
1928 | |||
1929 | /*define for DST_ADDR_HI word*/ | ||
1930 | /*define for dst_addr_63_32 field*/ | ||
1931 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_offset 2 | ||
1932 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
1933 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
1934 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift) | ||
1935 | |||
1936 | /*define for DATA word*/ | ||
1937 | /*define for src_data_31_0 field*/ | ||
1938 | #define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_offset 3 | ||
1939 | #define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask 0xFFFFFFFF | ||
1940 | #define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift 0 | ||
1941 | #define SDMA_PKT_CONSTANT_FILL_DATA_SRC_DATA_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift) | ||
1942 | |||
1943 | /*define for COUNT word*/ | ||
1944 | /*define for count field*/ | ||
1945 | #define SDMA_PKT_CONSTANT_FILL_COUNT_count_offset 4 | ||
1946 | #define SDMA_PKT_CONSTANT_FILL_COUNT_count_mask 0x003FFFFF | ||
1947 | #define SDMA_PKT_CONSTANT_FILL_COUNT_count_shift 0 | ||
1948 | #define SDMA_PKT_CONSTANT_FILL_COUNT_COUNT(x) (((x) & SDMA_PKT_CONSTANT_FILL_COUNT_count_mask) << SDMA_PKT_CONSTANT_FILL_COUNT_count_shift) | ||
1949 | |||
1950 | |||
1951 | /* | ||
1952 | ** Definitions for SDMA_PKT_POLL_REGMEM packet | ||
1953 | */ | ||
1954 | |||
1955 | /*define for HEADER word*/ | ||
1956 | /*define for op field*/ | ||
1957 | #define SDMA_PKT_POLL_REGMEM_HEADER_op_offset 0 | ||
1958 | #define SDMA_PKT_POLL_REGMEM_HEADER_op_mask 0x000000FF | ||
1959 | #define SDMA_PKT_POLL_REGMEM_HEADER_op_shift 0 | ||
1960 | #define SDMA_PKT_POLL_REGMEM_HEADER_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_op_shift) | ||
1961 | |||
1962 | /*define for sub_op field*/ | ||
1963 | #define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_offset 0 | ||
1964 | #define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask 0x000000FF | ||
1965 | #define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift 8 | ||
1966 | #define SDMA_PKT_POLL_REGMEM_HEADER_SUB_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift) | ||
1967 | |||
1968 | /*define for hdp_flush field*/ | ||
1969 | #define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_offset 0 | ||
1970 | #define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask 0x00000001 | ||
1971 | #define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift 26 | ||
1972 | #define SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask) << SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift) | ||
1973 | |||
1974 | /*define for func field*/ | ||
1975 | #define SDMA_PKT_POLL_REGMEM_HEADER_func_offset 0 | ||
1976 | #define SDMA_PKT_POLL_REGMEM_HEADER_func_mask 0x00000007 | ||
1977 | #define SDMA_PKT_POLL_REGMEM_HEADER_func_shift 28 | ||
1978 | #define SDMA_PKT_POLL_REGMEM_HEADER_FUNC(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_func_mask) << SDMA_PKT_POLL_REGMEM_HEADER_func_shift) | ||
1979 | |||
1980 | /*define for mem_poll field*/ | ||
1981 | #define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_offset 0 | ||
1982 | #define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask 0x00000001 | ||
1983 | #define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift 31 | ||
1984 | #define SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask) << SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift) | ||
1985 | |||
1986 | /*define for ADDR_LO word*/ | ||
1987 | /*define for addr_31_0 field*/ | ||
1988 | #define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_offset 1 | ||
1989 | #define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
1990 | #define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift 0 | ||
1991 | #define SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask) << SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift) | ||
1992 | |||
1993 | /*define for ADDR_HI word*/ | ||
1994 | /*define for addr_63_32 field*/ | ||
1995 | #define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_offset 2 | ||
1996 | #define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
1997 | #define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift 0 | ||
1998 | #define SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask) << SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift) | ||
1999 | |||
2000 | /*define for VALUE word*/ | ||
2001 | /*define for value field*/ | ||
2002 | #define SDMA_PKT_POLL_REGMEM_VALUE_value_offset 3 | ||
2003 | #define SDMA_PKT_POLL_REGMEM_VALUE_value_mask 0xFFFFFFFF | ||
2004 | #define SDMA_PKT_POLL_REGMEM_VALUE_value_shift 0 | ||
2005 | #define SDMA_PKT_POLL_REGMEM_VALUE_VALUE(x) (((x) & SDMA_PKT_POLL_REGMEM_VALUE_value_mask) << SDMA_PKT_POLL_REGMEM_VALUE_value_shift) | ||
2006 | |||
2007 | /*define for MASK word*/ | ||
2008 | /*define for mask field*/ | ||
2009 | #define SDMA_PKT_POLL_REGMEM_MASK_mask_offset 4 | ||
2010 | #define SDMA_PKT_POLL_REGMEM_MASK_mask_mask 0xFFFFFFFF | ||
2011 | #define SDMA_PKT_POLL_REGMEM_MASK_mask_shift 0 | ||
2012 | #define SDMA_PKT_POLL_REGMEM_MASK_MASK(x) (((x) & SDMA_PKT_POLL_REGMEM_MASK_mask_mask) << SDMA_PKT_POLL_REGMEM_MASK_mask_shift) | ||
2013 | |||
2014 | /*define for DW5 word*/ | ||
2015 | /*define for interval field*/ | ||
2016 | #define SDMA_PKT_POLL_REGMEM_DW5_interval_offset 5 | ||
2017 | #define SDMA_PKT_POLL_REGMEM_DW5_interval_mask 0x0000FFFF | ||
2018 | #define SDMA_PKT_POLL_REGMEM_DW5_interval_shift 0 | ||
2019 | #define SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_interval_mask) << SDMA_PKT_POLL_REGMEM_DW5_interval_shift) | ||
2020 | |||
2021 | /*define for retry_count field*/ | ||
2022 | #define SDMA_PKT_POLL_REGMEM_DW5_retry_count_offset 5 | ||
2023 | #define SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask 0x00000FFF | ||
2024 | #define SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift 16 | ||
2025 | #define SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask) << SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift) | ||
2026 | |||
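Editor's note: POLL_REGMEM is the engine's wait primitive — it re-reads a register or memory location until (value & MASK) compares against VALUE according to the func field, retrying up to retry_count times at the given interval. A sketch of a memory poll; the opcode constant and the func encoding (3 taken to mean "equal") are assumptions, not confirmed by this header:

/* Sketch only: stall the engine until (*addr & mask) == ref. */
static void sketch_emit_poll(uint32_t *ib, uint64_t addr,
			     uint32_t ref, uint32_t mask)
{
	ib[0] = SDMA_PKT_POLL_REGMEM_HEADER_OP(SDMA_OP_POLL_REGMEM) |
		SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1) |	/* memory, not register */
		SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3);		/* assumed: equal */
	ib[1] = SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(lower_32_bits(addr));
	ib[2] = SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(upper_32_bits(addr));
	ib[3] = SDMA_PKT_POLL_REGMEM_VALUE_VALUE(ref);
	ib[4] = SDMA_PKT_POLL_REGMEM_MASK_MASK(mask);
	ib[5] = SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4) |
		SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff);
}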
2027 | |||
2028 | /* | ||
2029 | ** Definitions for SDMA_PKT_TIMESTAMP_SET packet | ||
2030 | */ | ||
2031 | |||
2032 | /*define for HEADER word*/ | ||
2033 | /*define for op field*/ | ||
2034 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_op_offset 0 | ||
2035 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask 0x000000FF | ||
2036 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift 0 | ||
2037 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift) | ||
2038 | |||
2039 | /*define for sub_op field*/ | ||
2040 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_offset 0 | ||
2041 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask 0x000000FF | ||
2042 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift 8 | ||
2043 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift) | ||
2044 | |||
2045 | /*define for INIT_DATA_LO word*/ | ||
2046 | /*define for init_data_31_0 field*/ | ||
2047 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_offset 1 | ||
2048 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask 0xFFFFFFFF | ||
2049 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift 0 | ||
2050 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_INIT_DATA_31_0(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift) | ||
2051 | |||
2052 | /*define for INIT_DATA_HI word*/ | ||
2053 | /*define for init_data_63_32 field*/ | ||
2054 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_offset 2 | ||
2055 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask 0xFFFFFFFF | ||
2056 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift 0 | ||
2057 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_INIT_DATA_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift) | ||
2058 | |||
2059 | |||
2060 | /* | ||
2061 | ** Definitions for SDMA_PKT_TIMESTAMP_GET packet | ||
2062 | */ | ||
2063 | |||
2064 | /*define for HEADER word*/ | ||
2065 | /*define for op field*/ | ||
2066 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_op_offset 0 | ||
2067 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask 0x000000FF | ||
2068 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift 0 | ||
2069 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift) | ||
2070 | |||
2071 | /*define for sub_op field*/ | ||
2072 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_offset 0 | ||
2073 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask 0x000000FF | ||
2074 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift 8 | ||
2075 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift) | ||
2076 | |||
2077 | /*define for WRITE_ADDR_LO word*/ | ||
2078 | /*define for write_addr_31_3 field*/ | ||
2079 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_offset 1 | ||
2080 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask 0x1FFFFFFF | ||
2081 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift 3 | ||
2082 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift) | ||
2083 | |||
2084 | /*define for WRITE_ADDR_HI word*/ | ||
2085 | /*define for write_addr_63_32 field*/ | ||
2086 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_offset 2 | ||
2087 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask 0xFFFFFFFF | ||
2088 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift 0 | ||
2089 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift) | ||
2090 | |||
2091 | |||
2092 | /* | ||
2093 | ** Definitions for SDMA_PKT_TIMESTAMP_GET_GLOBAL packet | ||
2094 | */ | ||
2095 | |||
2096 | /*define for HEADER word*/ | ||
2097 | /*define for op field*/ | ||
2098 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_offset 0 | ||
2099 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask 0x000000FF | ||
2100 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift 0 | ||
2101 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift) | ||
2102 | |||
2103 | /*define for sub_op field*/ | ||
2104 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_offset 0 | ||
2105 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask 0x000000FF | ||
2106 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift 8 | ||
2107 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift) | ||
2108 | |||
2109 | /*define for WRITE_ADDR_LO word*/ | ||
2110 | /*define for write_addr_31_3 field*/ | ||
2111 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_offset 1 | ||
2112 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask 0x1FFFFFFF | ||
2113 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift 3 | ||
2114 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift) | ||
2115 | |||
2116 | /*define for WRITE_ADDR_HI word*/ | ||
2117 | /*define for write_addr_63_32 field*/ | ||
2118 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_offset 2 | ||
2119 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask 0xFFFFFFFF | ||
2120 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift 0 | ||
2121 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift) | ||
2122 | |||
2123 | |||
2124 | /* | ||
2125 | ** Definitions for SDMA_PKT_TRAP packet | ||
2126 | */ | ||
2127 | |||
2128 | /*define for HEADER word*/ | ||
2129 | /*define for op field*/ | ||
2130 | #define SDMA_PKT_TRAP_HEADER_op_offset 0 | ||
2131 | #define SDMA_PKT_TRAP_HEADER_op_mask 0x000000FF | ||
2132 | #define SDMA_PKT_TRAP_HEADER_op_shift 0 | ||
2133 | #define SDMA_PKT_TRAP_HEADER_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_op_mask) << SDMA_PKT_TRAP_HEADER_op_shift) | ||
2134 | |||
2135 | /*define for sub_op field*/ | ||
2136 | #define SDMA_PKT_TRAP_HEADER_sub_op_offset 0 | ||
2137 | #define SDMA_PKT_TRAP_HEADER_sub_op_mask 0x000000FF | ||
2138 | #define SDMA_PKT_TRAP_HEADER_sub_op_shift 8 | ||
2139 | #define SDMA_PKT_TRAP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_sub_op_mask) << SDMA_PKT_TRAP_HEADER_sub_op_shift) | ||
2140 | |||
2141 | /*define for INT_CONTEXT word*/ | ||
2142 | /*define for int_context field*/ | ||
2143 | #define SDMA_PKT_TRAP_INT_CONTEXT_int_context_offset 1 | ||
2144 | #define SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask 0x0FFFFFFF | ||
2145 | #define SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift 0 | ||
2146 | #define SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(x) (((x) & SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask) << SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift) | ||
2147 | |||
2148 | |||
2149 | /* | ||
2150 | ** Definitions for SDMA_PKT_NOP packet | ||
2151 | */ | ||
2152 | |||
2153 | /*define for HEADER word*/ | ||
2154 | /*define for op field*/ | ||
2155 | #define SDMA_PKT_NOP_HEADER_op_offset 0 | ||
2156 | #define SDMA_PKT_NOP_HEADER_op_mask 0x000000FF | ||
2157 | #define SDMA_PKT_NOP_HEADER_op_shift 0 | ||
2158 | #define SDMA_PKT_NOP_HEADER_OP(x) (((x) & SDMA_PKT_NOP_HEADER_op_mask) << SDMA_PKT_NOP_HEADER_op_shift) | ||
2159 | |||
2160 | /*define for sub_op field*/ | ||
2161 | #define SDMA_PKT_NOP_HEADER_sub_op_offset 0 | ||
2162 | #define SDMA_PKT_NOP_HEADER_sub_op_mask 0x000000FF | ||
2163 | #define SDMA_PKT_NOP_HEADER_sub_op_shift 8 | ||
2164 | #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift) | ||
2165 | |||
2166 | |||
2167 | #endif /* __ICELAND_SDMA_PKT_OPEN_H_ */ | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c new file mode 100644 index 000000000000..c6f1e2f12b5f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c | |||
@@ -0,0 +1,675 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/firmware.h> | ||
25 | #include "drmP.h" | ||
26 | #include "amdgpu.h" | ||
27 | #include "ppsmc.h" | ||
28 | #include "iceland_smumgr.h" | ||
29 | #include "smu_ucode_xfer_vi.h" | ||
30 | #include "amdgpu_ucode.h" | ||
31 | |||
32 | #include "smu/smu_7_1_1_d.h" | ||
33 | #include "smu/smu_7_1_1_sh_mask.h" | ||
34 | |||
35 | #define ICELAND_SMC_SIZE 0x20000 | ||
36 | |||
37 | static int iceland_set_smc_sram_address(struct amdgpu_device *adev, | ||
38 | uint32_t smc_address, uint32_t limit) | ||
39 | { | ||
40 | uint32_t val; | ||
41 | |||
42 | if (smc_address & 3) | ||
43 | return -EINVAL; | ||
44 | |||
45 | if ((smc_address + 3) > limit) | ||
46 | return -EINVAL; | ||
47 | |||
48 | WREG32(mmSMC_IND_INDEX_0, smc_address); | ||
49 | |||
50 | val = RREG32(mmSMC_IND_ACCESS_CNTL); | ||
51 | val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); | ||
52 | WREG32(mmSMC_IND_ACCESS_CNTL, val); | ||
53 | |||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static int iceland_copy_bytes_to_smc(struct amdgpu_device *adev, | ||
58 | uint32_t smc_start_address, | ||
59 | const uint8_t *src, | ||
60 | uint32_t byte_count, uint32_t limit) | ||
61 | { | ||
62 | uint32_t addr; | ||
63 | uint32_t data, orig_data; | ||
64 | int result = 0; | ||
65 | uint32_t extra_shift; | ||
66 | unsigned long flags; | ||
67 | |||
68 | if (smc_start_address & 3) | ||
69 | return -EINVAL; | ||
70 | |||
71 | if ((smc_start_address + byte_count) > limit) | ||
72 | return -EINVAL; | ||
73 | |||
74 | addr = smc_start_address; | ||
75 | |||
76 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
77 | while (byte_count >= 4) { | ||
78 | /* Bytes are written into the SMC address space with the MSB first */ | ||
79 | data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; | ||
80 | |||
81 | result = iceland_set_smc_sram_address(adev, addr, limit); | ||
82 | |||
83 | if (result) | ||
84 | goto out; | ||
85 | |||
86 | WREG32(mmSMC_IND_DATA_0, data); | ||
87 | |||
88 | src += 4; | ||
89 | byte_count -= 4; | ||
90 | addr += 4; | ||
91 | } | ||
92 | |||
93 | if (0 != byte_count) { | ||
94 | /* Write the remaining (non-dword-aligned) bytes with a read-modify-write cycle */ | ||
95 | data = 0; | ||
96 | |||
97 | result = iceland_set_smc_sram_address(adev, addr, limit); | ||
98 | if (result) | ||
99 | goto out; | ||
100 | |||
101 | orig_data = RREG32(mmSMC_IND_DATA_0); | ||
102 | extra_shift = 8 * (4 - byte_count); | ||
103 | |||
104 | while (byte_count > 0) { | ||
105 | data = (data << 8) + *src++; | ||
106 | byte_count--; | ||
107 | } | ||
108 | |||
109 | data <<= extra_shift; | ||
110 | data |= (orig_data & ~((~0UL) << extra_shift)); | ||
111 | |||
112 | result = iceland_set_smc_sram_address(adev, addr, limit); | ||
113 | if (result) | ||
114 | goto out; | ||
115 | |||
116 | WREG32(mmSMC_IND_DATA_0, data); | ||
117 | } | ||
118 | |||
119 | out: | ||
120 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
121 | return result; | ||
122 | } | ||
123 | |||
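Editor's note: the tail handling in iceland_copy_bytes_to_smc() merits a worked example — the leftover bytes are accumulated MSB-first, shifted into the high end of the dword, and the untouched low-order bytes of the existing SRAM word are preserved by the read-modify-write. With arbitrary example values:

/* Worked example (values arbitrary): byte_count == 3,
 * src == {0xAA, 0xBB, 0xCC}, existing dword orig_data == 0x11223344.
 *
 *   data        = 0x00AABBCC              accumulated MSB-first
 *   extra_shift = 8 * (4 - 3) = 8
 *   data <<= 8                        ->  0xAABBCC00
 *   data |= orig_data & ~(~0UL << 8)  ->  0xAABBCC44
 *
 * The three new bytes land in the high bytes of the dword and the
 * last original byte survives unmodified.
 */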
124 | void iceland_start_smc(struct amdgpu_device *adev) | ||
125 | { | ||
126 | uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | ||
127 | |||
128 | val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); | ||
129 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); | ||
130 | } | ||
131 | |||
132 | void iceland_reset_smc(struct amdgpu_device *adev) | ||
133 | { | ||
134 | uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | ||
135 | |||
136 | val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); | ||
137 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); | ||
138 | } | ||
139 | |||
140 | static int iceland_program_jump_on_start(struct amdgpu_device *adev) | ||
141 | { | ||
142 | static unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; | ||
143 | iceland_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | void iceland_stop_smc_clock(struct amdgpu_device *adev) | ||
149 | { | ||
150 | uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
151 | |||
152 | val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); | ||
153 | WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); | ||
154 | } | ||
155 | |||
156 | void iceland_start_smc_clock(struct amdgpu_device *adev) | ||
157 | { | ||
158 | uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
159 | |||
160 | val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); | ||
161 | WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); | ||
162 | } | ||
163 | |||
164 | static bool iceland_is_smc_ram_running(struct amdgpu_device *adev) | ||
165 | { | ||
166 | uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
167 | val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); | ||
168 | |||
169 | return ((0 == val) && (0x20100 <= RREG32_SMC(ixSMC_PC_C))); | ||
170 | } | ||
171 | |||
172 | static int wait_smu_response(struct amdgpu_device *adev) | ||
173 | { | ||
174 | int i; | ||
175 | uint32_t val; | ||
176 | |||
177 | for (i = 0; i < adev->usec_timeout; i++) { | ||
178 | val = RREG32(mmSMC_RESP_0); | ||
179 | if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) | ||
180 | break; | ||
181 | udelay(1); | ||
182 | } | ||
183 | |||
184 | if (i == adev->usec_timeout) | ||
185 | return -EINVAL; | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static int iceland_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) | ||
191 | { | ||
192 | if (!iceland_is_smc_ram_running(adev)) | ||
193 | return -EINVAL; | ||
194 | |||
195 | if (wait_smu_response(adev)) { | ||
196 | DRM_ERROR("Failed to send previous message\n"); | ||
197 | return -EINVAL; | ||
198 | } | ||
199 | |||
200 | WREG32(mmSMC_MESSAGE_0, msg); | ||
201 | |||
202 | if (wait_smu_response(adev)) { | ||
203 | DRM_ERROR("Failed to send message\n"); | ||
204 | return -EINVAL; | ||
205 | } | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, | ||
211 | PPSMC_Msg msg) | ||
212 | { | ||
213 | if (!iceland_is_smc_ram_running(adev)) | ||
214 | return -EINVAL; | ||
215 | |||
216 | if (wait_smu_response(adev)) { | ||
217 | DRM_ERROR("Failed to send previous message\n"); | ||
218 | return -EINVAL; | ||
219 | } | ||
220 | |||
221 | WREG32(mmSMC_MESSAGE_0, msg); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | |||
226 | static int iceland_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, | ||
227 | PPSMC_Msg msg, | ||
228 | uint32_t parameter) | ||
229 | { | ||
230 | WREG32(mmSMC_MSG_ARG_0, parameter); | ||
231 | |||
232 | return iceland_send_msg_to_smc(adev, msg); | ||
233 | } | ||
234 | |||
235 | static int iceland_send_msg_to_smc_with_parameter_without_waiting( | ||
236 | struct amdgpu_device *adev, | ||
237 | PPSMC_Msg msg, uint32_t parameter) | ||
238 | { | ||
239 | WREG32(mmSMC_MSG_ARG_0, parameter); | ||
240 | |||
241 | return iceland_send_msg_to_smc_without_waiting(adev, msg); | ||
242 | } | ||
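The message helpers above form a simple mailbox protocol: the argument is latched into SMC_MSG_ARG_0 first, writing SMC_MESSAGE_0 starts the firmware, and SMC_RESP_0 is polled before the next message may be sent. A toy userspace model of that handshake (illustrative only; the stub "firmware" here answers synchronously, whereas the real SMC sets RESP_0 asynchronously and the driver polls):

    #include <stdint.h>
    #include <stdio.h>

    struct smc_mbox {
        uint32_t arg;      /* models mmSMC_MSG_ARG_0 */
        uint32_t message;  /* models mmSMC_MESSAGE_0 */
        uint32_t resp;     /* models mmSMC_RESP_0, nonzero == done */
    };

    /* Stand-in for the SMC firmware acknowledging a message. */
    static void fake_firmware(struct smc_mbox *mb)
    {
        mb->resp = 1;
    }

    static int send_msg(struct smc_mbox *mb, uint32_t msg, uint32_t param)
    {
        if (!mb->resp)
            return -1;         /* previous message still pending */
        mb->arg = param;       /* argument must be in place first */
        mb->resp = 0;
        mb->message = msg;     /* this write kicks off processing */
        fake_firmware(mb);     /* the driver polls RESP_0 here instead */
        return mb->resp ? 0 : -1;
    }

    int main(void)
    {
        struct smc_mbox mb = { .resp = 1 };  /* idle mailbox */

        printf("%d\n", send_msg(&mb, 0x10 /* hypothetical msg id */, 0x1234));
        return 0;
    }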
243 | |||
244 | #if 0 /* not used yet */ | ||
245 | static int iceland_wait_for_smc_inactive(struct amdgpu_device *adev) | ||
246 | { | ||
247 | int i; | ||
248 | uint32_t val; | ||
249 | |||
250 | if (!iceland_is_smc_ram_running(adev)) | ||
251 | return -EINVAL; | ||
252 | |||
253 | for (i = 0; i < adev->usec_timeout; i++) { | ||
254 | val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
255 | if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) | ||
256 | break; | ||
257 | udelay(1); | ||
258 | } | ||
259 | |||
260 | if (i == adev->usec_timeout) | ||
261 | return -EINVAL; | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | #endif | ||
266 | |||
267 | static int iceland_smu_upload_firmware_image(struct amdgpu_device *adev) | ||
268 | { | ||
269 | const struct smc_firmware_header_v1_0 *hdr; | ||
270 | uint32_t ucode_size; | ||
271 | uint32_t ucode_start_address; | ||
272 | const uint8_t *src; | ||
273 | uint32_t val; | ||
274 | uint32_t byte_count; | ||
275 | uint32_t data; | ||
276 | unsigned long flags; | ||
277 | int i; | ||
278 | |||
279 | if (!adev->pm.fw) | ||
280 | return -EINVAL; | ||
281 | |||
282 | hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; | ||
283 | amdgpu_ucode_print_smc_hdr(&hdr->header); | ||
284 | |||
285 | adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); | ||
286 | ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); | ||
287 | ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); | ||
288 | src = (const uint8_t *) | ||
289 | (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
290 | |||
291 | if (ucode_size & 3) { | ||
292 | DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); | ||
293 | return -EINVAL; | ||
294 | } | ||
295 | |||
296 | if (ucode_size > ICELAND_SMC_SIZE) { | ||
297 | DRM_ERROR("SMC address is beyond the SMC RAM area\n"); | ||
298 | return -EINVAL; | ||
299 | } | ||
300 | |||
301 | for (i = 0; i < adev->usec_timeout; i++) { | ||
302 | val = RREG32_SMC(ixRCU_UC_EVENTS); | ||
303 | if (REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done) == 0) | ||
304 | break; | ||
305 | udelay(1); | ||
306 | } | ||
307 | val = RREG32_SMC(ixSMC_SYSCON_MISC_CNTL); | ||
308 | WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, val | 1); | ||
309 | |||
310 | iceland_stop_smc_clock(adev); | ||
311 | iceland_reset_smc(adev); | ||
312 | |||
313 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
314 | WREG32(mmSMC_IND_INDEX_0, ucode_start_address); | ||
315 | |||
316 | val = RREG32(mmSMC_IND_ACCESS_CNTL); | ||
317 | val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); | ||
318 | WREG32(mmSMC_IND_ACCESS_CNTL, val); | ||
319 | |||
320 | byte_count = ucode_size; | ||
321 | while (byte_count >= 4) { | ||
322 | data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; | ||
323 | WREG32(mmSMC_IND_DATA_0, data); | ||
324 | src += 4; | ||
325 | byte_count -= 4; | ||
326 | } | ||
327 | val = RREG32(mmSMC_IND_ACCESS_CNTL); | ||
328 | val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); | ||
329 | WREG32(mmSMC_IND_ACCESS_CNTL, val); | ||
330 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
331 | |||
332 | return 0; | ||
333 | } | ||
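The upload loop relies on the auto-increment mode of the indexed register pair: the start address is written to SMC_IND_INDEX_0 once, and each subsequent write to SMC_IND_DATA_0 advances the index by four. A toy model of that access pattern (my own sketch, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* Models an index/data register pair with auto-increment enabled. */
    struct ind_pair {
        uint32_t index;
        int auto_inc;
        uint32_t sram[16];   /* stand-in for SMC RAM */
    };

    static void wreg_data(struct ind_pair *r, uint32_t v)
    {
        r->sram[r->index / 4] = v;
        if (r->auto_inc)
            r->index += 4;   /* next data write hits the next dword */
    }

    int main(void)
    {
        struct ind_pair r = { .index = 0x8, .auto_inc = 1 };

        wreg_data(&r, 0x11111111);
        wreg_data(&r, 0x22222222);
        printf("0x%08X 0x%08X\n", r.sram[2], r.sram[3]);
        return 0;
    }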
334 | |||
335 | #if 0 /* not used yet */ | ||
336 | static int iceland_read_smc_sram_dword(struct amdgpu_device *adev, | ||
337 | uint32_t smc_address, | ||
338 | uint32_t *value, | ||
339 | uint32_t limit) | ||
340 | { | ||
341 | int result; | ||
342 | unsigned long flags; | ||
343 | |||
344 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
345 | result = iceland_set_smc_sram_address(adev, smc_address, limit); | ||
346 | if (result == 0) | ||
347 | *value = RREG32(mmSMC_IND_DATA_0); | ||
348 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
349 | return result; | ||
350 | } | ||
351 | |||
352 | static int iceland_write_smc_sram_dword(struct amdgpu_device *adev, | ||
353 | uint32_t smc_address, | ||
354 | uint32_t value, | ||
355 | uint32_t limit) | ||
356 | { | ||
357 | int result; | ||
358 | unsigned long flags; | ||
359 | |||
360 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
361 | result = iceland_set_smc_sram_address(adev, smc_address, limit); | ||
362 | if (result == 0) | ||
363 | WREG32(mmSMC_IND_DATA_0, value); | ||
364 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
365 | return result; | ||
366 | } | ||
367 | |||
368 | static int iceland_smu_stop_smc(struct amdgpu_device *adev) | ||
369 | { | ||
370 | iceland_reset_smc(adev); | ||
371 | iceland_stop_smc_clock(adev); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | #endif | ||
376 | |||
377 | static int iceland_smu_start_smc(struct amdgpu_device *adev) | ||
378 | { | ||
379 | int i; | ||
380 | uint32_t val; | ||
381 | |||
382 | iceland_program_jump_on_start(adev); | ||
383 | iceland_start_smc_clock(adev); | ||
384 | iceland_start_smc(adev); | ||
385 | |||
386 | for (i = 0; i < adev->usec_timeout; i++) { | ||
387 | val = RREG32_SMC(ixFIRMWARE_FLAGS); | ||
388 | if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED) == 1) | ||
389 | break; | ||
390 | udelay(1); | ||
391 | } | ||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | static enum AMDGPU_UCODE_ID iceland_convert_fw_type(uint32_t fw_type) | ||
396 | { | ||
397 | switch (fw_type) { | ||
398 | case UCODE_ID_SDMA0: | ||
399 | return AMDGPU_UCODE_ID_SDMA0; | ||
400 | case UCODE_ID_SDMA1: | ||
401 | return AMDGPU_UCODE_ID_SDMA1; | ||
402 | case UCODE_ID_CP_CE: | ||
403 | return AMDGPU_UCODE_ID_CP_CE; | ||
404 | case UCODE_ID_CP_PFP: | ||
405 | return AMDGPU_UCODE_ID_CP_PFP; | ||
406 | case UCODE_ID_CP_ME: | ||
407 | return AMDGPU_UCODE_ID_CP_ME; | ||
408 | case UCODE_ID_CP_MEC: | ||
409 | case UCODE_ID_CP_MEC_JT1: | ||
410 | return AMDGPU_UCODE_ID_CP_MEC1; | ||
411 | case UCODE_ID_CP_MEC_JT2: | ||
412 | return AMDGPU_UCODE_ID_CP_MEC2; | ||
413 | case UCODE_ID_RLC_G: | ||
414 | return AMDGPU_UCODE_ID_RLC_G; | ||
415 | default: | ||
416 | DRM_ERROR("ucode type is out of range!\n"); | ||
417 | return AMDGPU_UCODE_ID_MAXIMUM; | ||
418 | } | ||
419 | } | ||
420 | |||
421 | static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type) | ||
422 | { | ||
423 | switch (fw_type) { | ||
424 | case AMDGPU_UCODE_ID_SDMA0: | ||
425 | return UCODE_ID_SDMA0_MASK; | ||
426 | case AMDGPU_UCODE_ID_SDMA1: | ||
427 | return UCODE_ID_SDMA1_MASK; | ||
428 | case AMDGPU_UCODE_ID_CP_CE: | ||
429 | return UCODE_ID_CP_CE_MASK; | ||
430 | case AMDGPU_UCODE_ID_CP_PFP: | ||
431 | return UCODE_ID_CP_PFP_MASK; | ||
432 | case AMDGPU_UCODE_ID_CP_ME: | ||
433 | return UCODE_ID_CP_ME_MASK; | ||
434 | case AMDGPU_UCODE_ID_CP_MEC1: | ||
435 | return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; | ||
436 | case AMDGPU_UCODE_ID_CP_MEC2: | ||
437 | return UCODE_ID_CP_MEC_MASK; | ||
438 | case AMDGPU_UCODE_ID_RLC_G: | ||
439 | return UCODE_ID_RLC_G_MASK; | ||
440 | default: | ||
441 | DRM_ERROR("ucode type is out of range!\n"); | ||
442 | return 0; | ||
443 | } | ||
444 | } | ||
445 | |||
446 | static int iceland_smu_populate_single_firmware_entry(struct amdgpu_device *adev, | ||
447 | uint32_t fw_type, | ||
448 | struct SMU_Entry *entry) | ||
449 | { | ||
450 | enum AMDGPU_UCODE_ID id = iceland_convert_fw_type(fw_type); | ||
451 | struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; | ||
452 | const struct gfx_firmware_header_v1_0 *header = NULL; | ||
453 | uint64_t gpu_addr; | ||
454 | uint32_t data_size; | ||
455 | |||
456 | if (ucode->fw == NULL) | ||
457 | return -EINVAL; | ||
458 | |||
459 | gpu_addr = ucode->mc_addr; | ||
460 | header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; | ||
461 | data_size = le32_to_cpu(header->header.ucode_size_bytes); | ||
462 | |||
463 | entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); | ||
464 | entry->id = (uint16_t)fw_type; | ||
465 | entry->image_addr_high = upper_32_bits(gpu_addr); | ||
466 | entry->image_addr_low = lower_32_bits(gpu_addr); | ||
467 | entry->meta_data_addr_high = 0; | ||
468 | entry->meta_data_addr_low = 0; | ||
469 | entry->data_size_byte = data_size; | ||
470 | entry->num_register_entries = 0; | ||
471 | entry->flags = 0; | ||
472 | |||
473 | return 0; | ||
474 | } | ||
475 | |||
476 | static int iceland_smu_request_load_fw(struct amdgpu_device *adev) | ||
477 | { | ||
478 | struct iceland_smu_private_data *private = (struct iceland_smu_private_data *)adev->smu.priv; | ||
479 | struct SMU_DRAMData_TOC *toc; | ||
480 | uint32_t fw_to_load; | ||
481 | |||
482 | toc = (struct SMU_DRAMData_TOC *)private->header; | ||
483 | toc->num_entries = 0; | ||
484 | toc->structure_version = 1; | ||
485 | |||
486 | if (!adev->firmware.smu_load) | ||
487 | return 0; | ||
488 | |||
489 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, | ||
490 | &toc->entry[toc->num_entries++])) { | ||
491 | DRM_ERROR("Failed to get firmware entry for RLC\n"); | ||
492 | return -EINVAL; | ||
493 | } | ||
494 | |||
495 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, | ||
496 | &toc->entry[toc->num_entries++])) { | ||
497 | DRM_ERROR("Failed to get firmware entry for CE\n"); | ||
498 | return -EINVAL; | ||
499 | } | ||
500 | |||
501 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, | ||
502 | &toc->entry[toc->num_entries++])) { | ||
503 | DRM_ERROR("Failed to get firmware entry for PFP\n"); | ||
504 | return -EINVAL; | ||
505 | } | ||
506 | |||
507 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, | ||
508 | &toc->entry[toc->num_entries++])) { | ||
509 | DRM_ERROR("Failed to get firmware entry for ME\n"); | ||
510 | return -EINVAL; | ||
511 | } | ||
512 | |||
513 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, | ||
514 | &toc->entry[toc->num_entries++])) { | ||
515 | DRM_ERROR("Failed to get firmware entry for MEC\n"); | ||
516 | return -EINVAL; | ||
517 | } | ||
518 | |||
519 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, | ||
520 | &toc->entry[toc->num_entries++])) { | ||
521 | DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); | ||
522 | return -EINVAL; | ||
523 | } | ||
524 | |||
525 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, | ||
526 | &toc->entry[toc->num_entries++])) { | ||
527 | DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); | ||
528 | return -EINVAL; | ||
529 | } | ||
530 | |||
531 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, | ||
532 | &toc->entry[toc->num_entries++])) { | ||
533 | DRM_ERROR("Failed to get firmware entry for SDMA0\n"); | ||
534 | return -EINVAL; | ||
535 | } | ||
536 | |||
537 | if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, | ||
538 | &toc->entry[toc->num_entries++])) { | ||
539 | DRM_ERROR("Failed to get firmware entry for SDMA1\n"); | ||
540 | return -EINVAL; | ||
541 | } | ||
542 | |||
543 | iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); | ||
544 | iceland_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); | ||
545 | |||
546 | fw_to_load = UCODE_ID_RLC_G_MASK | | ||
547 | UCODE_ID_SDMA0_MASK | | ||
548 | UCODE_ID_SDMA1_MASK | | ||
549 | UCODE_ID_CP_CE_MASK | | ||
550 | UCODE_ID_CP_ME_MASK | | ||
551 | UCODE_ID_CP_PFP_MASK | | ||
552 | UCODE_ID_CP_MEC_MASK | | ||
553 | UCODE_ID_CP_MEC_JT1_MASK | | ||
554 | UCODE_ID_CP_MEC_JT2_MASK; | ||
555 | |||
556 | if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { | ||
557 | DRM_ERROR("Fail to request SMU load ucode\n"); | ||
558 | return -EINVAL; | ||
559 | } | ||
560 | |||
561 | return 0; | ||
562 | } | ||
563 | |||
564 | static int iceland_smu_check_fw_load_finish(struct amdgpu_device *adev, | ||
565 | uint32_t fw_type) | ||
566 | { | ||
567 | uint32_t fw_mask = iceland_smu_get_mask_for_fw_type(fw_type); | ||
568 | int i; | ||
569 | |||
570 | for (i = 0; i < adev->usec_timeout; i++) { | ||
571 | if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_27) & fw_mask)) | ||
572 | break; | ||
573 | udelay(1); | ||
574 | } | ||
575 | |||
576 | if (i == adev->usec_timeout) { | ||
577 | DRM_ERROR("check firmware loading failed\n"); | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | |||
581 | return 0; | ||
582 | } | ||
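The completion test above is a masked poll: loading is finished only when every bit of fw_mask is set in the soft-register word. A minimal standalone check of that predicate:

    #include <stdint.h>
    #include <stdio.h>

    /* True when all bits in fw_mask are set in status. */
    static int all_bits_set(uint32_t status, uint32_t fw_mask)
    {
        return fw_mask == (status & fw_mask);
    }

    int main(void)
    {
        printf("%d\n", all_bits_set(0x1B, 0x09)); /* 1: bits 0 and 3 both set */
        printf("%d\n", all_bits_set(0x12, 0x09)); /* 0: bit 0 missing */
        return 0;
    }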
583 | |||
584 | int iceland_smu_start(struct amdgpu_device *adev) | ||
585 | { | ||
586 | int result; | ||
587 | |||
588 | result = iceland_smu_upload_firmware_image(adev); | ||
589 | if (result) | ||
590 | return result; | ||
591 | result = iceland_smu_start_smc(adev); | ||
592 | if (result) | ||
593 | return result; | ||
594 | |||
595 | return iceland_smu_request_load_fw(adev); | ||
596 | } | ||
597 | |||
598 | static const struct amdgpu_smumgr_funcs iceland_smumgr_funcs = { | ||
599 | .check_fw_load_finish = iceland_smu_check_fw_load_finish, | ||
600 | .request_smu_load_fw = NULL, | ||
601 | .request_smu_specific_fw = NULL, | ||
602 | }; | ||
603 | |||
604 | int iceland_smu_init(struct amdgpu_device *adev) | ||
605 | { | ||
606 | struct iceland_smu_private_data *private; | ||
607 | uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; | ||
608 | struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; | ||
609 | uint64_t mc_addr; | ||
610 | void *toc_buf_ptr; | ||
611 | int ret; | ||
612 | |||
613 | private = kzalloc(sizeof(struct iceland_smu_private_data), GFP_KERNEL); | ||
614 | if (NULL == private) | ||
615 | return -ENOMEM; | ||
616 | |||
617 | /* allocate firmware buffers */ | ||
618 | if (adev->firmware.smu_load) | ||
619 | amdgpu_ucode_init_bo(adev); | ||
620 | |||
621 | adev->smu.priv = private; | ||
622 | adev->smu.fw_flags = 0; | ||
623 | |||
624 | /* Allocate FW image data structure and header buffer */ | ||
625 | ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, | ||
626 | true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf); | ||
627 | if (ret) { | ||
628 | DRM_ERROR("Failed to allocate memory for TOC buffer\n"); | ||
629 | return -ENOMEM; | ||
630 | } | ||
631 | |||
632 | /* Retrieve GPU address for header buffer and internal buffer */ | ||
633 | ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); | ||
634 | if (ret) { | ||
635 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
636 | DRM_ERROR("Failed to reserve the TOC buffer\n"); | ||
637 | return -EINVAL; | ||
638 | } | ||
639 | |||
640 | ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); | ||
641 | if (ret) { | ||
642 | amdgpu_bo_unreserve(adev->smu.toc_buf); | ||
643 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
644 | DRM_ERROR("Failed to pin the TOC buffer\n"); | ||
645 | return -EINVAL; | ||
646 | } | ||
647 | |||
648 | ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); | ||
649 | if (ret) { | ||
650 | amdgpu_bo_unreserve(adev->smu.toc_buf); | ||
651 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
652 | DRM_ERROR("Failed to map the TOC buffer\n"); | ||
653 | return -EINVAL; | ||
654 | } | ||
655 | |||
656 | amdgpu_bo_unreserve(adev->smu.toc_buf); | ||
657 | private->header_addr_low = lower_32_bits(mc_addr); | ||
658 | private->header_addr_high = upper_32_bits(mc_addr); | ||
659 | private->header = toc_buf_ptr; | ||
660 | |||
661 | adev->smu.smumgr_funcs = &iceland_smumgr_funcs; | ||
662 | |||
663 | return 0; | ||
664 | } | ||
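Note the TOC size computation above, ((sizeof(...) / 4096) + 1) * 4096: it always rounds into the next 4 KiB page, which is one page more than conventional alignment when the size already is page-aligned. A standalone comparison (ALIGN_UP is my own macro, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint32_t)(a) - 1))

    int main(void)
    {
        uint32_t sz = 4096;  /* already page-aligned */

        printf("%u\n", ((sz / 4096) + 1) * 4096);  /* 8192: formula above */
        printf("%u\n", ALIGN_UP(sz, 4096));        /* 4096: plain alignment */
        return 0;
    }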
665 | |||
666 | int iceland_smu_fini(struct amdgpu_device *adev) | ||
667 | { | ||
668 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
669 | kfree(adev->smu.priv); | ||
670 | adev->smu.priv = NULL; | ||
671 | if (adev->firmware.fw_buf) | ||
672 | amdgpu_ucode_fini_bo(adev); | ||
673 | |||
674 | return 0; | ||
675 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h new file mode 100644 index 000000000000..1e0769e110fa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef ICELAND_SMUMGR_H | ||
25 | #define ICELAND_SMUMGR_H | ||
26 | |||
27 | #include "ppsmc.h" | ||
28 | |||
29 | extern int iceland_smu_init(struct amdgpu_device *adev); | ||
30 | extern int iceland_smu_fini(struct amdgpu_device *adev); | ||
31 | extern int iceland_smu_start(struct amdgpu_device *adev); | ||
32 | |||
33 | struct iceland_smu_private_data | ||
34 | { | ||
35 | uint8_t *header; | ||
36 | uint8_t *mec_image; | ||
37 | uint32_t header_addr_high; | ||
38 | uint32_t header_addr_low; | ||
39 | }; | ||
40 | |||
41 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c new file mode 100644 index 000000000000..a83029d548c1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
@@ -0,0 +1,1447 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
24 | #include <linux/firmware.h> | ||
25 | #include <drm/drmP.h> | ||
26 | #include "amdgpu.h" | ||
27 | #include "amdgpu_ucode.h" | ||
28 | #include "amdgpu_trace.h" | ||
29 | #include "vi.h" | ||
30 | #include "vid.h" | ||
31 | |||
32 | #include "oss/oss_2_4_d.h" | ||
33 | #include "oss/oss_2_4_sh_mask.h" | ||
34 | |||
35 | #include "gmc/gmc_8_1_d.h" | ||
36 | #include "gmc/gmc_8_1_sh_mask.h" | ||
37 | |||
38 | #include "gca/gfx_8_0_d.h" | ||
39 | #include "gca/gfx_8_0_sh_mask.h" | ||
40 | |||
41 | #include "bif/bif_5_0_d.h" | ||
42 | #include "bif/bif_5_0_sh_mask.h" | ||
43 | |||
44 | #include "iceland_sdma_pkt_open.h" | ||
45 | |||
46 | static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev); | ||
47 | static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev); | ||
48 | static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev); | ||
49 | static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev); | ||
50 | |||
51 | MODULE_FIRMWARE("radeon/topaz_sdma.bin"); | ||
52 | MODULE_FIRMWARE("radeon/topaz_sdma1.bin"); | ||
53 | |||
54 | static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = | ||
55 | { | ||
56 | SDMA0_REGISTER_OFFSET, | ||
57 | SDMA1_REGISTER_OFFSET | ||
58 | }; | ||
59 | |||
60 | static const u32 golden_settings_iceland_a11[] = | ||
61 | { | ||
62 | mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, | ||
63 | mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, | ||
64 | mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, | ||
65 | mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, | ||
66 | }; | ||
67 | |||
68 | static const u32 iceland_mgcg_cgcg_init[] = | ||
69 | { | ||
70 | mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, | ||
71 | mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 | ||
72 | }; | ||
73 | |||
74 | /* | ||
75 | * sDMA - System DMA | ||
76 | * Starting with CIK, the GPU has new asynchronous | ||
77 | * DMA engines. These engines are used for compute | ||
78 | * and gfx. There are two DMA engines (SDMA0, SDMA1) | ||
79 | * and each one supports 1 ring buffer used for gfx | ||
80 | * and 2 queues used for compute. | ||
81 | * | ||
82 | * The programming model is very similar to the CP | ||
83 | * (ring buffer, IBs, etc.), but sDMA has its own | ||
84 | * packet format that is different from the PM4 format | ||
85 | * used by the CP. sDMA supports copying data, writing | ||
86 | * embedded data, solid fills, and a number of other | ||
87 | * things. It also has support for tiling/detiling of | ||
88 | * buffers. | ||
89 | */ | ||
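For a feel of the packet format described above: each sDMA packet begins with a header dword carrying the opcode in its low byte and, for opcodes that have one, the sub-opcode in the next byte, which is what the SDMA_PKT_HEADER_OP/SUB_OP macros used below encode. A sketch of building the 5-dword WRITE_LINEAR packet used by the ring test later in this file, assuming that bit layout and illustrative opcode values (the real definitions live in iceland_sdma_pkt_open.h):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed encoding: opcode in bits 7:0, sub-opcode in bits 15:8. */
    #define PKT_OP(x)     ((uint32_t)(x) & 0xff)
    #define PKT_SUB_OP(x) (((uint32_t)(x) & 0xff) << 8)

    int main(void)
    {
        /* illustrative values only */
        const uint32_t OP_WRITE = 2, SUBOP_WRITE_LINEAR = 0;
        uint64_t dst = 0x123456789000ULL;
        uint32_t pkt[5];

        pkt[0] = PKT_OP(OP_WRITE) | PKT_SUB_OP(SUBOP_WRITE_LINEAR);
        pkt[1] = (uint32_t)dst;         /* destination, low dword */
        pkt[2] = (uint32_t)(dst >> 32); /* destination, high dword */
        pkt[3] = 1;                     /* dword count, as in the ring test */
        pkt[4] = 0xDEADBEEF;            /* payload */

        printf("header 0x%08X\n", pkt[0]);
        return 0;
    }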
90 | |||
91 | static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) | ||
92 | { | ||
93 | switch (adev->asic_type) { | ||
94 | case CHIP_TOPAZ: | ||
95 | amdgpu_program_register_sequence(adev, | ||
96 | iceland_mgcg_cgcg_init, | ||
97 | (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); | ||
98 | amdgpu_program_register_sequence(adev, | ||
99 | golden_settings_iceland_a11, | ||
100 | (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); | ||
101 | break; | ||
102 | default: | ||
103 | break; | ||
104 | } | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * sdma_v2_4_init_microcode - load ucode images from disk | ||
109 | * | ||
110 | * @adev: amdgpu_device pointer | ||
111 | * | ||
112 | * Use the firmware interface to load the ucode images into | ||
113 | * the driver (not loaded into hw). | ||
114 | * Returns 0 on success, error on failure. | ||
115 | */ | ||
116 | static int sdma_v2_4_init_microcode(struct amdgpu_device *adev) | ||
117 | { | ||
118 | const char *chip_name; | ||
119 | char fw_name[30]; | ||
120 | int err, i; | ||
121 | struct amdgpu_firmware_info *info = NULL; | ||
122 | const struct common_firmware_header *header = NULL; | ||
123 | |||
124 | DRM_DEBUG("\n"); | ||
125 | |||
126 | switch (adev->asic_type) { | ||
127 | case CHIP_TOPAZ: | ||
128 | chip_name = "topaz"; | ||
129 | break; | ||
130 | default: BUG(); | ||
131 | } | ||
132 | |||
133 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
134 | if (i == 0) | ||
135 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); | ||
136 | else | ||
137 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name); | ||
138 | err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); | ||
139 | if (err) | ||
140 | goto out; | ||
141 | err = amdgpu_ucode_validate(adev->sdma[i].fw); | ||
142 | if (err) | ||
143 | goto out; | ||
144 | |||
145 | if (adev->firmware.smu_load) { | ||
146 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; | ||
147 | info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; | ||
148 | info->fw = adev->sdma[i].fw; | ||
149 | header = (const struct common_firmware_header *)info->fw->data; | ||
150 | adev->firmware.fw_size += | ||
151 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | out: | ||
156 | if (err) { | ||
157 | printk(KERN_ERR | ||
158 | "sdma_v2_4: Failed to load firmware \"%s\"\n", | ||
159 | fw_name); | ||
160 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
161 | release_firmware(adev->sdma[i].fw); | ||
162 | adev->sdma[i].fw = NULL; | ||
163 | } | ||
164 | } | ||
165 | return err; | ||
166 | } | ||
167 | |||
168 | /** | ||
169 | * sdma_v2_4_ring_get_rptr - get the current read pointer | ||
170 | * | ||
171 | * @ring: amdgpu ring pointer | ||
172 | * | ||
173 | * Get the current rptr from the hardware (VI+). | ||
174 | */ | ||
175 | static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring) | ||
176 | { | ||
177 | u32 rptr; | ||
178 | |||
179 | /* XXX check if swapping is necessary on BE */ | ||
180 | rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; | ||
181 | |||
182 | return rptr; | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * sdma_v2_4_ring_get_wptr - get the current write pointer | ||
187 | * | ||
188 | * @ring: amdgpu ring pointer | ||
189 | * | ||
190 | * Get the current wptr from the hardware (VI+). | ||
191 | */ | ||
192 | static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring) | ||
193 | { | ||
194 | struct amdgpu_device *adev = ring->adev; | ||
195 | int me = (ring == &adev->sdma[0].ring) ? 0 : 1; | ||
196 | u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; | ||
197 | |||
198 | return wptr; | ||
199 | } | ||
200 | |||
201 | /** | ||
202 | * sdma_v2_4_ring_set_wptr - commit the write pointer | ||
203 | * | ||
204 | * @ring: amdgpu ring pointer | ||
205 | * | ||
206 | * Write the wptr back to the hardware (VI+). | ||
207 | */ | ||
208 | static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring) | ||
209 | { | ||
210 | struct amdgpu_device *adev = ring->adev; | ||
211 | int me = (ring == &adev->sdma[0].ring) ? 0 : 1; | ||
212 | |||
213 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); | ||
214 | } | ||
215 | |||
216 | static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *); | ||
217 | |||
218 | /** | ||
219 | * sdma_v2_4_ring_emit_ib - Schedule an IB on the DMA engine | ||
220 | * | ||
221 | * @ring: amdgpu ring pointer | ||
222 | * @ib: IB object to schedule | ||
223 | * | ||
224 | * Schedule an IB in the DMA ring (VI). | ||
225 | */ | ||
226 | static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, | ||
227 | struct amdgpu_ib *ib) | ||
228 | { | ||
229 | u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; | ||
230 | u32 next_rptr = ring->wptr + 5; | ||
231 | |||
232 | if (ib->flush_hdp_writefifo) | ||
233 | next_rptr += 6; | ||
234 | |||
235 | while ((next_rptr & 7) != 2) | ||
236 | next_rptr++; | ||
237 | |||
238 | next_rptr += 6; | ||
239 | |||
240 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||
241 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | ||
242 | amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); | ||
243 | amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); | ||
244 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | ||
245 | amdgpu_ring_write(ring, next_rptr); | ||
246 | |||
247 | if (ib->flush_hdp_writefifo) { | ||
248 | /* flush HDP */ | ||
249 | sdma_v2_4_hdp_flush_ring_emit(ring); | ||
250 | } | ||
251 | |||
252 | /* IB packet must end on an 8 DW boundary */ | ||
253 | while ((ring->wptr & 7) != 2) | ||
254 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); | ||
255 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | | ||
256 | SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); | ||
257 | /* base must be 32 byte aligned */ | ||
258 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); | ||
259 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | ||
260 | amdgpu_ring_write(ring, ib->length_dw); | ||
261 | amdgpu_ring_write(ring, 0); | ||
262 | amdgpu_ring_write(ring, 0); | ||
263 | |||
264 | } | ||
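The padding loop above stops at (wptr & 7) == 2 rather than 0 because the INDIRECT packet itself is six dwords long: starting it two dwords into an eight-dword window makes it end exactly on the boundary. The arithmetic, standalone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t wptr = 37;  /* arbitrary current write position, in dwords */

        while ((wptr & 7) != 2)
            wptr++;          /* each step is one NOP in the real ring */

        /* 6-dword INDIRECT packet: 2 + 6 == 8, ends 8-dword aligned */
        printf("start %u end %u (end %% 8 == %u)\n",
               wptr, wptr + 6, (wptr + 6) % 8);
        return 0;
    }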
265 | |||
266 | /** | ||
267 | * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring | ||
268 | * | ||
269 | * @ring: amdgpu ring pointer | ||
270 | * | ||
271 | * Emit an hdp flush packet on the requested DMA ring. | ||
272 | */ | ||
273 | static void sdma_v2_4_hdp_flush_ring_emit(struct amdgpu_ring *ring) | ||
274 | { | ||
275 | u32 ref_and_mask = 0; | ||
276 | |||
277 | if (ring == &ring->adev->sdma[0].ring) | ||
278 | ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); | ||
279 | else | ||
280 | ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); | ||
281 | |||
282 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | | ||
283 | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | | ||
284 | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ | ||
285 | amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2); | ||
286 | amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2); | ||
287 | amdgpu_ring_write(ring, ref_and_mask); /* reference */ | ||
288 | amdgpu_ring_write(ring, ref_and_mask); /* mask */ | ||
289 | amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | | ||
290 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ | ||
291 | } | ||
292 | |||
293 | /** | ||
294 | * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring | ||
295 | * | ||
296 | * @ring: amdgpu ring pointer | ||
297 | * @fence: amdgpu fence object | ||
298 | * | ||
299 | * Add a DMA fence packet to the ring to write | ||
300 | * the fence seq number and DMA trap packet to generate | ||
301 | * an interrupt if needed (VI). | ||
302 | */ | ||
303 | static void sdma_v2_4_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | ||
304 | bool write64bits) | ||
305 | { | ||
306 | /* write the fence */ | ||
307 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); | ||
308 | amdgpu_ring_write(ring, lower_32_bits(addr)); | ||
309 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
310 | amdgpu_ring_write(ring, lower_32_bits(seq)); | ||
311 | |||
312 | /* optionally write high bits as well */ | ||
313 | if (write64bits) { | ||
314 | addr += 4; | ||
315 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); | ||
316 | amdgpu_ring_write(ring, lower_32_bits(addr)); | ||
317 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
318 | amdgpu_ring_write(ring, upper_32_bits(seq)); | ||
319 | } | ||
320 | |||
321 | /* generate an interrupt */ | ||
322 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); | ||
323 | amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); | ||
324 | } | ||
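Each FENCE packet carries a single data dword, so a 64-bit sequence number takes two packets: the low half at addr and the high half at addr + 4, the upper dword of a little-endian u64. The split, standalone:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t seq = 0x0000000100000002ULL;

        printf("addr + 0: 0x%08X\n", (uint32_t)(seq & 0xffffffffu)); /* 2 */
        printf("addr + 4: 0x%08X\n", (uint32_t)(seq >> 32));         /* 1 */
        return 0;
    }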
325 | |||
326 | /** | ||
327 | * sdma_v2_4_ring_emit_semaphore - emit a semaphore on the dma ring | ||
328 | * | ||
329 | * @ring: amdgpu_ring structure holding ring information | ||
330 | * @semaphore: amdgpu semaphore object | ||
331 | * @emit_wait: wait or signal semaphore | ||
332 | * | ||
333 | * Add a DMA semaphore packet to the ring wait on or signal | ||
334 | * other rings (VI). | ||
335 | */ | ||
336 | static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring, | ||
337 | struct amdgpu_semaphore *semaphore, | ||
338 | bool emit_wait) | ||
339 | { | ||
340 | u64 addr = semaphore->gpu_addr; | ||
341 | u32 sig = emit_wait ? 0 : 1; | ||
342 | |||
343 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | | ||
344 | SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); | ||
345 | amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); | ||
346 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
347 | |||
348 | return true; | ||
349 | } | ||
350 | |||
351 | /** | ||
352 | * sdma_v2_4_gfx_stop - stop the gfx async dma engines | ||
353 | * | ||
354 | * @adev: amdgpu_device pointer | ||
355 | * | ||
356 | * Stop the gfx async dma ring buffers (VI). | ||
357 | */ | ||
358 | static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev) | ||
359 | { | ||
360 | struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; | ||
361 | struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; | ||
362 | u32 rb_cntl, ib_cntl; | ||
363 | int i; | ||
364 | |||
365 | if ((adev->mman.buffer_funcs_ring == sdma0) || | ||
366 | (adev->mman.buffer_funcs_ring == sdma1)) | ||
367 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); | ||
368 | |||
369 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
370 | rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); | ||
371 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); | ||
372 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); | ||
373 | ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); | ||
374 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); | ||
375 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | ||
376 | } | ||
377 | sdma0->ready = false; | ||
378 | sdma1->ready = false; | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * sdma_v2_4_rlc_stop - stop the compute async dma engines | ||
383 | * | ||
384 | * @adev: amdgpu_device pointer | ||
385 | * | ||
386 | * Stop the compute async dma queues (VI). | ||
387 | */ | ||
388 | static void sdma_v2_4_rlc_stop(struct amdgpu_device *adev) | ||
389 | { | ||
390 | /* XXX todo */ | ||
391 | } | ||
392 | |||
393 | /** | ||
394 | * sdma_v2_4_enable - stop the async dma engines | ||
395 | * | ||
396 | * @adev: amdgpu_device pointer | ||
397 | * @enable: enable/disable the DMA MEs. | ||
398 | * | ||
399 | * Halt or unhalt the async dma engines (VI). | ||
400 | */ | ||
401 | static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable) | ||
402 | { | ||
403 | u32 f32_cntl; | ||
404 | int i; | ||
405 | |||
406 | if (enable == false) { | ||
407 | sdma_v2_4_gfx_stop(adev); | ||
408 | sdma_v2_4_rlc_stop(adev); | ||
409 | } | ||
410 | |||
411 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
412 | f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); | ||
413 | if (enable) | ||
414 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); | ||
415 | else | ||
416 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); | ||
417 | WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl); | ||
418 | } | ||
419 | } | ||
420 | |||
421 | /** | ||
422 | * sdma_v2_4_gfx_resume - setup and start the async dma engines | ||
423 | * | ||
424 | * @adev: amdgpu_device pointer | ||
425 | * | ||
426 | * Set up the gfx DMA ring buffers and enable them (VI). | ||
427 | * Returns 0 for success, error for failure. | ||
428 | */ | ||
429 | static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | ||
430 | { | ||
431 | struct amdgpu_ring *ring; | ||
432 | u32 rb_cntl, ib_cntl; | ||
433 | u32 rb_bufsz; | ||
434 | u32 wb_offset; | ||
435 | int i, j, r; | ||
436 | |||
437 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
438 | ring = &adev->sdma[i].ring; | ||
439 | wb_offset = (ring->rptr_offs * 4); | ||
440 | |||
441 | mutex_lock(&adev->srbm_mutex); | ||
442 | for (j = 0; j < 16; j++) { | ||
443 | vi_srbm_select(adev, 0, 0, 0, j); | ||
444 | /* SDMA GFX */ | ||
445 | WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0); | ||
446 | WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0); | ||
447 | } | ||
448 | vi_srbm_select(adev, 0, 0, 0, 0); | ||
449 | mutex_unlock(&adev->srbm_mutex); | ||
450 | |||
451 | WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); | ||
452 | |||
453 | /* Set ring buffer size in dwords */ | ||
454 | rb_bufsz = order_base_2(ring->ring_size / 4); | ||
455 | rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); | ||
456 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); | ||
457 | #ifdef __BIG_ENDIAN | ||
458 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); | ||
459 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, | ||
460 | RPTR_WRITEBACK_SWAP_ENABLE, 1); | ||
461 | #endif | ||
462 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); | ||
463 | |||
464 | /* Initialize the ring buffer's read and write pointers */ | ||
465 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | ||
466 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | ||
467 | |||
468 | /* set the wb address whether it's enabled or not */ | ||
469 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | ||
470 | upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); | ||
471 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], | ||
472 | lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); | ||
473 | |||
474 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); | ||
475 | |||
476 | WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); | ||
477 | WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40); | ||
478 | |||
479 | ring->wptr = 0; | ||
480 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2); | ||
481 | |||
482 | /* enable DMA RB */ | ||
483 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); | ||
484 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); | ||
485 | |||
486 | ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); | ||
487 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); | ||
488 | #ifdef __BIG_ENDIAN | ||
489 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); | ||
490 | #endif | ||
491 | /* enable DMA IBs */ | ||
492 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | ||
493 | |||
494 | ring->ready = true; | ||
495 | |||
496 | r = amdgpu_ring_test_ring(ring); | ||
497 | if (r) { | ||
498 | ring->ready = false; | ||
499 | return r; | ||
500 | } | ||
501 | |||
502 | if (adev->mman.buffer_funcs_ring == ring) | ||
503 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); | ||
504 | } | ||
505 | |||
506 | return 0; | ||
507 | } | ||
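The RB_SIZE field stores the ring size as a power-of-two exponent in dwords, hence order_base_2(ring->ring_size / 4) above. A userspace stand-in for the kernel helper of the same name:

    #include <stdio.h>

    /* Smallest x such that 2^x >= n, like the kernel's order_base_2(). */
    static unsigned order_base_2(unsigned n)
    {
        unsigned x = 0;

        while ((1u << x) < n)
            x++;
        return x;
    }

    int main(void)
    {
        /* the 256 KiB rings created in sw_init: 64 Ki dwords -> exponent 16 */
        printf("%u\n", order_base_2((256 * 1024) / 4));
        return 0;
    }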
508 | |||
509 | /** | ||
510 | * sdma_v2_4_rlc_resume - setup and start the async dma engines | ||
511 | * | ||
512 | * @adev: amdgpu_device pointer | ||
513 | * | ||
514 | * Set up the compute DMA queues and enable them (VI). | ||
515 | * Returns 0 for success, error for failure. | ||
516 | */ | ||
517 | static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev) | ||
518 | { | ||
519 | /* XXX todo */ | ||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * sdma_v2_4_load_microcode - load the sDMA ME ucode | ||
525 | * | ||
526 | * @adev: amdgpu_device pointer | ||
527 | * | ||
528 | * Loads the sDMA0/1 ucode. | ||
529 | * Returns 0 for success, -EINVAL if the ucode is not available. | ||
530 | */ | ||
531 | static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) | ||
532 | { | ||
533 | const struct sdma_firmware_header_v1_0 *hdr; | ||
534 | const __le32 *fw_data; | ||
535 | u32 fw_size; | ||
536 | int i, j; | ||
537 | bool smc_loads_fw = false; /* XXX fix me */ | ||
538 | |||
539 | if (!adev->sdma[0].fw || !adev->sdma[1].fw) | ||
540 | return -EINVAL; | ||
541 | |||
542 | /* halt the MEs */ | ||
543 | sdma_v2_4_enable(adev, false); | ||
544 | |||
545 | if (smc_loads_fw) { | ||
546 | /* XXX query SMC for fw load complete */ | ||
547 | } else { | ||
548 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
549 | hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; | ||
550 | amdgpu_ucode_print_sdma_hdr(&hdr->header); | ||
551 | fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; | ||
552 | adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); | ||
553 | |||
554 | fw_data = (const __le32 *) | ||
555 | (adev->sdma[i].fw->data + | ||
556 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
557 | WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); | ||
558 | for (j = 0; j < fw_size; j++) | ||
559 | WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); | ||
560 | WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); | ||
561 | } | ||
562 | } | ||
563 | |||
564 | return 0; | ||
565 | } | ||
566 | |||
567 | /** | ||
568 | * sdma_v2_4_start - setup and start the async dma engines | ||
569 | * | ||
570 | * @adev: amdgpu_device pointer | ||
571 | * | ||
572 | * Set up the DMA engines and enable them (VI). | ||
573 | * Returns 0 for success, error for failure. | ||
574 | */ | ||
575 | static int sdma_v2_4_start(struct amdgpu_device *adev) | ||
576 | { | ||
577 | int r; | ||
578 | |||
579 | if (!adev->firmware.smu_load) { | ||
580 | r = sdma_v2_4_load_microcode(adev); | ||
581 | if (r) | ||
582 | return r; | ||
583 | } else { | ||
584 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
585 | AMDGPU_UCODE_ID_SDMA0); | ||
586 | if (r) | ||
587 | return -EINVAL; | ||
588 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
589 | AMDGPU_UCODE_ID_SDMA1); | ||
590 | if (r) | ||
591 | return -EINVAL; | ||
592 | } | ||
593 | |||
594 | /* unhalt the MEs */ | ||
595 | sdma_v2_4_enable(adev, true); | ||
596 | |||
597 | /* start the gfx rings and rlc compute queues */ | ||
598 | r = sdma_v2_4_gfx_resume(adev); | ||
599 | if (r) | ||
600 | return r; | ||
601 | r = sdma_v2_4_rlc_resume(adev); | ||
602 | if (r) | ||
603 | return r; | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | /** | ||
609 | * sdma_v2_4_ring_test_ring - simple async dma engine test | ||
610 | * | ||
611 | * @ring: amdgpu_ring structure holding ring information | ||
612 | * | ||
613 | * Test the DMA engine by using it to write a | ||
614 | * value to memory (VI). | ||
615 | * Returns 0 for success, error for failure. | ||
616 | */ | ||
617 | static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) | ||
618 | { | ||
619 | struct amdgpu_device *adev = ring->adev; | ||
620 | unsigned i; | ||
621 | unsigned index; | ||
622 | int r; | ||
623 | u32 tmp; | ||
624 | u64 gpu_addr; | ||
625 | |||
626 | r = amdgpu_wb_get(adev, &index); | ||
627 | if (r) { | ||
628 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
629 | return r; | ||
630 | } | ||
631 | |||
632 | gpu_addr = adev->wb.gpu_addr + (index * 4); | ||
633 | tmp = 0xCAFEDEAD; | ||
634 | adev->wb.wb[index] = cpu_to_le32(tmp); | ||
635 | |||
636 | r = amdgpu_ring_lock(ring, 5); | ||
637 | if (r) { | ||
638 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | ||
639 | amdgpu_wb_free(adev, index); | ||
640 | return r; | ||
641 | } | ||
642 | |||
643 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||
644 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | ||
645 | amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); | ||
646 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); | ||
647 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | ||
648 | amdgpu_ring_write(ring, 0xDEADBEEF); | ||
649 | amdgpu_ring_unlock_commit(ring); | ||
650 | |||
651 | for (i = 0; i < adev->usec_timeout; i++) { | ||
652 | tmp = le32_to_cpu(adev->wb.wb[index]); | ||
653 | if (tmp == 0xDEADBEEF) | ||
654 | break; | ||
655 | DRM_UDELAY(1); | ||
656 | } | ||
657 | |||
658 | if (i < adev->usec_timeout) { | ||
659 | DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); | ||
660 | } else { | ||
661 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
662 | ring->idx, tmp); | ||
663 | r = -EINVAL; | ||
664 | } | ||
665 | amdgpu_wb_free(adev, index); | ||
666 | |||
667 | return r; | ||
668 | } | ||
669 | |||
670 | /** | ||
671 | * sdma_v2_4_ring_test_ib - test an IB on the DMA engine | ||
672 | * | ||
673 | * @ring: amdgpu_ring structure holding ring information | ||
674 | * | ||
675 | * Test a simple IB in the DMA ring (VI). | ||
676 | * Returns 0 on success, error on failure. | ||
677 | */ | ||
678 | static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) | ||
679 | { | ||
680 | struct amdgpu_device *adev = ring->adev; | ||
681 | struct amdgpu_ib ib; | ||
682 | unsigned i; | ||
683 | unsigned index; | ||
684 | int r; | ||
685 | u32 tmp = 0; | ||
686 | u64 gpu_addr; | ||
687 | |||
688 | r = amdgpu_wb_get(adev, &index); | ||
689 | if (r) { | ||
690 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
691 | return r; | ||
692 | } | ||
693 | |||
694 | gpu_addr = adev->wb.gpu_addr + (index * 4); | ||
695 | tmp = 0xCAFEDEAD; | ||
696 | adev->wb.wb[index] = cpu_to_le32(tmp); | ||
697 | |||
698 | r = amdgpu_ib_get(ring, NULL, 256, &ib); | ||
699 | if (r) { | ||
700 | amdgpu_wb_free(adev, index); | ||
701 | DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); | ||
702 | return r; | ||
703 | } | ||
704 | |||
705 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||
706 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); | ||
707 | ib.ptr[1] = lower_32_bits(gpu_addr); | ||
708 | ib.ptr[2] = upper_32_bits(gpu_addr); | ||
709 | ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1); | ||
710 | ib.ptr[4] = 0xDEADBEEF; | ||
711 | ib.ptr[5] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); | ||
712 | ib.ptr[6] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); | ||
713 | ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); | ||
714 | ib.length_dw = 8; | ||
715 | |||
716 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); | ||
717 | if (r) { | ||
718 | amdgpu_ib_free(adev, &ib); | ||
719 | amdgpu_wb_free(adev, index); | ||
720 | DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); | ||
721 | return r; | ||
722 | } | ||
723 | r = amdgpu_fence_wait(ib.fence, false); | ||
724 | if (r) { | ||
725 | amdgpu_ib_free(adev, &ib); | ||
726 | amdgpu_wb_free(adev, index); | ||
727 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | ||
728 | return r; | ||
729 | } | ||
730 | for (i = 0; i < adev->usec_timeout; i++) { | ||
731 | tmp = le32_to_cpu(adev->wb.wb[index]); | ||
732 | if (tmp == 0xDEADBEEF) | ||
733 | break; | ||
734 | DRM_UDELAY(1); | ||
735 | } | ||
736 | if (i < adev->usec_timeout) { | ||
737 | DRM_INFO("ib test on ring %d succeeded in %u usecs\n", | ||
738 | ib.fence->ring->idx, i); | ||
739 | } else { | ||
740 | DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); | ||
741 | r = -EINVAL; | ||
742 | } | ||
743 | amdgpu_ib_free(adev, &ib); | ||
744 | amdgpu_wb_free(adev, index); | ||
745 | return r; | ||
746 | } | ||
747 | |||
748 | /** | ||
749 | * sdma_v2_4_vm_copy_pte - update PTEs by copying them from the GART | ||
750 | * | ||
751 | * @ib: indirect buffer to fill with commands | ||
752 | * @pe: addr of the page entry | ||
753 | * @src: src addr to copy from | ||
754 | * @count: number of page entries to update | ||
755 | * | ||
756 | * Update PTEs by copying them from the GART using sDMA (VI). | ||
757 | */ | ||
758 | static void sdma_v2_4_vm_copy_pte(struct amdgpu_ib *ib, | ||
759 | uint64_t pe, uint64_t src, | ||
760 | unsigned count) | ||
761 | { | ||
762 | while (count) { | ||
763 | unsigned bytes = count * 8; | ||
764 | if (bytes > 0x1FFFF8) | ||
765 | bytes = 0x1FFFF8; | ||
766 | |||
767 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | | ||
768 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); | ||
769 | ib->ptr[ib->length_dw++] = bytes; | ||
770 | ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ | ||
771 | ib->ptr[ib->length_dw++] = lower_32_bits(src); | ||
772 | ib->ptr[ib->length_dw++] = upper_32_bits(src); | ||
773 | ib->ptr[ib->length_dw++] = lower_32_bits(pe); | ||
774 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | ||
775 | |||
776 | pe += bytes; | ||
777 | src += bytes; | ||
778 | count -= bytes / 8; | ||
779 | } | ||
780 | } | ||
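The copy is chunked at 0x1FFFF8 bytes per packet, a multiple of the 8-byte PTE size just under 2 MiB, so each chunk moves a whole number of page table entries. The chunk-count arithmetic, standalone:

    #include <stdio.h>

    int main(void)
    {
        unsigned count = 500000;   /* PTEs to copy, 8 bytes each */
        unsigned packets = 0;

        while (count) {
            unsigned bytes = count * 8;

            if (bytes > 0x1FFFF8)  /* per-packet cap, multiple of 8 */
                bytes = 0x1FFFF8;
            count -= bytes / 8;
            packets++;
        }
        printf("%u packets\n", packets);  /* 4000000 bytes -> 2 packets */
        return 0;
    }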
781 | |||
782 | /** | ||
783 | * sdma_v2_4_vm_write_pte - update PTEs by writing them manually | ||
784 | * | ||
785 | * @ib: indirect buffer to fill with commands | ||
786 | * @pe: addr of the page entry | ||
787 | * @addr: dst addr to write into pe | ||
788 | * @count: number of page entries to update | ||
789 | * @incr: increase next addr by incr bytes | ||
790 | * @flags: access flags | ||
791 | * | ||
792 | * Update PTEs by writing them manually using sDMA (VI). | ||
793 | */ | ||
794 | static void sdma_v2_4_vm_write_pte(struct amdgpu_ib *ib, | ||
795 | uint64_t pe, | ||
796 | uint64_t addr, unsigned count, | ||
797 | uint32_t incr, uint32_t flags) | ||
798 | { | ||
799 | uint64_t value; | ||
800 | unsigned ndw; | ||
801 | |||
802 | while (count) { | ||
803 | ndw = count * 2; | ||
804 | if (ndw > 0xFFFFE) | ||
805 | ndw = 0xFFFFE; | ||
806 | |||
807 | /* for non-physically contiguous pages (system) */ | ||
808 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||
809 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); | ||
810 | ib->ptr[ib->length_dw++] = pe; | ||
811 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | ||
812 | ib->ptr[ib->length_dw++] = ndw; | ||
813 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | ||
814 | if (flags & AMDGPU_PTE_SYSTEM) { | ||
815 | value = amdgpu_vm_map_gart(ib->ring->adev, addr); | ||
816 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
817 | } else if (flags & AMDGPU_PTE_VALID) { | ||
818 | value = addr; | ||
819 | } else { | ||
820 | value = 0; | ||
821 | } | ||
822 | addr += incr; | ||
823 | value |= flags; | ||
824 | ib->ptr[ib->length_dw++] = value; | ||
825 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | ||
826 | } | ||
827 | } | ||
828 | } | ||
829 | |||
830 | /** | ||
831 | * sdma_v2_4_vm_set_pte_pde - update the page tables using sDMA | ||
832 | * | ||
833 | * @ib: indirect buffer to fill with commands | ||
834 | * @pe: addr of the page entry | ||
835 | * @addr: dst addr to write into pe | ||
836 | * @count: number of page entries to update | ||
837 | * @incr: increase next addr by incr bytes | ||
838 | * @flags: access flags | ||
839 | * | ||
840 | * Update the page tables using sDMA (VI). | ||
841 | */ | ||
842 | static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, | ||
843 | uint64_t pe, | ||
844 | uint64_t addr, unsigned count, | ||
845 | uint32_t incr, uint32_t flags) | ||
846 | { | ||
847 | uint64_t value; | ||
848 | unsigned ndw; | ||
849 | |||
850 | while (count) { | ||
851 | ndw = count; | ||
852 | if (ndw > 0x7FFFF) | ||
853 | ndw = 0x7FFFF; | ||
854 | |||
855 | if (flags & AMDGPU_PTE_VALID) | ||
856 | value = addr; | ||
857 | else | ||
858 | value = 0; | ||
859 | |||
860 | /* for physically contiguous pages (vram) */ | ||
861 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); | ||
862 | ib->ptr[ib->length_dw++] = pe; /* dst addr */ | ||
863 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | ||
864 | ib->ptr[ib->length_dw++] = flags; /* mask */ | ||
865 | ib->ptr[ib->length_dw++] = 0; | ||
866 | ib->ptr[ib->length_dw++] = value; /* value */ | ||
867 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | ||
868 | ib->ptr[ib->length_dw++] = incr; /* increment size */ | ||
869 | ib->ptr[ib->length_dw++] = 0; | ||
870 | ib->ptr[ib->length_dw++] = ndw; /* number of entries */ | ||
871 | |||
872 | pe += ndw * 8; | ||
873 | addr += ndw * incr; | ||
874 | count -= ndw; | ||
875 | } | ||
876 | } | ||
877 | |||
878 | /** | ||
879 | * sdma_v2_4_vm_pad_ib - pad the IB to the required number of dw | ||
880 | * | ||
881 | * @ib: indirect buffer to fill with padding | ||
882 | * | ||
883 | */ | ||
884 | static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib) | ||
885 | { | ||
886 | while (ib->length_dw & 0x7) | ||
887 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); | ||
888 | } | ||
889 | |||
890 | /** | ||
891 | * sdma_v2_4_ring_emit_vm_flush - vm flush using sDMA | ||
892 | * | ||
893 | * @ring: amdgpu_ring pointer | ||
894 | * @vm: amdgpu_vm pointer | ||
895 | * | ||
896 | * Update the page table base and flush the VM TLB | ||
897 | * using sDMA (VI). | ||
898 | */ | ||
899 | static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring, | ||
900 | unsigned vm_id, uint64_t pd_addr) | ||
901 | { | ||
902 | u32 srbm_gfx_cntl = 0; | ||
903 | |||
904 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
905 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
906 | if (vm_id < 8) { | ||
907 | amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); | ||
908 | } else { | ||
909 | amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); | ||
910 | } | ||
911 | amdgpu_ring_write(ring, pd_addr >> 12); | ||
912 | |||
913 | /* update SH_MEM_* regs */ | ||
914 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id); | ||
915 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
916 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
917 | amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); | ||
918 | amdgpu_ring_write(ring, srbm_gfx_cntl); | ||
919 | |||
920 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
921 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
922 | amdgpu_ring_write(ring, mmSH_MEM_BASES); | ||
923 | amdgpu_ring_write(ring, 0); | ||
924 | |||
925 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
926 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
927 | amdgpu_ring_write(ring, mmSH_MEM_CONFIG); | ||
928 | amdgpu_ring_write(ring, 0); | ||
929 | |||
930 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
931 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
932 | amdgpu_ring_write(ring, mmSH_MEM_APE1_BASE); | ||
933 | amdgpu_ring_write(ring, 1); | ||
934 | |||
935 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
936 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
937 | amdgpu_ring_write(ring, mmSH_MEM_APE1_LIMIT); | ||
938 | amdgpu_ring_write(ring, 0); | ||
939 | |||
940 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0); | ||
941 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
942 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
943 | amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); | ||
944 | amdgpu_ring_write(ring, srbm_gfx_cntl); | ||
945 | |||
946 | |||
947 | /* flush TLB */ | ||
948 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
949 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
950 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); | ||
951 | amdgpu_ring_write(ring, 1 << vm_id); | ||
952 | |||
953 | /* wait for flush */ | ||
954 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | | ||
955 | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | | ||
956 | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */ | ||
957 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); | ||
958 | amdgpu_ring_write(ring, 0); | ||
959 | amdgpu_ring_write(ring, 0); /* reference */ | ||
960 | amdgpu_ring_write(ring, 0); /* mask */ | ||
961 | amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | | ||
962 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ | ||
963 | } | ||
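The flush sequence above emits the same three-dword SRBM_WRITE packet (header, register offset, value) seven times over. A hypothetical helper, sketched here only to make the packet shape explicit, would reduce each write to one call:

	static void sdma_v2_4_emit_srbm_write(struct amdgpu_ring *ring,
					      u32 reg, u32 val)
	{
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
				  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); /* enable all 4 bytes */
		amdgpu_ring_write(ring, reg);	/* destination register */
		amdgpu_ring_write(ring, val);	/* value to write */
	}

e.g. sdma_v2_4_emit_srbm_write(ring, mmVM_INVALIDATE_REQUEST, 1 << vm_id) for the TLB flush step.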
964 | |||
965 | static int sdma_v2_4_early_init(struct amdgpu_device *adev) | ||
966 | { | ||
967 | sdma_v2_4_set_ring_funcs(adev); | ||
968 | sdma_v2_4_set_buffer_funcs(adev); | ||
969 | sdma_v2_4_set_vm_pte_funcs(adev); | ||
970 | sdma_v2_4_set_irq_funcs(adev); | ||
971 | |||
972 | return 0; | ||
973 | } | ||
974 | |||
975 | static int sdma_v2_4_sw_init(struct amdgpu_device *adev) | ||
976 | { | ||
977 | struct amdgpu_ring *ring; | ||
978 | int r; | ||
979 | |||
980 | /* SDMA trap event */ | ||
981 | r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); | ||
982 | if (r) | ||
983 | return r; | ||
984 | |||
985 | /* SDMA Privileged inst */ | ||
986 | r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); | ||
987 | if (r) | ||
988 | return r; | ||
989 | |||
990 | /* SDMA Privileged inst */ | ||
991 | r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); | ||
992 | if (r) | ||
993 | return r; | ||
994 | |||
995 | r = sdma_v2_4_init_microcode(adev); | ||
996 | if (r) { | ||
997 | DRM_ERROR("Failed to load sdma firmware!\n"); | ||
998 | return r; | ||
999 | } | ||
1000 | |||
1001 | ring = &adev->sdma[0].ring; | ||
1002 | ring->ring_obj = NULL; | ||
1003 | ring->use_doorbell = false; | ||
1004 | sprintf(ring->name, "sdma0"); | ||
1005 | r = amdgpu_ring_init(adev, ring, 256 * 1024, | ||
1006 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
1007 | &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, | ||
1008 | AMDGPU_RING_TYPE_SDMA); | ||
1009 | if (r) | ||
1010 | return r; | ||
1011 | |||
1012 | ring = &adev->sdma[1].ring; | ||
1013 | ring->ring_obj = NULL; | ||
1014 | ring->use_doorbell = false; | ||
1015 | sprintf(ring->name, "sdma1"); | ||
1016 | r = amdgpu_ring_init(adev, ring, 256 * 1024, | ||
1017 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
1018 | &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1, | ||
1019 | AMDGPU_RING_TYPE_SDMA); | ||
1020 | if (r) | ||
1021 | return r; | ||
1022 | |||
1023 | return 0; | ||
1028 | } | ||
1029 | |||
1030 | static int sdma_v2_4_sw_fini(struct amdgpu_device *adev) | ||
1031 | { | ||
1032 | amdgpu_ring_fini(&adev->sdma[0].ring); | ||
1033 | amdgpu_ring_fini(&adev->sdma[1].ring); | ||
1034 | |||
1035 | return 0; | ||
1036 | } | ||
1037 | |||
1038 | static int sdma_v2_4_hw_init(struct amdgpu_device *adev) | ||
1039 | { | ||
1040 | int r; | ||
1041 | |||
1042 | sdma_v2_4_init_golden_registers(adev); | ||
1043 | |||
1044 | r = sdma_v2_4_start(adev); | ||
1045 | if (r) | ||
1046 | return r; | ||
1047 | |||
1048 | return 0; | ||
1049 | } | ||
1050 | |||
1051 | static int sdma_v2_4_hw_fini(struct amdgpu_device *adev) | ||
1052 | { | ||
1053 | sdma_v2_4_enable(adev, false); | ||
1054 | |||
1055 | return 0; | ||
1056 | } | ||
1057 | |||
1058 | static int sdma_v2_4_suspend(struct amdgpu_device *adev) | ||
1059 | { | ||
1061 | return sdma_v2_4_hw_fini(adev); | ||
1062 | } | ||
1063 | |||
1064 | static int sdma_v2_4_resume(struct amdgpu_device *adev) | ||
1065 | { | ||
1067 | return sdma_v2_4_hw_init(adev); | ||
1068 | } | ||
1069 | |||
1070 | static bool sdma_v2_4_is_idle(struct amdgpu_device *adev) | ||
1071 | { | ||
1072 | u32 tmp = RREG32(mmSRBM_STATUS2); | ||
1073 | |||
1074 | if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | | ||
1075 | SRBM_STATUS2__SDMA1_BUSY_MASK)) | ||
1076 | return false; | ||
1077 | |||
1078 | return true; | ||
1079 | } | ||
1080 | |||
1081 | static int sdma_v2_4_wait_for_idle(struct amdgpu_device *adev) | ||
1082 | { | ||
1083 | unsigned i; | ||
1084 | u32 tmp; | ||
1085 | |||
1086 | for (i = 0; i < adev->usec_timeout; i++) { | ||
1087 | tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | | ||
1088 | SRBM_STATUS2__SDMA1_BUSY_MASK); | ||
1089 | |||
1090 | if (!tmp) | ||
1091 | return 0; | ||
1092 | udelay(1); | ||
1093 | } | ||
1094 | return -ETIMEDOUT; | ||
1095 | } | ||
1096 | |||
1097 | static void sdma_v2_4_print_status(struct amdgpu_device *adev) | ||
1098 | { | ||
1099 | int i, j; | ||
1100 | |||
1101 | dev_info(adev->dev, "VI SDMA registers\n"); | ||
1102 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | ||
1103 | RREG32(mmSRBM_STATUS2)); | ||
1104 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
1105 | dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", | ||
1106 | i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); | ||
1107 | dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", | ||
1108 | i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); | ||
1109 | dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", | ||
1110 | i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); | ||
1111 | dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", | ||
1112 | i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); | ||
1113 | dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", | ||
1114 | i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); | ||
1115 | dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", | ||
1116 | i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); | ||
1117 | dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", | ||
1118 | i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); | ||
1119 | dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", | ||
1120 | i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); | ||
1121 | dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", | ||
1122 | i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); | ||
1123 | dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", | ||
1124 | i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); | ||
1125 | dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", | ||
1126 | i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); | ||
1127 | dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", | ||
1128 | i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); | ||
1129 | mutex_lock(&adev->srbm_mutex); | ||
1130 | for (j = 0; j < 16; j++) { | ||
1131 | vi_srbm_select(adev, 0, 0, 0, j); | ||
1132 | dev_info(adev->dev, " VM %d:\n", j); | ||
1133 | dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n", | ||
1134 | i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); | ||
1135 | dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n", | ||
1136 | i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); | ||
1137 | } | ||
1138 | vi_srbm_select(adev, 0, 0, 0, 0); | ||
1139 | mutex_unlock(&adev->srbm_mutex); | ||
1140 | } | ||
1141 | } | ||
1142 | |||
1143 | static int sdma_v2_4_soft_reset(struct amdgpu_device *adev) | ||
1144 | { | ||
1145 | u32 srbm_soft_reset = 0; | ||
1146 | u32 tmp = RREG32(mmSRBM_STATUS2); | ||
1147 | |||
1148 | if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { | ||
1149 | /* sdma0 */ | ||
1150 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); | ||
1151 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); | ||
1152 | WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); | ||
1153 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; | ||
1154 | } | ||
1155 | if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { | ||
1156 | /* sdma1 */ | ||
1157 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); | ||
1158 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); | ||
1159 | WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); | ||
1160 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; | ||
1161 | } | ||
1162 | |||
1163 | if (srbm_soft_reset) { | ||
1164 | sdma_v2_4_print_status(adev); | ||
1165 | |||
1166 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1167 | tmp |= srbm_soft_reset; | ||
1168 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
1169 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
1170 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1171 | |||
1172 | udelay(50); | ||
1173 | |||
1174 | tmp &= ~srbm_soft_reset; | ||
1175 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
1176 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1177 | |||
1178 | /* Wait a little for things to settle down */ | ||
1179 | udelay(50); | ||
1180 | |||
1181 | sdma_v2_4_print_status(adev); | ||
1182 | } | ||
1183 | |||
1184 | return 0; | ||
1185 | } | ||
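The reset pulse above has a fixed shape: OR the reset bits in, read the register back, wait, clear the bits, read back again and settle. As a standalone sketch (treating the readbacks as posting reads is an assumption, the patch does not say why they are there):

	static void srbm_soft_reset_pulse(struct amdgpu_device *adev, u32 bits)
	{
		u32 tmp = RREG32(mmSRBM_SOFT_RESET);

		WREG32(mmSRBM_SOFT_RESET, tmp | bits);	/* assert reset */
		RREG32(mmSRBM_SOFT_RESET);		/* read back */
		udelay(50);
		WREG32(mmSRBM_SOFT_RESET, tmp & ~bits);	/* deassert */
		RREG32(mmSRBM_SOFT_RESET);
		udelay(50);				/* let things settle */
	}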
1186 | |||
1187 | static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev, | ||
1188 | struct amdgpu_irq_src *src, | ||
1189 | unsigned type, | ||
1190 | enum amdgpu_interrupt_state state) | ||
1191 | { | ||
1192 | u32 sdma_cntl; | ||
1193 | |||
1194 | switch (type) { | ||
1195 | case AMDGPU_SDMA_IRQ_TRAP0: | ||
1196 | switch (state) { | ||
1197 | case AMDGPU_IRQ_STATE_DISABLE: | ||
1198 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); | ||
1199 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); | ||
1200 | WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); | ||
1201 | break; | ||
1202 | case AMDGPU_IRQ_STATE_ENABLE: | ||
1203 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); | ||
1204 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); | ||
1205 | WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); | ||
1206 | break; | ||
1207 | default: | ||
1208 | break; | ||
1209 | } | ||
1210 | break; | ||
1211 | case AMDGPU_SDMA_IRQ_TRAP1: | ||
1212 | switch (state) { | ||
1213 | case AMDGPU_IRQ_STATE_DISABLE: | ||
1214 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); | ||
1215 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); | ||
1216 | WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); | ||
1217 | break; | ||
1218 | case AMDGPU_IRQ_STATE_ENABLE: | ||
1219 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); | ||
1220 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); | ||
1221 | WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); | ||
1222 | break; | ||
1223 | default: | ||
1224 | break; | ||
1225 | } | ||
1226 | break; | ||
1227 | default: | ||
1228 | break; | ||
1229 | } | ||
1230 | return 0; | ||
1231 | } | ||
1232 | |||
1233 | static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev, | ||
1234 | struct amdgpu_irq_src *source, | ||
1235 | struct amdgpu_iv_entry *entry) | ||
1236 | { | ||
1237 | u8 instance_id, queue_id; | ||
1238 | |||
1239 | instance_id = entry->ring_id & 0x3; | ||
1240 | queue_id = (entry->ring_id & 0xc) >> 2; | ||
1241 | DRM_DEBUG("IH: SDMA trap\n"); | ||
1242 | switch (instance_id) { | ||
1243 | case 0: | ||
1244 | switch (queue_id) { | ||
1245 | case 0: | ||
1246 | amdgpu_fence_process(&adev->sdma[0].ring); | ||
1247 | break; | ||
1248 | case 1: | ||
1249 | /* XXX compute */ | ||
1250 | break; | ||
1251 | case 2: | ||
1252 | /* XXX compute */ | ||
1253 | break; | ||
1254 | } | ||
1255 | break; | ||
1256 | case 1: | ||
1257 | switch (queue_id) { | ||
1258 | case 0: | ||
1259 | amdgpu_fence_process(&adev->sdma[1].ring); | ||
1260 | break; | ||
1261 | case 1: | ||
1262 | /* XXX compute */ | ||
1263 | break; | ||
1264 | case 2: | ||
1265 | /* XXX compute */ | ||
1266 | break; | ||
1267 | } | ||
1268 | break; | ||
1269 | } | ||
1270 | return 0; | ||
1271 | } | ||
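The decode above takes the SDMA instance from bits [1:0] of ring_id and the queue from bits [3:2] (layout inferred from the masks). A worked example:

	/* entry->ring_id == 0x5 */
	u8 instance_id = 0x5 & 0x3;	/* == 1 -> adev->sdma[1] */
	u8 queue_id = (0x5 & 0xc) >> 2;	/* == 1 -> a compute queue, still XXX above */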
1272 | |||
1273 | static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev, | ||
1274 | struct amdgpu_irq_src *source, | ||
1275 | struct amdgpu_iv_entry *entry) | ||
1276 | { | ||
1277 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); | ||
1278 | schedule_work(&adev->reset_work); | ||
1279 | return 0; | ||
1280 | } | ||
1281 | |||
1282 | static int sdma_v2_4_set_clockgating_state(struct amdgpu_device *adev, | ||
1283 | enum amdgpu_clockgating_state state) | ||
1284 | { | ||
1285 | /* XXX handled via the smc on VI */ | ||
1286 | |||
1287 | return 0; | ||
1288 | } | ||
1289 | |||
1290 | static int sdma_v2_4_set_powergating_state(struct amdgpu_device *adev, | ||
1291 | enum amdgpu_powergating_state state) | ||
1292 | { | ||
1293 | return 0; | ||
1294 | } | ||
1295 | |||
1296 | const struct amdgpu_ip_funcs sdma_v2_4_ip_funcs = { | ||
1297 | .early_init = sdma_v2_4_early_init, | ||
1298 | .late_init = NULL, | ||
1299 | .sw_init = sdma_v2_4_sw_init, | ||
1300 | .sw_fini = sdma_v2_4_sw_fini, | ||
1301 | .hw_init = sdma_v2_4_hw_init, | ||
1302 | .hw_fini = sdma_v2_4_hw_fini, | ||
1303 | .suspend = sdma_v2_4_suspend, | ||
1304 | .resume = sdma_v2_4_resume, | ||
1305 | .is_idle = sdma_v2_4_is_idle, | ||
1306 | .wait_for_idle = sdma_v2_4_wait_for_idle, | ||
1307 | .soft_reset = sdma_v2_4_soft_reset, | ||
1308 | .print_status = sdma_v2_4_print_status, | ||
1309 | .set_clockgating_state = sdma_v2_4_set_clockgating_state, | ||
1310 | .set_powergating_state = sdma_v2_4_set_powergating_state, | ||
1311 | }; | ||
1312 | |||
1313 | /** | ||
1314 | * sdma_v2_4_ring_is_lockup - Check if the DMA engine is locked up | ||
1315 | * | ||
1316 | * @ring: amdgpu_ring structure holding ring information | ||
1317 | * | ||
1318 | * Check if the async DMA engine is locked up (VI). | ||
1319 | * Returns true if the engine appears to be locked up, false if not. | ||
1320 | */ | ||
1321 | static bool sdma_v2_4_ring_is_lockup(struct amdgpu_ring *ring) | ||
1322 | { | ||
1324 | if (sdma_v2_4_is_idle(ring->adev)) { | ||
1325 | amdgpu_ring_lockup_update(ring); | ||
1326 | return false; | ||
1327 | } | ||
1328 | return amdgpu_ring_test_lockup(ring); | ||
1329 | } | ||
1330 | |||
1331 | static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = { | ||
1332 | .get_rptr = sdma_v2_4_ring_get_rptr, | ||
1333 | .get_wptr = sdma_v2_4_ring_get_wptr, | ||
1334 | .set_wptr = sdma_v2_4_ring_set_wptr, | ||
1335 | .parse_cs = NULL, | ||
1336 | .emit_ib = sdma_v2_4_ring_emit_ib, | ||
1337 | .emit_fence = sdma_v2_4_ring_emit_fence, | ||
1338 | .emit_semaphore = sdma_v2_4_ring_emit_semaphore, | ||
1339 | .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush, | ||
1340 | .test_ring = sdma_v2_4_ring_test_ring, | ||
1341 | .test_ib = sdma_v2_4_ring_test_ib, | ||
1342 | .is_lockup = sdma_v2_4_ring_is_lockup, | ||
1343 | }; | ||
1344 | |||
1345 | static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev) | ||
1346 | { | ||
1347 | adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs; | ||
1348 | adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs; | ||
1349 | } | ||
1350 | |||
1351 | static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = { | ||
1352 | .set = sdma_v2_4_set_trap_irq_state, | ||
1353 | .process = sdma_v2_4_process_trap_irq, | ||
1354 | }; | ||
1355 | |||
1356 | static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = { | ||
1357 | .process = sdma_v2_4_process_illegal_inst_irq, | ||
1358 | }; | ||
1359 | |||
1360 | static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev) | ||
1361 | { | ||
1362 | adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; | ||
1363 | adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs; | ||
1364 | adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs; | ||
1365 | } | ||
1366 | |||
1367 | /** | ||
1368 | * sdma_v2_4_emit_copy_buffer - copy buffer using the sDMA engine | ||
1369 | * | ||
1370 | * @ring: amdgpu_ring structure holding ring information | ||
1371 | * @src_offset: src GPU address | ||
1372 | * @dst_offset: dst GPU address | ||
1373 | * @byte_count: number of bytes to xfer | ||
1374 | * | ||
1375 | * Copy GPU buffers using the DMA engine (VI). | ||
1376 | * Used by the amdgpu ttm implementation to move pages if | ||
1377 | * registered as the asic copy callback. | ||
1378 | */ | ||
1379 | static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ring *ring, | ||
1380 | uint64_t src_offset, | ||
1381 | uint64_t dst_offset, | ||
1382 | uint32_t byte_count) | ||
1383 | { | ||
1384 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | | ||
1385 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)); | ||
1386 | amdgpu_ring_write(ring, byte_count); | ||
1387 | amdgpu_ring_write(ring, 0); /* src/dst endian swap */ | ||
1388 | amdgpu_ring_write(ring, lower_32_bits(src_offset)); | ||
1389 | amdgpu_ring_write(ring, upper_32_bits(src_offset)); | ||
1390 | amdgpu_ring_write(ring, lower_32_bits(dst_offset)); | ||
1391 | amdgpu_ring_write(ring, upper_32_bits(dst_offset)); | ||
1392 | } | ||
1393 | |||
1394 | /** | ||
1395 | * sdma_v2_4_emit_fill_buffer - fill buffer using the sDMA engine | ||
1396 | * | ||
1397 | * @ring: amdgpu_ring structure holding ring information | ||
1398 | * @src_data: value to write to buffer | ||
1399 | * @dst_offset: dst GPU address | ||
1400 | * @byte_count: number of bytes to xfer | ||
1401 | * | ||
1402 | * Fill GPU buffers using the DMA engine (VI). | ||
1403 | */ | ||
1404 | static void sdma_v2_4_emit_fill_buffer(struct amdgpu_ring *ring, | ||
1405 | uint32_t src_data, | ||
1406 | uint64_t dst_offset, | ||
1407 | uint32_t byte_count) | ||
1408 | { | ||
1409 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL)); | ||
1410 | amdgpu_ring_write(ring, lower_32_bits(dst_offset)); | ||
1411 | amdgpu_ring_write(ring, upper_32_bits(dst_offset)); | ||
1412 | amdgpu_ring_write(ring, src_data); | ||
1413 | amdgpu_ring_write(ring, byte_count); | ||
1414 | } | ||
1415 | |||
1416 | static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = { | ||
1417 | .copy_max_bytes = 0x1fffff, | ||
1418 | .copy_num_dw = 7, | ||
1419 | .emit_copy_buffer = sdma_v2_4_emit_copy_buffer, | ||
1420 | |||
1421 | .fill_max_bytes = 0x1fffff, | ||
1422 | .fill_num_dw = 7, | ||
1423 | .emit_fill_buffer = sdma_v2_4_emit_fill_buffer, | ||
1424 | }; | ||
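copy_max_bytes and copy_num_dw let callers budget ring space before emitting copies: one COPY_LINEAR packet moves at most 0x1fffff bytes and costs 7 dwords. A sizing sketch ('size' is a hypothetical byte count, not from the patch):

	unsigned num_loops = DIV_ROUND_UP(size, 0x1fffff);	/* packets needed */
	unsigned num_dw = num_loops * 7;			/* dwords to reserve */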
1425 | |||
1426 | static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev) | ||
1427 | { | ||
1428 | if (adev->mman.buffer_funcs == NULL) { | ||
1429 | adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; | ||
1430 | adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; | ||
1431 | } | ||
1432 | } | ||
1433 | |||
1434 | static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { | ||
1435 | .copy_pte = sdma_v2_4_vm_copy_pte, | ||
1436 | .write_pte = sdma_v2_4_vm_write_pte, | ||
1437 | .set_pte_pde = sdma_v2_4_vm_set_pte_pde, | ||
1438 | .pad_ib = sdma_v2_4_vm_pad_ib, | ||
1439 | }; | ||
1440 | |||
1441 | static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) | ||
1442 | { | ||
1443 | if (adev->vm_manager.vm_pte_funcs == NULL) { | ||
1444 | adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; | ||
1445 | adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; | ||
1446 | } | ||
1447 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h new file mode 100644 index 000000000000..6cdf8941c577 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __SDMA_V2_4_H__ | ||
25 | #define __SDMA_V2_4_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs sdma_v2_4_ip_funcs; | ||
28 | |||
29 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c new file mode 100644 index 000000000000..dd547c7f6cbc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
@@ -0,0 +1,1514 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Alex Deucher | ||
23 | */ | ||
24 | #include <linux/firmware.h> | ||
25 | #include <drm/drmP.h> | ||
26 | #include "amdgpu.h" | ||
27 | #include "amdgpu_ucode.h" | ||
28 | #include "amdgpu_trace.h" | ||
29 | #include "vi.h" | ||
30 | #include "vid.h" | ||
31 | |||
32 | #include "oss/oss_3_0_d.h" | ||
33 | #include "oss/oss_3_0_sh_mask.h" | ||
34 | |||
35 | #include "gmc/gmc_8_1_d.h" | ||
36 | #include "gmc/gmc_8_1_sh_mask.h" | ||
37 | |||
38 | #include "gca/gfx_8_0_d.h" | ||
39 | #include "gca/gfx_8_0_sh_mask.h" | ||
40 | |||
41 | #include "bif/bif_5_0_d.h" | ||
42 | #include "bif/bif_5_0_sh_mask.h" | ||
43 | |||
44 | #include "tonga_sdma_pkt_open.h" | ||
45 | |||
46 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev); | ||
47 | static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev); | ||
48 | static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev); | ||
49 | static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev); | ||
50 | |||
51 | MODULE_FIRMWARE("radeon/tonga_sdma.bin"); | ||
52 | MODULE_FIRMWARE("radeon/tonga_sdma1.bin"); | ||
53 | MODULE_FIRMWARE("radeon/carrizo_sdma.bin"); | ||
54 | MODULE_FIRMWARE("radeon/carrizo_sdma1.bin"); | ||
55 | |||
56 | static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = | ||
57 | { | ||
58 | SDMA0_REGISTER_OFFSET, | ||
59 | SDMA1_REGISTER_OFFSET | ||
60 | }; | ||
61 | |||
62 | static const u32 golden_settings_tonga_a11[] = | ||
63 | { | ||
64 | mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, | ||
65 | mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, | ||
66 | mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100, | ||
67 | mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100, | ||
68 | mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100, | ||
69 | mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, | ||
70 | mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, | ||
71 | mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100, | ||
72 | mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100, | ||
73 | mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100, | ||
74 | }; | ||
75 | |||
76 | static const u32 tonga_mgcg_cgcg_init[] = | ||
77 | { | ||
78 | mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, | ||
79 | mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 | ||
80 | }; | ||
81 | |||
82 | static const u32 cz_golden_settings_a11[] = | ||
83 | { | ||
84 | mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007, | ||
85 | mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000, | ||
86 | mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100, | ||
87 | mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800, | ||
88 | mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100, | ||
89 | mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100, | ||
90 | mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007, | ||
91 | mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000, | ||
92 | mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100, | ||
93 | mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800, | ||
94 | mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100, | ||
95 | mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100, | ||
96 | }; | ||
97 | |||
98 | static const u32 cz_mgcg_cgcg_init[] = | ||
99 | { | ||
100 | mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100, | ||
101 | mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100 | ||
102 | }; | ||
103 | |||
104 | /* | ||
105 | * sDMA - System DMA | ||
106 | * Starting with CIK, the GPU has new asynchronous | ||
107 | * DMA engines. These engines are used for compute | ||
108 | * and gfx. There are two DMA engines (SDMA0, SDMA1) | ||
109 | * and each one supports 1 ring buffer used for gfx | ||
110 | * and 2 queues used for compute. | ||
111 | * | ||
112 | * The programming model is very similar to the CP | ||
113 | * (ring buffer, IBs, etc.), but sDMA has its own | ||
114 | * packet format that is different from the PM4 format | ||
115 | * used by the CP. sDMA supports copying data, writing | ||
116 | * embedded data, solid fills, and a number of other | ||
117 | * things. It also has support for tiling/detiling of | ||
118 | * buffers. | ||
119 | */ | ||
120 | |||
121 | static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) | ||
122 | { | ||
123 | switch (adev->asic_type) { | ||
124 | case CHIP_TONGA: | ||
125 | amdgpu_program_register_sequence(adev, | ||
126 | tonga_mgcg_cgcg_init, | ||
127 | (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); | ||
128 | amdgpu_program_register_sequence(adev, | ||
129 | golden_settings_tonga_a11, | ||
130 | (const u32)ARRAY_SIZE(golden_settings_tonga_a11)); | ||
131 | break; | ||
132 | case CHIP_CARRIZO: | ||
133 | amdgpu_program_register_sequence(adev, | ||
134 | cz_mgcg_cgcg_init, | ||
135 | (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); | ||
136 | amdgpu_program_register_sequence(adev, | ||
137 | cz_golden_settings_a11, | ||
138 | (const u32)ARRAY_SIZE(cz_golden_settings_a11)); | ||
139 | break; | ||
140 | default: | ||
141 | break; | ||
142 | } | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * sdma_v3_0_init_microcode - load ucode images from disk | ||
147 | * | ||
148 | * @adev: amdgpu_device pointer | ||
149 | * | ||
150 | * Use the firmware interface to load the ucode images into | ||
151 | * the driver (not loaded into hw). | ||
152 | * Returns 0 on success, error on failure. | ||
153 | */ | ||
154 | static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) | ||
155 | { | ||
156 | const char *chip_name; | ||
157 | char fw_name[30]; | ||
158 | int err, i; | ||
159 | struct amdgpu_firmware_info *info = NULL; | ||
160 | const struct common_firmware_header *header = NULL; | ||
161 | |||
162 | DRM_DEBUG("\n"); | ||
163 | |||
164 | switch (adev->asic_type) { | ||
165 | case CHIP_TONGA: | ||
166 | chip_name = "tonga"; | ||
167 | break; | ||
168 | case CHIP_CARRIZO: | ||
169 | chip_name = "carrizo"; | ||
170 | break; | ||
171 | default: BUG(); | ||
172 | } | ||
173 | |||
174 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
175 | if (i == 0) | ||
176 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name); | ||
177 | else | ||
178 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name); | ||
179 | err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev); | ||
180 | if (err) | ||
181 | goto out; | ||
182 | err = amdgpu_ucode_validate(adev->sdma[i].fw); | ||
183 | if (err) | ||
184 | goto out; | ||
185 | |||
186 | if (adev->firmware.smu_load) { | ||
187 | info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; | ||
188 | info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; | ||
189 | info->fw = adev->sdma[i].fw; | ||
190 | header = (const struct common_firmware_header *)info->fw->data; | ||
191 | adev->firmware.fw_size += | ||
192 | ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); | ||
193 | } | ||
194 | } | ||
195 | out: | ||
196 | if (err) { | ||
197 | printk(KERN_ERR | ||
198 | "sdma_v3_0: Failed to load firmware \"%s\"\n", | ||
199 | fw_name); | ||
200 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
201 | release_firmware(adev->sdma[i].fw); | ||
202 | adev->sdma[i].fw = NULL; | ||
203 | } | ||
204 | } | ||
205 | return err; | ||
206 | } | ||
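For what it's worth, the two-branch name selection above could collapse into one format string; for Tonga it resolves to "radeon/tonga_sdma.bin" and "radeon/tonga_sdma1.bin", matching the MODULE_FIRMWARE() lines at the top of the file. A sketch, not part of the patch:

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma%s.bin",
		 chip_name, (i == 0) ? "" : "1");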
207 | |||
208 | /** | ||
209 | * sdma_v3_0_ring_get_rptr - get the current read pointer | ||
210 | * | ||
211 | * @ring: amdgpu ring pointer | ||
212 | * | ||
213 | * Get the current rptr from the hardware (VI+). | ||
214 | */ | ||
215 | static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring) | ||
216 | { | ||
217 | u32 rptr; | ||
218 | |||
219 | /* XXX check if swapping is necessary on BE */ | ||
220 | rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2; | ||
221 | |||
222 | return rptr; | ||
223 | } | ||
224 | |||
225 | /** | ||
226 | * sdma_v3_0_ring_get_wptr - get the current write pointer | ||
227 | * | ||
228 | * @ring: amdgpu ring pointer | ||
229 | * | ||
230 | * Get the current wptr from the hardware (VI+). | ||
231 | */ | ||
232 | static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring) | ||
233 | { | ||
234 | struct amdgpu_device *adev = ring->adev; | ||
235 | u32 wptr; | ||
236 | |||
237 | if (ring->use_doorbell) { | ||
238 | /* XXX check if swapping is necessary on BE */ | ||
239 | wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2; | ||
240 | } else { | ||
241 | int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; | ||
242 | |||
243 | wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2; | ||
244 | } | ||
245 | |||
246 | return wptr; | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * sdma_v3_0_ring_set_wptr - commit the write pointer | ||
251 | * | ||
252 | * @ring: amdgpu ring pointer | ||
253 | * | ||
254 | * Write the wptr back to the hardware (VI+). | ||
255 | */ | ||
256 | static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring) | ||
257 | { | ||
258 | struct amdgpu_device *adev = ring->adev; | ||
259 | |||
260 | if (ring->use_doorbell) { | ||
261 | /* XXX check if swapping is necessary on BE */ | ||
262 | adev->wb.wb[ring->wptr_offs] = ring->wptr << 2; | ||
263 | WDOORBELL32(ring->doorbell_index, ring->wptr << 2); | ||
264 | } else { | ||
265 | int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1; | ||
266 | |||
267 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2); | ||
268 | } | ||
269 | } | ||
270 | |||
271 | static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *ring); | ||
272 | |||
273 | /** | ||
274 | * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine | ||
275 | * | ||
276 | * @ring: amdgpu ring pointer | ||
277 | * @ib: IB object to schedule | ||
278 | * | ||
279 | * Schedule an IB in the DMA ring (VI). | ||
280 | */ | ||
281 | static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, | ||
282 | struct amdgpu_ib *ib) | ||
283 | { | ||
284 | u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf; | ||
285 | u32 next_rptr = ring->wptr + 5; | ||
286 | |||
287 | if (ib->flush_hdp_writefifo) | ||
288 | next_rptr += 6; | ||
289 | |||
290 | while ((next_rptr & 7) != 2) | ||
291 | next_rptr++; | ||
292 | next_rptr += 6; | ||
293 | |||
294 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||
295 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | ||
296 | amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); | ||
297 | amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); | ||
298 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | ||
299 | amdgpu_ring_write(ring, next_rptr); | ||
300 | |||
301 | /* flush HDP */ | ||
302 | if (ib->flush_hdp_writefifo) { | ||
303 | sdma_v3_0_hdp_flush_ring_emit(ring); | ||
304 | } | ||
305 | |||
306 | /* IB packet must end on an 8 DW boundary */ | ||
307 | while ((ring->wptr & 7) != 2) | ||
308 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_NOP)); | ||
309 | |||
310 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | | ||
311 | SDMA_PKT_INDIRECT_HEADER_VMID(vmid)); | ||
312 | /* base must be 32 byte aligned */ | ||
313 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0); | ||
314 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | ||
315 | amdgpu_ring_write(ring, ib->length_dw); | ||
316 | amdgpu_ring_write(ring, 0); | ||
317 | amdgpu_ring_write(ring, 0); | ||
319 | } | ||
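The alignment loop in the function above targets wptr % 8 == 2 because the INDIRECT packet that follows is 6 dwords, so it ends exactly on the 8-dword boundary the comment calls for (2 + 6 == 8). The same arithmetic without the loop, as a sketch:

	unsigned pad = (2 - ring->wptr) & 7;	/* NOP dwords the while loop emits */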
320 | |||
321 | /** | ||
322 | * sdma_v3_0_hdp_flush_ring_emit - emit an hdp flush on the DMA ring | ||
323 | * | ||
324 | * @ring: amdgpu ring pointer | ||
325 | * | ||
326 | * Emit an hdp flush packet on the requested DMA ring. | ||
327 | */ | ||
328 | static void sdma_v3_0_hdp_flush_ring_emit(struct amdgpu_ring *ring) | ||
329 | { | ||
330 | u32 ref_and_mask = 0; | ||
331 | |||
332 | if (ring == &ring->adev->sdma[0].ring) | ||
333 | ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1); | ||
334 | else | ||
335 | ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1); | ||
336 | |||
337 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | | ||
338 | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | | ||
339 | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ | ||
340 | amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2); | ||
341 | amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2); | ||
342 | amdgpu_ring_write(ring, ref_and_mask); /* reference */ | ||
343 | amdgpu_ring_write(ring, ref_and_mask); /* mask */ | ||
344 | amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | | ||
345 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ | ||
346 | } | ||
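Read back as CPU-side C, the packet above tells the engine to write ref_and_mask into GPU_HDP_FLUSH_REQ and then poll GPU_HDP_FLUSH_DONE until (value & mask) == reference, with FUNC(3) meaning "equal". A behavioral sketch of the polling half (an interpretation of the packet fields, not driver code):

	static bool poll_regmem_equal(struct amdgpu_device *adev, u32 reg,
				      u32 ref, u32 mask, unsigned retry)
	{
		while (retry--) {
			if ((RREG32(reg) & mask) == ref)
				return true;
			udelay(10);	/* INTERVAL(10) from the packet */
		}
		return false;		/* RETRY_COUNT(0xfff) exhausted */
	}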
347 | |||
348 | /** | ||
349 | * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring | ||
350 | * | ||
351 | * @ring: amdgpu ring pointer | ||
352 | * @addr: GPU address to write the fence seq to | ||
353 | * @seq: fence sequence number to write | ||
354 | * @write64bits: also write the upper 32 bits of the seq | ||
355 | * | ||
354 | * Add a DMA fence packet to the ring to write | ||
355 | * the fence seq number and DMA trap packet to generate | ||
356 | * an interrupt if needed (VI). | ||
357 | */ | ||
358 | static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | ||
359 | bool write64bits) | ||
360 | { | ||
361 | /* write the fence */ | ||
362 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); | ||
363 | amdgpu_ring_write(ring, lower_32_bits(addr)); | ||
364 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
365 | amdgpu_ring_write(ring, lower_32_bits(seq)); | ||
366 | |||
367 | /* optionally write high bits as well */ | ||
368 | if (write64bits) { | ||
369 | addr += 4; | ||
370 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE)); | ||
371 | amdgpu_ring_write(ring, lower_32_bits(addr)); | ||
372 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
373 | amdgpu_ring_write(ring, upper_32_bits(seq)); | ||
374 | } | ||
375 | |||
376 | /* generate an interrupt */ | ||
377 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP)); | ||
378 | amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0)); | ||
379 | } | ||
380 | |||
382 | /** | ||
383 | * sdma_v3_0_ring_emit_semaphore - emit a semaphore on the dma ring | ||
384 | * | ||
385 | * @ring: amdgpu_ring structure holding ring information | ||
386 | * @semaphore: amdgpu semaphore object | ||
387 | * @emit_wait: wait or signal semaphore | ||
388 | * | ||
389 | * Add a DMA semaphore packet to the ring to wait on or | ||
390 | * signal other rings (VI). | ||
391 | */ | ||
392 | static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring, | ||
393 | struct amdgpu_semaphore *semaphore, | ||
394 | bool emit_wait) | ||
395 | { | ||
396 | u64 addr = semaphore->gpu_addr; | ||
397 | u32 sig = emit_wait ? 0 : 1; | ||
398 | |||
399 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SEM) | | ||
400 | SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(sig)); | ||
401 | amdgpu_ring_write(ring, lower_32_bits(addr) & 0xfffffff8); | ||
402 | amdgpu_ring_write(ring, upper_32_bits(addr)); | ||
403 | |||
404 | return true; | ||
405 | } | ||
406 | |||
407 | /** | ||
408 | * sdma_v3_0_gfx_stop - stop the gfx async dma engines | ||
409 | * | ||
410 | * @adev: amdgpu_device pointer | ||
411 | * | ||
412 | * Stop the gfx async dma ring buffers (VI). | ||
413 | */ | ||
414 | static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev) | ||
415 | { | ||
416 | struct amdgpu_ring *sdma0 = &adev->sdma[0].ring; | ||
417 | struct amdgpu_ring *sdma1 = &adev->sdma[1].ring; | ||
418 | u32 rb_cntl, ib_cntl; | ||
419 | int i; | ||
420 | |||
421 | if ((adev->mman.buffer_funcs_ring == sdma0) || | ||
422 | (adev->mman.buffer_funcs_ring == sdma1)) | ||
423 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); | ||
424 | |||
425 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
426 | rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); | ||
427 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0); | ||
428 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); | ||
429 | ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); | ||
430 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); | ||
431 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | ||
432 | } | ||
433 | sdma0->ready = false; | ||
434 | sdma1->ready = false; | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * sdma_v3_0_rlc_stop - stop the compute async dma engines | ||
439 | * | ||
440 | * @adev: amdgpu_device pointer | ||
441 | * | ||
442 | * Stop the compute async dma queues (VI). | ||
443 | */ | ||
444 | static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev) | ||
445 | { | ||
446 | /* XXX todo */ | ||
447 | } | ||
448 | |||
449 | /** | ||
450 | * sdma_v3_0_enable - enable/disable the async dma engines | ||
451 | * | ||
452 | * @adev: amdgpu_device pointer | ||
453 | * @enable: enable/disable the DMA MEs. | ||
454 | * | ||
455 | * Halt or unhalt the async dma engines (VI). | ||
456 | */ | ||
457 | static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable) | ||
458 | { | ||
459 | u32 f32_cntl; | ||
460 | int i; | ||
461 | |||
462 | if (!enable) { | ||
463 | sdma_v3_0_gfx_stop(adev); | ||
464 | sdma_v3_0_rlc_stop(adev); | ||
465 | } | ||
466 | |||
467 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
468 | f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]); | ||
469 | if (enable) | ||
470 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0); | ||
471 | else | ||
472 | f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); | ||
473 | WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl); | ||
474 | } | ||
475 | } | ||
476 | |||
477 | /** | ||
478 | * sdma_v3_0_gfx_resume - setup and start the async dma engines | ||
479 | * | ||
480 | * @adev: amdgpu_device pointer | ||
481 | * | ||
482 | * Set up the gfx DMA ring buffers and enable them (VI). | ||
483 | * Returns 0 for success, error for failure. | ||
484 | */ | ||
485 | static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | ||
486 | { | ||
487 | struct amdgpu_ring *ring; | ||
488 | u32 rb_cntl, ib_cntl; | ||
489 | u32 rb_bufsz; | ||
490 | u32 wb_offset; | ||
491 | u32 doorbell; | ||
492 | int i, j, r; | ||
493 | |||
494 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
495 | ring = &adev->sdma[i].ring; | ||
496 | wb_offset = (ring->rptr_offs * 4); | ||
497 | |||
498 | mutex_lock(&adev->srbm_mutex); | ||
499 | for (j = 0; j < 16; j++) { | ||
500 | vi_srbm_select(adev, 0, 0, 0, j); | ||
501 | /* SDMA GFX */ | ||
502 | WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0); | ||
503 | WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0); | ||
504 | } | ||
505 | vi_srbm_select(adev, 0, 0, 0, 0); | ||
506 | mutex_unlock(&adev->srbm_mutex); | ||
507 | |||
508 | WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); | ||
509 | |||
510 | /* Set ring buffer size in dwords */ | ||
511 | rb_bufsz = order_base_2(ring->ring_size / 4); | ||
512 | rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]); | ||
513 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz); | ||
514 | #ifdef __BIG_ENDIAN | ||
515 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1); | ||
516 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, | ||
517 | RPTR_WRITEBACK_SWAP_ENABLE, 1); | ||
518 | #endif | ||
519 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); | ||
520 | |||
521 | /* Initialize the ring buffer's read and write pointers */ | ||
522 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | ||
523 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | ||
524 | |||
525 | /* set the wb address whether it's enabled or not */ | ||
526 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | ||
527 | upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF); | ||
528 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i], | ||
529 | lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC); | ||
530 | |||
531 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1); | ||
532 | |||
533 | WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); | ||
534 | WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40); | ||
535 | |||
536 | ring->wptr = 0; | ||
537 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2); | ||
538 | |||
539 | doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]); | ||
540 | |||
541 | if (ring->use_doorbell) { | ||
542 | doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, | ||
543 | OFFSET, ring->doorbell_index); | ||
544 | doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1); | ||
545 | } else { | ||
546 | doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0); | ||
547 | } | ||
548 | WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell); | ||
549 | |||
550 | /* enable DMA RB */ | ||
551 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); | ||
552 | WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); | ||
553 | |||
554 | ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]); | ||
555 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1); | ||
556 | #ifdef __BIG_ENDIAN | ||
557 | ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1); | ||
558 | #endif | ||
559 | /* enable DMA IBs */ | ||
560 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | ||
561 | |||
562 | ring->ready = true; | ||
563 | |||
564 | r = amdgpu_ring_test_ring(ring); | ||
565 | if (r) { | ||
566 | ring->ready = false; | ||
567 | return r; | ||
568 | } | ||
569 | |||
570 | if (adev->mman.buffer_funcs_ring == ring) | ||
571 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); | ||
572 | } | ||
573 | |||
574 | return 0; | ||
575 | } | ||
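A worked example for the RB_SIZE field above: assuming the same 256 KiB ring allocation the sdma_v2_4 sw_init uses, ring_size / 4 is 65536 dwords and the register gets its base-2 order:

	u32 rb_bufsz = order_base_2((256 * 1024) / 4);	/* == 16 */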
576 | |||
577 | /** | ||
578 | * sdma_v3_0_rlc_resume - setup and start the async dma engines | ||
579 | * | ||
580 | * @adev: amdgpu_device pointer | ||
581 | * | ||
582 | * Set up the compute DMA queues and enable them (VI). | ||
583 | * Returns 0 for success, error for failure. | ||
584 | */ | ||
585 | static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev) | ||
586 | { | ||
587 | /* XXX todo */ | ||
588 | return 0; | ||
589 | } | ||
590 | |||
591 | /** | ||
592 | * sdma_v3_0_load_microcode - load the sDMA ME ucode | ||
593 | * | ||
594 | * @adev: amdgpu_device pointer | ||
595 | * | ||
596 | * Loads the sDMA0/1 ucode. | ||
597 | * Returns 0 for success, -EINVAL if the ucode is not available. | ||
598 | */ | ||
599 | static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) | ||
600 | { | ||
601 | const struct sdma_firmware_header_v1_0 *hdr; | ||
602 | const __le32 *fw_data; | ||
603 | u32 fw_size; | ||
604 | int i, j; | ||
605 | |||
606 | if (!adev->sdma[0].fw || !adev->sdma[1].fw) | ||
607 | return -EINVAL; | ||
608 | |||
609 | /* halt the MEs */ | ||
610 | sdma_v3_0_enable(adev, false); | ||
611 | |||
612 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
613 | hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; | ||
614 | amdgpu_ucode_print_sdma_hdr(&hdr->header); | ||
615 | fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; | ||
616 | adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); | ||
617 | |||
618 | fw_data = (const __le32 *) | ||
619 | (adev->sdma[i].fw->data + | ||
620 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
621 | WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); | ||
622 | for (j = 0; j < fw_size; j++) | ||
623 | WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); | ||
624 | WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); | ||
625 | } | ||
626 | |||
627 | return 0; | ||
628 | } | ||
629 | |||
630 | /** | ||
631 | * sdma_v3_0_start - setup and start the async dma engines | ||
632 | * | ||
633 | * @adev: amdgpu_device pointer | ||
634 | * | ||
635 | * Set up the DMA engines and enable them (VI). | ||
636 | * Returns 0 for success, error for failure. | ||
637 | */ | ||
638 | static int sdma_v3_0_start(struct amdgpu_device *adev) | ||
639 | { | ||
640 | int r; | ||
641 | |||
642 | if (!adev->firmware.smu_load) { | ||
643 | r = sdma_v3_0_load_microcode(adev); | ||
644 | if (r) | ||
645 | return r; | ||
646 | } else { | ||
647 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
648 | AMDGPU_UCODE_ID_SDMA0); | ||
649 | if (r) | ||
650 | return -EINVAL; | ||
651 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | ||
652 | AMDGPU_UCODE_ID_SDMA1); | ||
653 | if (r) | ||
654 | return -EINVAL; | ||
655 | } | ||
656 | |||
657 | /* unhalt the MEs */ | ||
658 | sdma_v3_0_enable(adev, true); | ||
659 | |||
660 | /* start the gfx rings and rlc compute queues */ | ||
661 | r = sdma_v3_0_gfx_resume(adev); | ||
662 | if (r) | ||
663 | return r; | ||
664 | r = sdma_v3_0_rlc_resume(adev); | ||
665 | if (r) | ||
666 | return r; | ||
667 | |||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | /** | ||
672 | * sdma_v3_0_ring_test_ring - simple async dma engine test | ||
673 | * | ||
674 | * @ring: amdgpu_ring structure holding ring information | ||
675 | * | ||
676 | * Test the DMA engine by using it to write a | ||
677 | * value to memory (VI). | ||
678 | * Returns 0 for success, error for failure. | ||
679 | */ | ||
680 | static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) | ||
681 | { | ||
682 | struct amdgpu_device *adev = ring->adev; | ||
683 | unsigned i; | ||
684 | unsigned index; | ||
685 | int r; | ||
686 | u32 tmp; | ||
687 | u64 gpu_addr; | ||
688 | |||
689 | r = amdgpu_wb_get(adev, &index); | ||
690 | if (r) { | ||
691 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
692 | return r; | ||
693 | } | ||
694 | |||
695 | gpu_addr = adev->wb.gpu_addr + (index * 4); | ||
696 | tmp = 0xCAFEDEAD; | ||
697 | adev->wb.wb[index] = cpu_to_le32(tmp); | ||
698 | |||
699 | r = amdgpu_ring_lock(ring, 5); | ||
700 | if (r) { | ||
701 | DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r); | ||
702 | amdgpu_wb_free(adev, index); | ||
703 | return r; | ||
704 | } | ||
705 | |||
706 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||
707 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); | ||
708 | amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); | ||
709 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr)); | ||
710 | amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); | ||
711 | amdgpu_ring_write(ring, 0xDEADBEEF); | ||
712 | amdgpu_ring_unlock_commit(ring); | ||
713 | |||
714 | for (i = 0; i < adev->usec_timeout; i++) { | ||
715 | tmp = le32_to_cpu(adev->wb.wb[index]); | ||
716 | if (tmp == 0xDEADBEEF) | ||
717 | break; | ||
718 | DRM_UDELAY(1); | ||
719 | } | ||
720 | |||
721 | if (i < adev->usec_timeout) { | ||
722 | DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); | ||
723 | } else { | ||
724 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
725 | ring->idx, tmp); | ||
726 | r = -EINVAL; | ||
727 | } | ||
728 | amdgpu_wb_free(adev, index); | ||
729 | |||
730 | return r; | ||
731 | } | ||
732 | |||
733 | /** | ||
734 | * sdma_v3_0_ring_test_ib - test an IB on the DMA engine | ||
735 | * | ||
736 | * @ring: amdgpu_ring structure holding ring information | ||
737 | * | ||
738 | * Test a simple IB in the DMA ring (VI). | ||
739 | * Returns 0 on success, error on failure. | ||
740 | */ | ||
741 | static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) | ||
742 | { | ||
743 | struct amdgpu_device *adev = ring->adev; | ||
744 | struct amdgpu_ib ib; | ||
745 | unsigned i; | ||
746 | unsigned index; | ||
747 | int r; | ||
748 | u32 tmp = 0; | ||
749 | u64 gpu_addr; | ||
750 | |||
751 | r = amdgpu_wb_get(adev, &index); | ||
752 | if (r) { | ||
753 | dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); | ||
754 | return r; | ||
755 | } | ||
756 | |||
757 | gpu_addr = adev->wb.gpu_addr + (index * 4); | ||
758 | tmp = 0xCAFEDEAD; | ||
759 | adev->wb.wb[index] = cpu_to_le32(tmp); | ||
760 | |||
761 | r = amdgpu_ib_get(ring, NULL, 256, &ib); | ||
762 | if (r) { | ||
763 | amdgpu_wb_free(adev, index); | ||
764 | DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); | ||
765 | return r; | ||
766 | } | ||
767 | |||
768 | ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||
769 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); | ||
770 | ib.ptr[1] = lower_32_bits(gpu_addr); | ||
771 | ib.ptr[2] = upper_32_bits(gpu_addr); | ||
772 | ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1); | ||
773 | ib.ptr[4] = 0xDEADBEEF; | ||
774 | ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); | ||
775 | ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); | ||
776 | ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); | ||
777 | ib.length_dw = 8; | ||
778 | |||
779 | r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED); | ||
780 | if (r) { | ||
781 | amdgpu_ib_free(adev, &ib); | ||
782 | amdgpu_wb_free(adev, index); | ||
783 | DRM_ERROR("amdgpu: failed to schedule ib (%d).\n", r); | ||
784 | return r; | ||
785 | } | ||
786 | r = amdgpu_fence_wait(ib.fence, false); | ||
787 | if (r) { | ||
788 | amdgpu_ib_free(adev, &ib); | ||
789 | amdgpu_wb_free(adev, index); | ||
790 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | ||
791 | return r; | ||
792 | } | ||
793 | for (i = 0; i < adev->usec_timeout; i++) { | ||
794 | tmp = le32_to_cpu(adev->wb.wb[index]); | ||
795 | if (tmp == 0xDEADBEEF) | ||
796 | break; | ||
797 | DRM_UDELAY(1); | ||
798 | } | ||
799 | if (i < adev->usec_timeout) { | ||
800 | DRM_INFO("ib test on ring %d succeeded in %u usecs\n", | ||
801 | ib.fence->ring->idx, i); | ||
802 | } else { | ||
803 | DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); | ||
804 | r = -EINVAL; | ||
805 | } | ||
806 | amdgpu_ib_free(adev, &ib); | ||
807 | amdgpu_wb_free(adev, index); | ||
808 | return r; | ||
809 | } | ||
810 | |||
811 | /** | ||
812 | * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART | ||
813 | * | ||
814 | * @ib: indirect buffer to fill with commands | ||
815 | * @pe: addr of the page entry | ||
816 | * @src: src addr to copy from | ||
817 | * @count: number of page entries to update | ||
818 | * | ||
819 | * Update PTEs by copying them from the GART using sDMA (VI). | ||
820 | */ | ||
821 | static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib, | ||
822 | uint64_t pe, uint64_t src, | ||
823 | unsigned count) | ||
824 | { | ||
825 | while (count) { | ||
826 | unsigned bytes = count * 8; | ||
827 | if (bytes > 0x1FFFF8) | ||
828 | bytes = 0x1FFFF8; | ||
829 | |||
830 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | | ||
831 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR); | ||
832 | ib->ptr[ib->length_dw++] = bytes; | ||
833 | ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */ | ||
834 | ib->ptr[ib->length_dw++] = lower_32_bits(src); | ||
835 | ib->ptr[ib->length_dw++] = upper_32_bits(src); | ||
836 | ib->ptr[ib->length_dw++] = lower_32_bits(pe); | ||
837 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | ||
838 | |||
839 | pe += bytes; | ||
840 | src += bytes; | ||
841 | count -= bytes / 8; | ||
842 | } | ||
843 | } | ||
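The 0x1FFFF8 clamp above is the largest multiple of 8 under the 0x1FFFFF byte-count limit of a COPY_LINEAR packet (cf. copy_max_bytes elsewhere in the file), i.e. 0x3FFFF whole PTEs per packet. A worked example of the chunking, as a sketch:

	/* count = 0x80000 PTEs -> 0x400000 bytes -> packets of
	 * 0x3FFFF, 0x3FFFF and 0x2 entries. */
	unsigned bytes = min_t(unsigned, count * 8, 0x1FFFF8);
	unsigned entries = bytes / 8;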
844 | |||
845 | /** | ||
846 | * sdma_v3_0_vm_write_pte - update PTEs by writing them manually | ||
847 | * | ||
848 | * @ib: indirect buffer to fill with commands | ||
849 | * @pe: addr of the page entry | ||
850 | * @addr: dst addr to write into pe | ||
851 | * @count: number of page entries to update | ||
852 | * @incr: increase next addr by incr bytes | ||
853 | * @flags: access flags | ||
854 | * | ||
855 | * Update PTEs by writing them manually using sDMA (VI). | ||
856 | */ | ||
857 | static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, | ||
858 | uint64_t pe, | ||
859 | uint64_t addr, unsigned count, | ||
860 | uint32_t incr, uint32_t flags) | ||
861 | { | ||
862 | uint64_t value; | ||
863 | unsigned ndw; | ||
864 | |||
865 | while (count) { | ||
866 | ndw = count * 2; | ||
867 | if (ndw > 0xFFFFE) | ||
868 | ndw = 0xFFFFE; | ||
869 | |||
870 | /* for non-physically contiguous pages (system) */ | ||
871 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | | ||
872 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR); | ||
873 | ib->ptr[ib->length_dw++] = lower_32_bits(pe); | ||
874 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | ||
875 | ib->ptr[ib->length_dw++] = ndw; | ||
876 | for (; ndw > 0; ndw -= 2, --count, pe += 8) { | ||
877 | if (flags & AMDGPU_PTE_SYSTEM) { | ||
878 | value = amdgpu_vm_map_gart(ib->ring->adev, addr); | ||
879 | value &= 0xFFFFFFFFFFFFF000ULL; | ||
880 | } else if (flags & AMDGPU_PTE_VALID) { | ||
881 | value = addr; | ||
882 | } else { | ||
883 | value = 0; | ||
884 | } | ||
885 | addr += incr; | ||
886 | value |= flags; | ||
887 | ib->ptr[ib->length_dw++] = value; | ||
888 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | ||
889 | } | ||
890 | } | ||
891 | } | ||
892 | |||
893 | /** | ||
894 | * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA | ||
895 | * | ||
896 | * @ib: indirect buffer to fill with commands | ||
897 | * @pe: addr of the page entry | ||
898 | * @addr: dst addr to write into pe | ||
899 | * @count: number of page entries to update | ||
900 | * @incr: increase next addr by incr bytes | ||
901 | * @flags: access flags | ||
902 | * | ||
903 | * Update the page tables using sDMA (VI). | ||
904 | */ | ||
905 | static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, | ||
906 | uint64_t pe, | ||
907 | uint64_t addr, unsigned count, | ||
908 | uint32_t incr, uint32_t flags) | ||
909 | { | ||
910 | uint64_t value; | ||
911 | unsigned ndw; | ||
912 | |||
913 | while (count) { | ||
914 | ndw = count; | ||
915 | if (ndw > 0x7FFFF) | ||
916 | ndw = 0x7FFFF; | ||
917 | |||
918 | if (flags & AMDGPU_PTE_VALID) | ||
919 | value = addr; | ||
920 | else | ||
921 | value = 0; | ||
922 | |||
923 | /* for physically contiguous pages (vram) */ | ||
924 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE); | ||
925 | ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */ | ||
926 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); | ||
927 | ib->ptr[ib->length_dw++] = flags; /* mask */ | ||
928 | ib->ptr[ib->length_dw++] = 0; | ||
929 | ib->ptr[ib->length_dw++] = value; /* value */ | ||
930 | ib->ptr[ib->length_dw++] = upper_32_bits(value); | ||
931 | ib->ptr[ib->length_dw++] = incr; /* increment size */ | ||
932 | ib->ptr[ib->length_dw++] = 0; | ||
933 | ib->ptr[ib->length_dw++] = ndw; /* number of entries */ | ||
934 | |||
935 | pe += ndw * 8; | ||
936 | addr += ndw * incr; | ||
937 | count -= ndw; | ||
938 | } | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * sdma_v3_0_vm_pad_ib - pad the IB to the required number of dw | ||
943 | * | ||
944 | * @ib: indirect buffer to fill with padding | ||
945 | * | ||
946 | */ | ||
947 | static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) | ||
948 | { | ||
949 | while (ib->length_dw & 0x7) | ||
950 | ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); | ||
951 | } | ||
952 | |||
953 | /** | ||
954 | * sdma_v3_0_ring_emit_vm_flush - VI vm flush using sDMA | ||
955 | * | ||
956 | * @ring: amdgpu_ring pointer | ||
957 | * @vm_id: VMID of the VM to flush | ||
958 | * @pd_addr: base address of the page directory | ||
959 | * | ||
959 | * Update the page table base and flush the VM TLB | ||
960 | * using sDMA (VI). | ||
961 | */ | ||
962 | static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | ||
963 | unsigned vm_id, uint64_t pd_addr) | ||
964 | { | ||
965 | u32 srbm_gfx_cntl = 0; | ||
966 | |||
967 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
968 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
969 | if (vm_id < 8) { | ||
970 | amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id)); | ||
971 | } else { | ||
972 | amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8)); | ||
973 | } | ||
974 | amdgpu_ring_write(ring, pd_addr >> 12); | ||
975 | |||
976 | /* update SH_MEM_* regs */ | ||
977 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vm_id); | ||
978 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
979 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
980 | amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); | ||
981 | amdgpu_ring_write(ring, srbm_gfx_cntl); | ||
982 | |||
983 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
984 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
985 | amdgpu_ring_write(ring, mmSH_MEM_BASES); | ||
986 | amdgpu_ring_write(ring, 0); | ||
987 | |||
988 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
989 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
990 | amdgpu_ring_write(ring, mmSH_MEM_CONFIG); | ||
991 | amdgpu_ring_write(ring, 0); | ||
992 | |||
993 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
994 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
995 | amdgpu_ring_write(ring, mmSH_MEM_APE1_BASE); | ||
996 | amdgpu_ring_write(ring, 1); | ||
997 | |||
998 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
999 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
1000 | amdgpu_ring_write(ring, mmSH_MEM_APE1_LIMIT); | ||
1001 | amdgpu_ring_write(ring, 0); | ||
1002 | |||
1003 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, 0); | ||
1004 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
1005 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
1006 | amdgpu_ring_write(ring, mmSRBM_GFX_CNTL); | ||
1007 | amdgpu_ring_write(ring, srbm_gfx_cntl); | ||
1008 | |||
1010 | /* flush TLB */ | ||
1011 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) | | ||
1012 | SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf)); | ||
1013 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST); | ||
1014 | amdgpu_ring_write(ring, 1 << vm_id); | ||
1015 | |||
1016 | /* wait for flush */ | ||
1017 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | | ||
1018 | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | | ||
1019 | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */ | ||
1020 | amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); | ||
1021 | amdgpu_ring_write(ring, 0); | ||
1022 | amdgpu_ring_write(ring, 0); /* reference */ | ||
1023 | amdgpu_ring_write(ring, 0); /* mask */ | ||
1024 | amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | | ||
1025 | SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ | ||
1026 | } | ||
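/*
 * Editor's sketch, not part of the patch: the flush above emits the same
 * three-dword SRBM_WRITE sequence for every register it touches. A helper
 * like the hypothetical one below would make that packet structure
 * explicit (the name is an assumption, not an upstream symbol):
 */
static void sdma_v3_0_ring_srbm_write(struct amdgpu_ring *ring,
				      u32 reg, u32 val)
{
	/* dword 0: opcode plus byte-enable, dword 1: register, dword 2: value */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}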
1027 | |||
1028 | static int sdma_v3_0_early_init(struct amdgpu_device *adev) | ||
1029 | { | ||
1030 | sdma_v3_0_set_ring_funcs(adev); | ||
1031 | sdma_v3_0_set_buffer_funcs(adev); | ||
1032 | sdma_v3_0_set_vm_pte_funcs(adev); | ||
1033 | sdma_v3_0_set_irq_funcs(adev); | ||
1034 | |||
1035 | return 0; | ||
1036 | } | ||
1037 | |||
1038 | static int sdma_v3_0_sw_init(struct amdgpu_device *adev) | ||
1039 | { | ||
1040 | struct amdgpu_ring *ring; | ||
1041 | int r; | ||
1042 | |||
1043 | /* SDMA trap event */ | ||
1044 | r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); | ||
1045 | if (r) | ||
1046 | return r; | ||
1047 | |||
1048 | /* SDMA Privileged inst */ | ||
1049 | r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); | ||
1050 | if (r) | ||
1051 | return r; | ||
1052 | |||
1053 | /* SDMA Privileged inst */ | ||
1054 | r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); | ||
1055 | if (r) | ||
1056 | return r; | ||
1057 | |||
1058 | r = sdma_v3_0_init_microcode(adev); | ||
1059 | if (r) { | ||
1060 | DRM_ERROR("Failed to load sdma firmware!\n"); | ||
1061 | return r; | ||
1062 | } | ||
1063 | |||
1064 | ring = &adev->sdma[0].ring; | ||
1065 | ring->ring_obj = NULL; | ||
1066 | ring->use_doorbell = true; | ||
1067 | ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0; | ||
1068 | |||
1069 | ring = &adev->sdma[1].ring; | ||
1070 | ring->ring_obj = NULL; | ||
1071 | ring->use_doorbell = true; | ||
1072 | ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1; | ||
1073 | |||
1074 | ring = &adev->sdma[0].ring; | ||
1075 | sprintf(ring->name, "sdma0"); | ||
1076 | r = amdgpu_ring_init(adev, ring, 256 * 1024, | ||
1077 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
1078 | &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, | ||
1079 | AMDGPU_RING_TYPE_SDMA); | ||
1080 | if (r) | ||
1081 | return r; | ||
1082 | |||
1083 | ring = &adev->sdma[1].ring; | ||
1084 | sprintf(ring->name, "sdma1"); | ||
1085 | r = amdgpu_ring_init(adev, ring, 256 * 1024, | ||
1086 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
1087 | &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1, | ||
1088 | AMDGPU_RING_TYPE_SDMA); | ||
1089 | if (r) | ||
1090 | return r; | ||
1091 | |||
1092 | return r; | ||
1093 | } | ||
1094 | |||
1095 | static int sdma_v3_0_sw_fini(struct amdgpu_device *adev) | ||
1096 | { | ||
1097 | amdgpu_ring_fini(&adev->sdma[0].ring); | ||
1098 | amdgpu_ring_fini(&adev->sdma[1].ring); | ||
1099 | |||
1100 | return 0; | ||
1101 | } | ||
1102 | |||
1103 | static int sdma_v3_0_hw_init(struct amdgpu_device *adev) | ||
1104 | { | ||
1105 | int r; | ||
1106 | |||
1107 | sdma_v3_0_init_golden_registers(adev); | ||
1108 | |||
1109 | r = sdma_v3_0_start(adev); | ||
1110 | if (r) | ||
1111 | return r; | ||
1112 | |||
1113 | return r; | ||
1114 | } | ||
1115 | |||
1116 | static int sdma_v3_0_hw_fini(struct amdgpu_device *adev) | ||
1117 | { | ||
1118 | sdma_v3_0_enable(adev, false); | ||
1119 | |||
1120 | return 0; | ||
1121 | } | ||
1122 | |||
1123 | static int sdma_v3_0_suspend(struct amdgpu_device *adev) | ||
1124 | { | ||
1126 | return sdma_v3_0_hw_fini(adev); | ||
1127 | } | ||
1128 | |||
1129 | static int sdma_v3_0_resume(struct amdgpu_device *adev) | ||
1130 | { | ||
1132 | return sdma_v3_0_hw_init(adev); | ||
1133 | } | ||
1134 | |||
1135 | static bool sdma_v3_0_is_idle(struct amdgpu_device *adev) | ||
1136 | { | ||
1137 | u32 tmp = RREG32(mmSRBM_STATUS2); | ||
1138 | |||
1139 | if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK | | ||
1140 | SRBM_STATUS2__SDMA1_BUSY_MASK)) | ||
1141 | return false; | ||
1142 | |||
1143 | return true; | ||
1144 | } | ||
1145 | |||
1146 | static int sdma_v3_0_wait_for_idle(struct amdgpu_device *adev) | ||
1147 | { | ||
1148 | unsigned i; | ||
1149 | u32 tmp; | ||
1150 | |||
1151 | for (i = 0; i < adev->usec_timeout; i++) { | ||
1152 | tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK | | ||
1153 | SRBM_STATUS2__SDMA1_BUSY_MASK); | ||
1154 | |||
1155 | if (!tmp) | ||
1156 | return 0; | ||
1157 | udelay(1); | ||
1158 | } | ||
1159 | return -ETIMEDOUT; | ||
1160 | } | ||
1161 | |||
1162 | static void sdma_v3_0_print_status(struct amdgpu_device *adev) | ||
1163 | { | ||
1164 | int i, j; | ||
1165 | |||
1166 | dev_info(adev->dev, "VI SDMA registers\n"); | ||
1167 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | ||
1168 | RREG32(mmSRBM_STATUS2)); | ||
1169 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | ||
1170 | dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", | ||
1171 | i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); | ||
1172 | dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", | ||
1173 | i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i])); | ||
1174 | dev_info(adev->dev, " SDMA%d_CNTL=0x%08X\n", | ||
1175 | i, RREG32(mmSDMA0_CNTL + sdma_offsets[i])); | ||
1176 | dev_info(adev->dev, " SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n", | ||
1177 | i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i])); | ||
1178 | dev_info(adev->dev, " SDMA%d_GFX_IB_CNTL=0x%08X\n", | ||
1179 | i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i])); | ||
1180 | dev_info(adev->dev, " SDMA%d_GFX_RB_CNTL=0x%08X\n", | ||
1181 | i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i])); | ||
1182 | dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR=0x%08X\n", | ||
1183 | i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i])); | ||
1184 | dev_info(adev->dev, " SDMA%d_GFX_RB_WPTR=0x%08X\n", | ||
1185 | i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i])); | ||
1186 | dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n", | ||
1187 | i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i])); | ||
1188 | dev_info(adev->dev, " SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n", | ||
1189 | i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i])); | ||
1190 | dev_info(adev->dev, " SDMA%d_GFX_RB_BASE=0x%08X\n", | ||
1191 | i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i])); | ||
1192 | dev_info(adev->dev, " SDMA%d_GFX_RB_BASE_HI=0x%08X\n", | ||
1193 | i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i])); | ||
1194 | dev_info(adev->dev, " SDMA%d_GFX_DOORBELL=0x%08X\n", | ||
1195 | i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i])); | ||
1196 | mutex_lock(&adev->srbm_mutex); | ||
1197 | for (j = 0; j < 16; j++) { | ||
1198 | vi_srbm_select(adev, 0, 0, 0, j); | ||
1199 | dev_info(adev->dev, " VM %d:\n", j); | ||
1200 | dev_info(adev->dev, " SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n", | ||
1201 | i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i])); | ||
1202 | dev_info(adev->dev, " SDMA%d_GFX_APE1_CNTL=0x%08X\n", | ||
1203 | i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i])); | ||
1204 | } | ||
1205 | vi_srbm_select(adev, 0, 0, 0, 0); | ||
1206 | mutex_unlock(&adev->srbm_mutex); | ||
1207 | } | ||
1208 | } | ||
1209 | |||
1210 | static int sdma_v3_0_soft_reset(struct amdgpu_device *adev) | ||
1211 | { | ||
1212 | u32 srbm_soft_reset = 0; | ||
1213 | u32 tmp = RREG32(mmSRBM_STATUS2); | ||
1214 | |||
1215 | if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) { | ||
1216 | /* sdma0 */ | ||
1217 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); | ||
1218 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); | ||
1219 | WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); | ||
1220 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK; | ||
1221 | } | ||
1222 | if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) { | ||
1223 | /* sdma1 */ | ||
1224 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); | ||
1225 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0); | ||
1226 | WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); | ||
1227 | srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK; | ||
1228 | } | ||
1229 | |||
1230 | if (srbm_soft_reset) { | ||
1231 | sdma_v3_0_print_status(adev); | ||
1232 | |||
1233 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1234 | tmp |= srbm_soft_reset; | ||
1235 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
1236 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
1237 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1238 | |||
1239 | udelay(50); | ||
1240 | |||
1241 | tmp &= ~srbm_soft_reset; | ||
1242 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
1243 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1244 | |||
1245 | /* Wait a little for things to settle down */ | ||
1246 | udelay(50); | ||
1247 | |||
1248 | sdma_v3_0_print_status(adev); | ||
1249 | } | ||
1250 | |||
1251 | return 0; | ||
1252 | } | ||
1253 | |||
1254 | static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev, | ||
1255 | struct amdgpu_irq_src *source, | ||
1256 | unsigned type, | ||
1257 | enum amdgpu_interrupt_state state) | ||
1258 | { | ||
1259 | u32 sdma_cntl; | ||
1260 | |||
1261 | switch (type) { | ||
1262 | case AMDGPU_SDMA_IRQ_TRAP0: | ||
1263 | switch (state) { | ||
1264 | case AMDGPU_IRQ_STATE_DISABLE: | ||
1265 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); | ||
1266 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); | ||
1267 | WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); | ||
1268 | break; | ||
1269 | case AMDGPU_IRQ_STATE_ENABLE: | ||
1270 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET); | ||
1271 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); | ||
1272 | WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl); | ||
1273 | break; | ||
1274 | default: | ||
1275 | break; | ||
1276 | } | ||
1277 | break; | ||
1278 | case AMDGPU_SDMA_IRQ_TRAP1: | ||
1279 | switch (state) { | ||
1280 | case AMDGPU_IRQ_STATE_DISABLE: | ||
1281 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); | ||
1282 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0); | ||
1283 | WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); | ||
1284 | break; | ||
1285 | case AMDGPU_IRQ_STATE_ENABLE: | ||
1286 | sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET); | ||
1287 | sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1); | ||
1288 | WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl); | ||
1289 | break; | ||
1290 | default: | ||
1291 | break; | ||
1292 | } | ||
1293 | break; | ||
1294 | default: | ||
1295 | break; | ||
1296 | } | ||
1297 | return 0; | ||
1298 | } | ||
1299 | |||
1300 | static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev, | ||
1301 | struct amdgpu_irq_src *source, | ||
1302 | struct amdgpu_iv_entry *entry) | ||
1303 | { | ||
1304 | u8 instance_id, queue_id; | ||
1305 | |||
1306 | instance_id = (entry->ring_id & 0x3) >> 0; | ||
1307 | queue_id = (entry->ring_id & 0xc) >> 2; | ||
1308 | DRM_DEBUG("IH: SDMA trap\n"); | ||
1309 | switch (instance_id) { | ||
1310 | case 0: | ||
1311 | switch (queue_id) { | ||
1312 | case 0: | ||
1313 | amdgpu_fence_process(&adev->sdma[0].ring); | ||
1314 | break; | ||
1315 | case 1: | ||
1316 | /* XXX compute */ | ||
1317 | break; | ||
1318 | case 2: | ||
1319 | /* XXX compute */ | ||
1320 | break; | ||
1321 | } | ||
1322 | break; | ||
1323 | case 1: | ||
1324 | switch (queue_id) { | ||
1325 | case 0: | ||
1326 | amdgpu_fence_process(&adev->sdma[1].ring); | ||
1327 | break; | ||
1328 | case 1: | ||
1329 | /* XXX compute */ | ||
1330 | break; | ||
1331 | case 2: | ||
1332 | /* XXX compute */ | ||
1333 | break; | ||
1334 | } | ||
1335 | break; | ||
1336 | } | ||
1337 | return 0; | ||
1338 | } | ||
1339 | |||
1340 | static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev, | ||
1341 | struct amdgpu_irq_src *source, | ||
1342 | struct amdgpu_iv_entry *entry) | ||
1343 | { | ||
1344 | DRM_ERROR("Illegal instruction in SDMA command stream\n"); | ||
1345 | schedule_work(&adev->reset_work); | ||
1346 | return 0; | ||
1347 | } | ||
1348 | |||
1349 | static int sdma_v3_0_set_clockgating_state(struct amdgpu_device *adev, | ||
1350 | enum amdgpu_clockgating_state state) | ||
1351 | { | ||
1352 | /* XXX handled via the smc on VI */ | ||
1353 | |||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | static int sdma_v3_0_set_powergating_state(struct amdgpu_device *adev, | ||
1358 | enum amdgpu_powergating_state state) | ||
1359 | { | ||
1360 | return 0; | ||
1361 | } | ||
1362 | |||
1363 | const struct amdgpu_ip_funcs sdma_v3_0_ip_funcs = { | ||
1364 | .early_init = sdma_v3_0_early_init, | ||
1365 | .late_init = NULL, | ||
1366 | .sw_init = sdma_v3_0_sw_init, | ||
1367 | .sw_fini = sdma_v3_0_sw_fini, | ||
1368 | .hw_init = sdma_v3_0_hw_init, | ||
1369 | .hw_fini = sdma_v3_0_hw_fini, | ||
1370 | .suspend = sdma_v3_0_suspend, | ||
1371 | .resume = sdma_v3_0_resume, | ||
1372 | .is_idle = sdma_v3_0_is_idle, | ||
1373 | .wait_for_idle = sdma_v3_0_wait_for_idle, | ||
1374 | .soft_reset = sdma_v3_0_soft_reset, | ||
1375 | .print_status = sdma_v3_0_print_status, | ||
1376 | .set_clockgating_state = sdma_v3_0_set_clockgating_state, | ||
1377 | .set_powergating_state = sdma_v3_0_set_powergating_state, | ||
1378 | }; | ||
1379 | |||
1380 | /** | ||
1381 | * sdma_v3_0_ring_is_lockup - Check if the DMA engine is locked up | ||
1382 | * | ||
1383 | * @ring: amdgpu_ring structure holding ring information | ||
1384 | * | ||
1385 | * Check if the async DMA engine is locked up (VI). | ||
1386 | * Returns true if the engine appears to be locked up, false if not. | ||
1387 | */ | ||
1388 | static bool sdma_v3_0_ring_is_lockup(struct amdgpu_ring *ring) | ||
1389 | { | ||
1391 | if (sdma_v3_0_is_idle(ring->adev)) { | ||
1392 | amdgpu_ring_lockup_update(ring); | ||
1393 | return false; | ||
1394 | } | ||
1395 | return amdgpu_ring_test_lockup(ring); | ||
1396 | } | ||
1397 | |||
1398 | static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { | ||
1399 | .get_rptr = sdma_v3_0_ring_get_rptr, | ||
1400 | .get_wptr = sdma_v3_0_ring_get_wptr, | ||
1401 | .set_wptr = sdma_v3_0_ring_set_wptr, | ||
1402 | .parse_cs = NULL, | ||
1403 | .emit_ib = sdma_v3_0_ring_emit_ib, | ||
1404 | .emit_fence = sdma_v3_0_ring_emit_fence, | ||
1405 | .emit_semaphore = sdma_v3_0_ring_emit_semaphore, | ||
1406 | .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush, | ||
1407 | .test_ring = sdma_v3_0_ring_test_ring, | ||
1408 | .test_ib = sdma_v3_0_ring_test_ib, | ||
1409 | .is_lockup = sdma_v3_0_ring_is_lockup, | ||
1410 | }; | ||
1411 | |||
1412 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) | ||
1413 | { | ||
1414 | adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs; | ||
1415 | adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs; | ||
1416 | } | ||
1417 | |||
1418 | static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { | ||
1419 | .set = sdma_v3_0_set_trap_irq_state, | ||
1420 | .process = sdma_v3_0_process_trap_irq, | ||
1421 | }; | ||
1422 | |||
1423 | static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = { | ||
1424 | .process = sdma_v3_0_process_illegal_inst_irq, | ||
1425 | }; | ||
1426 | |||
1427 | static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) | ||
1428 | { | ||
1429 | adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; | ||
1430 | adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs; | ||
1431 | adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs; | ||
1432 | } | ||
1433 | |||
1434 | /** | ||
1435 | * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine | ||
1436 | * | ||
1437 | * @ring: amdgpu_ring structure holding ring information | ||
1438 | * @src_offset: src GPU address | ||
1439 | * @dst_offset: dst GPU address | ||
1440 | * @byte_count: number of bytes to xfer | ||
1441 | * | ||
1442 | * Copy GPU buffers using the DMA engine (VI). | ||
1443 | * Used by the amdgpu ttm implementation to move pages if | ||
1444 | * registered as the asic copy callback. | ||
1445 | */ | ||
1446 | static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ring *ring, | ||
1447 | uint64_t src_offset, | ||
1448 | uint64_t dst_offset, | ||
1449 | uint32_t byte_count) | ||
1450 | { | ||
1451 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COPY) | | ||
1452 | SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR)); | ||
1453 | amdgpu_ring_write(ring, byte_count); | ||
1454 | amdgpu_ring_write(ring, 0); /* src/dst endian swap */ | ||
1455 | amdgpu_ring_write(ring, lower_32_bits(src_offset)); | ||
1456 | amdgpu_ring_write(ring, upper_32_bits(src_offset)); | ||
1457 | amdgpu_ring_write(ring, lower_32_bits(dst_offset)); | ||
1458 | amdgpu_ring_write(ring, upper_32_bits(dst_offset)); | ||
1459 | } | ||
1460 | |||
1461 | /** | ||
1462 | * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine | ||
1463 | * | ||
1464 | * @ring: amdgpu_ring structure holding ring information | ||
1465 | * @src_data: value to write to buffer | ||
1466 | * @dst_offset: dst GPU address | ||
1467 | * @byte_count: number of bytes to xfer | ||
1468 | * | ||
1469 | * Fill GPU buffers using the DMA engine (VI). | ||
1470 | */ | ||
1471 | static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ring *ring, | ||
1472 | uint32_t src_data, | ||
1473 | uint64_t dst_offset, | ||
1474 | uint32_t byte_count) | ||
1475 | { | ||
1476 | amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL)); | ||
1477 | amdgpu_ring_write(ring, lower_32_bits(dst_offset)); | ||
1478 | amdgpu_ring_write(ring, upper_32_bits(dst_offset)); | ||
1479 | amdgpu_ring_write(ring, src_data); | ||
1480 | amdgpu_ring_write(ring, byte_count); | ||
1481 | } | ||
1482 | |||
1483 | static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = { | ||
1484 | .copy_max_bytes = 0x1fffff, | ||
1485 | .copy_num_dw = 7, | ||
1486 | .emit_copy_buffer = sdma_v3_0_emit_copy_buffer, | ||
1487 | |||
1488 | .fill_max_bytes = 0x1fffff, | ||
1489 | .fill_num_dw = 5, | ||
1490 | .emit_fill_buffer = sdma_v3_0_emit_fill_buffer, | ||
1491 | }; | ||
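/*
 * Editor's sketch, not part of the patch: sizing an IB from the limits
 * above. Each copy packet moves at most copy_max_bytes and costs
 * copy_num_dw dwords -- the seven ring writes in
 * sdma_v3_0_emit_copy_buffer(). The helper name is hypothetical.
 */
static unsigned sdma_v3_0_copy_ib_size_dw(uint32_t byte_count)
{
	uint32_t packets = DIV_ROUND_UP(byte_count,
					sdma_v3_0_buffer_funcs.copy_max_bytes);

	return packets * sdma_v3_0_buffer_funcs.copy_num_dw;
}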
1492 | |||
1493 | static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev) | ||
1494 | { | ||
1495 | if (adev->mman.buffer_funcs == NULL) { | ||
1496 | adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; | ||
1497 | adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; | ||
1498 | } | ||
1499 | } | ||
1500 | |||
1501 | static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { | ||
1502 | .copy_pte = sdma_v3_0_vm_copy_pte, | ||
1503 | .write_pte = sdma_v3_0_vm_write_pte, | ||
1504 | .set_pte_pde = sdma_v3_0_vm_set_pte_pde, | ||
1505 | .pad_ib = sdma_v3_0_vm_pad_ib, | ||
1506 | }; | ||
1507 | |||
1508 | static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) | ||
1509 | { | ||
1510 | if (adev->vm_manager.vm_pte_funcs == NULL) { | ||
1511 | adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; | ||
1512 | adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; | ||
1513 | } | ||
1514 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h new file mode 100644 index 000000000000..85bf2ac59252 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __SDMA_V3_0_H__ | ||
25 | #define __SDMA_V3_0_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs sdma_v3_0_ip_funcs; | ||
28 | |||
29 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/smu8.h b/drivers/gpu/drm/amd/amdgpu/smu8.h new file mode 100644 index 000000000000..d758d07b6a31 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu8.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef SMU8_H | ||
25 | #define SMU8_H | ||
26 | |||
27 | #pragma pack(push, 1) | ||
28 | |||
29 | #define ENABLE_DEBUG_FEATURES | ||
30 | |||
31 | struct SMU8_Firmware_Header { | ||
32 | uint32_t Version; | ||
33 | uint32_t ImageSize; | ||
34 | uint32_t CodeSize; | ||
35 | uint32_t HeaderSize; | ||
36 | uint32_t EntryPoint; | ||
37 | uint32_t Rtos; | ||
38 | uint32_t UcodeLoadStatus; | ||
39 | uint32_t DpmTable; | ||
40 | uint32_t FanTable; | ||
41 | uint32_t PmFuseTable; | ||
42 | uint32_t Globals; | ||
43 | uint32_t Reserved[20]; | ||
44 | uint32_t Signature; | ||
45 | }; | ||
46 | |||
47 | struct SMU8_MultimediaPowerLogData { | ||
48 | uint32_t avgTotalPower; | ||
49 | uint32_t avgGpuPower; | ||
50 | uint32_t avgUvdPower; | ||
51 | uint32_t avgVcePower; | ||
52 | |||
53 | uint32_t avgSclk; | ||
54 | uint32_t avgDclk; | ||
55 | uint32_t avgVclk; | ||
56 | uint32_t avgEclk; | ||
57 | |||
58 | uint32_t startTimeHi; | ||
59 | uint32_t startTimeLo; | ||
60 | |||
61 | uint32_t endTimeHi; | ||
62 | uint32_t endTimeLo; | ||
63 | }; | ||
64 | |||
65 | #define SMU8_FIRMWARE_HEADER_LOCATION 0x1FF80 | ||
66 | #define SMU8_UNBCSR_START_ADDR 0xC0100000 | ||
67 | |||
68 | #define SMN_MP1_SRAM_START_ADDR 0x10000000 | ||
69 | |||
70 | #pragma pack(pop) | ||
71 | |||
72 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h b/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h new file mode 100644 index 000000000000..5c9cc3c0bbfa --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu8_fusion.h | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef SMU8_FUSION_H | ||
25 | #define SMU8_FUSION_H | ||
26 | |||
27 | #include "smu8.h" | ||
28 | |||
29 | #pragma pack(push, 1) | ||
30 | |||
31 | #define SMU8_MAX_CUS 2 | ||
32 | #define SMU8_PSMS_PER_CU 4 | ||
33 | #define SMU8_CACS_PER_CU 4 | ||
34 | |||
35 | struct SMU8_GfxCuPgScoreboard { | ||
36 | uint8_t Enabled; | ||
37 | uint8_t spare[3]; | ||
38 | }; | ||
39 | |||
40 | struct SMU8_Port80MonitorTable { | ||
41 | uint32_t MmioAddress; | ||
42 | uint32_t MemoryBaseHi; | ||
43 | uint32_t MemoryBaseLo; | ||
44 | uint16_t MemoryBufferSize; | ||
45 | uint16_t MemoryPosition; | ||
46 | uint16_t PollingInterval; | ||
47 | uint8_t EnableCsrShadow; | ||
48 | uint8_t EnableDramShadow; | ||
49 | }; | ||
50 | |||
51 | /* Clock Table Definitions */ | ||
52 | #define NUM_SCLK_LEVELS 8 | ||
53 | #define NUM_LCLK_LEVELS 8 | ||
54 | #define NUM_UVD_LEVELS 8 | ||
55 | #define NUM_ECLK_LEVELS 8 | ||
56 | #define NUM_ACLK_LEVELS 8 | ||
57 | |||
58 | struct SMU8_Fusion_ClkLevel { | ||
59 | uint8_t GnbVid; | ||
60 | uint8_t GfxVid; | ||
61 | uint8_t DfsDid; | ||
62 | uint8_t DeepSleepDid; | ||
63 | uint32_t DfsBypass; | ||
64 | uint32_t Frequency; | ||
65 | }; | ||
66 | |||
67 | struct SMU8_Fusion_SclkBreakdownTable { | ||
68 | struct SMU8_Fusion_ClkLevel ClkLevel[NUM_SCLK_LEVELS]; | ||
69 | struct SMU8_Fusion_ClkLevel DpmOffLevel; | ||
70 | /* SMU8_Fusion_ClkLevel PwrOffLevel; */ | ||
71 | uint32_t SclkValidMask; | ||
72 | uint32_t MaxSclkIndex; | ||
73 | }; | ||
74 | |||
75 | struct SMU8_Fusion_LclkBreakdownTable { | ||
76 | struct SMU8_Fusion_ClkLevel ClkLevel[NUM_LCLK_LEVELS]; | ||
77 | struct SMU8_Fusion_ClkLevel DpmOffLevel; | ||
78 | /* SMU8_Fusion_ClkLevel PwrOffLevel; */ | ||
79 | uint32_t LclkValidMask; | ||
80 | uint32_t MaxLclkIndex; | ||
81 | }; | ||
82 | |||
83 | struct SMU8_Fusion_EclkBreakdownTable { | ||
84 | struct SMU8_Fusion_ClkLevel ClkLevel[NUM_ECLK_LEVELS]; | ||
85 | struct SMU8_Fusion_ClkLevel DpmOffLevel; | ||
86 | struct SMU8_Fusion_ClkLevel PwrOffLevel; | ||
87 | uint32_t EclkValidMask; | ||
88 | uint32_t MaxEclkIndex; | ||
89 | }; | ||
90 | |||
91 | struct SMU8_Fusion_VclkBreakdownTable { | ||
92 | struct SMU8_Fusion_ClkLevel ClkLevel[NUM_UVD_LEVELS]; | ||
93 | struct SMU8_Fusion_ClkLevel DpmOffLevel; | ||
94 | struct SMU8_Fusion_ClkLevel PwrOffLevel; | ||
95 | uint32_t VclkValidMask; | ||
96 | uint32_t MaxVclkIndex; | ||
97 | }; | ||
98 | |||
99 | struct SMU8_Fusion_DclkBreakdownTable { | ||
100 | struct SMU8_Fusion_ClkLevel ClkLevel[NUM_UVD_LEVELS]; | ||
101 | struct SMU8_Fusion_ClkLevel DpmOffLevel; | ||
102 | struct SMU8_Fusion_ClkLevel PwrOffLevel; | ||
103 | uint32_t DclkValidMask; | ||
104 | uint32_t MaxDclkIndex; | ||
105 | }; | ||
106 | |||
107 | struct SMU8_Fusion_AclkBreakdownTable { | ||
108 | struct SMU8_Fusion_ClkLevel ClkLevel[NUM_ACLK_LEVELS]; | ||
109 | struct SMU8_Fusion_ClkLevel DpmOffLevel; | ||
110 | struct SMU8_Fusion_ClkLevel PwrOffLevel; | ||
111 | uint32_t AclkValidMask; | ||
112 | uint32_t MaxAclkIndex; | ||
113 | }; | ||
114 | |||
115 | |||
116 | struct SMU8_Fusion_ClkTable { | ||
117 | struct SMU8_Fusion_SclkBreakdownTable SclkBreakdownTable; | ||
118 | struct SMU8_Fusion_LclkBreakdownTable LclkBreakdownTable; | ||
119 | struct SMU8_Fusion_EclkBreakdownTable EclkBreakdownTable; | ||
120 | struct SMU8_Fusion_VclkBreakdownTable VclkBreakdownTable; | ||
121 | struct SMU8_Fusion_DclkBreakdownTable DclkBreakdownTable; | ||
122 | struct SMU8_Fusion_AclkBreakdownTable AclkBreakdownTable; | ||
123 | }; | ||
124 | |||
125 | #pragma pack(pop) | ||
126 | |||
127 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h new file mode 100644 index 000000000000..f8ba071f39c8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_cz.h | |||
@@ -0,0 +1,147 @@ | |||
1 | // CZ Ucode Loading Definitions | ||
2 | #ifndef SMU_UCODE_XFER_CZ_H | ||
3 | #define SMU_UCODE_XFER_CZ_H | ||
4 | |||
5 | #define NUM_JOBLIST_ENTRIES 32 | ||
6 | |||
7 | #define TASK_TYPE_NO_ACTION 0 | ||
8 | #define TASK_TYPE_UCODE_LOAD 1 | ||
9 | #define TASK_TYPE_UCODE_SAVE 2 | ||
10 | #define TASK_TYPE_REG_LOAD 3 | ||
11 | #define TASK_TYPE_REG_SAVE 4 | ||
12 | #define TASK_TYPE_INITIALIZE 5 | ||
13 | |||
14 | #define TASK_ARG_REG_SMCIND 0 | ||
15 | #define TASK_ARG_REG_MMIO 1 | ||
16 | #define TASK_ARG_REG_FCH 2 | ||
17 | #define TASK_ARG_REG_UNB 3 | ||
18 | |||
19 | #define TASK_ARG_INIT_MM_PWR_LOG 0 | ||
20 | #define TASK_ARG_INIT_CLK_TABLE 1 | ||
21 | |||
22 | #define JOB_GFX_SAVE 0 | ||
23 | #define JOB_GFX_RESTORE 1 | ||
24 | #define JOB_FCH_SAVE 2 | ||
25 | #define JOB_FCH_RESTORE 3 | ||
26 | #define JOB_UNB_SAVE 4 | ||
27 | #define JOB_UNB_RESTORE 5 | ||
28 | #define JOB_GMC_SAVE 6 | ||
29 | #define JOB_GMC_RESTORE 7 | ||
30 | #define JOB_GNB_SAVE 8 | ||
31 | #define JOB_GNB_RESTORE 9 | ||
32 | |||
33 | #define IGNORE_JOB 0xff | ||
34 | #define END_OF_TASK_LIST (uint16_t)0xffff | ||
35 | |||
36 | // Size of DRAM regions (in bytes) requested by SMU: | ||
37 | #define SMU_DRAM_REQ_MM_PWR_LOG 48 | ||
38 | |||
39 | #define UCODE_ID_SDMA0 0 | ||
40 | #define UCODE_ID_SDMA1 1 | ||
41 | #define UCODE_ID_CP_CE 2 | ||
42 | #define UCODE_ID_CP_PFP 3 | ||
43 | #define UCODE_ID_CP_ME 4 | ||
44 | #define UCODE_ID_CP_MEC_JT1 5 | ||
45 | #define UCODE_ID_CP_MEC_JT2 6 | ||
46 | #define UCODE_ID_GMCON_RENG 7 | ||
47 | #define UCODE_ID_RLC_G 8 | ||
48 | #define UCODE_ID_RLC_SCRATCH 9 | ||
49 | #define UCODE_ID_RLC_SRM_ARAM 10 | ||
50 | #define UCODE_ID_RLC_SRM_DRAM 11 | ||
51 | #define UCODE_ID_DMCU_ERAM 12 | ||
52 | #define UCODE_ID_DMCU_IRAM 13 | ||
53 | |||
54 | #define UCODE_ID_SDMA0_MASK 0x00000001 | ||
55 | #define UCODE_ID_SDMA1_MASK 0x00000002 | ||
56 | #define UCODE_ID_CP_CE_MASK 0x00000004 | ||
57 | #define UCODE_ID_CP_PFP_MASK 0x00000008 | ||
58 | #define UCODE_ID_CP_ME_MASK 0x00000010 | ||
59 | #define UCODE_ID_CP_MEC_JT1_MASK 0x00000020 | ||
60 | #define UCODE_ID_CP_MEC_JT2_MASK 0x00000040 | ||
61 | #define UCODE_ID_GMCON_RENG_MASK 0x00000080 | ||
62 | #define UCODE_ID_RLC_G_MASK 0x00000100 | ||
63 | #define UCODE_ID_RLC_SCRATCH_MASK 0x00000200 | ||
64 | #define UCODE_ID_RLC_SRM_ARAM_MASK 0x00000400 | ||
65 | #define UCODE_ID_RLC_SRM_DRAM_MASK 0x00000800 | ||
66 | #define UCODE_ID_DMCU_ERAM_MASK 0x00001000 | ||
67 | #define UCODE_ID_DMCU_IRAM_MASK 0x00002000 | ||
68 | |||
69 | #define UCODE_ID_SDMA0_SIZE_BYTE 10368 | ||
70 | #define UCODE_ID_SDMA1_SIZE_BYTE 10368 | ||
71 | #define UCODE_ID_CP_CE_SIZE_BYTE 8576 | ||
72 | #define UCODE_ID_CP_PFP_SIZE_BYTE 16768 | ||
73 | #define UCODE_ID_CP_ME_SIZE_BYTE 16768 | ||
74 | #define UCODE_ID_CP_MEC_JT1_SIZE_BYTE 384 | ||
75 | #define UCODE_ID_CP_MEC_JT2_SIZE_BYTE 384 | ||
76 | #define UCODE_ID_GMCON_RENG_SIZE_BYTE 4096 | ||
77 | #define UCODE_ID_RLC_G_SIZE_BYTE 2048 | ||
78 | #define UCODE_ID_RLC_SCRATCH_SIZE_BYTE 132 | ||
79 | #define UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE 8192 | ||
80 | #define UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE 4096 | ||
81 | #define UCODE_ID_DMCU_ERAM_SIZE_BYTE 24576 | ||
82 | #define UCODE_ID_DMCU_IRAM_SIZE_BYTE 1024 | ||
83 | |||
84 | #define NUM_UCODES 14 | ||
85 | |||
86 | typedef struct { | ||
87 | uint32_t high; | ||
88 | uint32_t low; | ||
89 | } data_64_t; | ||
90 | |||
91 | struct SMU_Task { | ||
92 | uint8_t type; | ||
93 | uint8_t arg; | ||
94 | uint16_t next; | ||
95 | data_64_t addr; | ||
96 | uint32_t size_bytes; | ||
97 | }; | ||
98 | typedef struct SMU_Task SMU_Task; | ||
99 | |||
100 | struct TOC { | ||
101 | uint8_t JobList[NUM_JOBLIST_ENTRIES]; | ||
102 | SMU_Task tasks[1]; | ||
103 | }; | ||
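/*
 * Editor's sketch, not part of the patch: tasks appear to form singly
 * linked chains through the 16-bit 'next' index, terminated by
 * END_OF_TASK_LIST, with each JobList slot holding either the index of a
 * job's first task or IGNORE_JOB. Under that assumption, a minimal
 * one-task TOC could be set up like this:
 */
static inline void smu_toc_example(struct TOC *toc)
{
	int i;

	for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
		toc->JobList[i] = IGNORE_JOB;

	toc->JobList[JOB_GFX_RESTORE] = 0;	/* first (and only) task */
	toc->tasks[0].type = TASK_TYPE_UCODE_LOAD;
	toc->tasks[0].arg = 0;
	toc->tasks[0].next = END_OF_TASK_LIST;
}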
104 | |||
105 | // META DATA COMMAND Definitions | ||
106 | #define METADATA_CMD_MODE0 0x00000103 | ||
107 | #define METADATA_CMD_MODE1 0x00000113 | ||
108 | #define METADATA_CMD_MODE2 0x00000123 | ||
109 | #define METADATA_CMD_MODE3 0x00000133 | ||
110 | #define METADATA_CMD_DELAY 0x00000203 | ||
111 | #define METADATA_CMD_CHNG_REGSPACE 0x00000303 | ||
112 | #define METADATA_PERFORM_ON_SAVE 0x00001000 | ||
113 | #define METADATA_PERFORM_ON_LOAD 0x00002000 | ||
114 | #define METADATA_CMD_ARG_MASK 0xFFFF0000 | ||
115 | #define METADATA_CMD_ARG_SHIFT 16 | ||
116 | |||
117 | // Simple register addr/data fields | ||
118 | struct SMU_MetaData_Mode0 { | ||
119 | uint32_t register_address; | ||
120 | uint32_t register_data; | ||
121 | }; | ||
122 | typedef struct SMU_MetaData_Mode0 SMU_MetaData_Mode0; | ||
123 | |||
124 | // Register addr/data with mask | ||
125 | struct SMU_MetaData_Mode1 { | ||
126 | uint32_t register_address; | ||
127 | uint32_t register_mask; | ||
128 | uint32_t register_data; | ||
129 | }; | ||
130 | typedef struct SMU_MetaData_Mode1 SMU_MetaData_Mode1; | ||
131 | |||
132 | struct SMU_MetaData_Mode2 { | ||
133 | uint32_t register_address; | ||
134 | uint32_t register_mask; | ||
135 | uint32_t target_value; | ||
136 | }; | ||
137 | typedef struct SMU_MetaData_Mode2 SMU_MetaData_Mode2; | ||
138 | |||
139 | // Always write data (even on a save operation) | ||
140 | struct SMU_MetaData_Mode3 { | ||
141 | uint32_t register_address; | ||
142 | uint32_t register_mask; | ||
143 | uint32_t register_data; | ||
144 | }; | ||
145 | typedef struct SMU_MetaData_Mode3 SMU_MetaData_Mode3; | ||
146 | |||
147 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h new file mode 100644 index 000000000000..c24a81eebc7c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smu_ucode_xfer_vi.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef SMU_UCODE_XFER_VI_H | ||
25 | #define SMU_UCODE_XFER_VI_H | ||
26 | |||
27 | #define SMU_DRAMData_TOC_VERSION 1 | ||
28 | #define MAX_IH_REGISTER_COUNT 65535 | ||
29 | #define SMU_DIGEST_SIZE_BYTES 20 | ||
30 | #define SMU_FB_SIZE_BYTES 1048576 | ||
31 | #define SMU_MAX_ENTRIES 12 | ||
32 | |||
33 | #define UCODE_ID_SMU 0 | ||
34 | #define UCODE_ID_SDMA0 1 | ||
35 | #define UCODE_ID_SDMA1 2 | ||
36 | #define UCODE_ID_CP_CE 3 | ||
37 | #define UCODE_ID_CP_PFP 4 | ||
38 | #define UCODE_ID_CP_ME 5 | ||
39 | #define UCODE_ID_CP_MEC 6 | ||
40 | #define UCODE_ID_CP_MEC_JT1 7 | ||
41 | #define UCODE_ID_CP_MEC_JT2 8 | ||
42 | #define UCODE_ID_GMCON_RENG 9 | ||
43 | #define UCODE_ID_RLC_G 10 | ||
44 | #define UCODE_ID_IH_REG_RESTORE 11 | ||
45 | #define UCODE_ID_VBIOS 12 | ||
46 | #define UCODE_ID_MISC_METADATA 13 | ||
47 | #define UCODE_ID_RLC_SCRATCH 32 | ||
48 | #define UCODE_ID_RLC_SRM_ARAM 33 | ||
49 | #define UCODE_ID_RLC_SRM_DRAM 34 | ||
50 | #define UCODE_ID_MEC_STORAGE 35 | ||
51 | #define UCODE_ID_VBIOS_PARAMETERS 36 | ||
52 | #define UCODE_META_DATA 0xFF | ||
53 | |||
54 | #define UCODE_ID_SMU_MASK 0x00000001 | ||
55 | #define UCODE_ID_SDMA0_MASK 0x00000002 | ||
56 | #define UCODE_ID_SDMA1_MASK 0x00000004 | ||
57 | #define UCODE_ID_CP_CE_MASK 0x00000008 | ||
58 | #define UCODE_ID_CP_PFP_MASK 0x00000010 | ||
59 | #define UCODE_ID_CP_ME_MASK 0x00000020 | ||
60 | #define UCODE_ID_CP_MEC_MASK 0x00000040 | ||
61 | #define UCODE_ID_CP_MEC_JT1_MASK 0x00000080 | ||
62 | #define UCODE_ID_CP_MEC_JT2_MASK 0x00000100 | ||
63 | #define UCODE_ID_GMCON_RENG_MASK 0x00000200 | ||
64 | #define UCODE_ID_RLC_G_MASK 0x00000400 | ||
65 | #define UCODE_ID_IH_REG_RESTORE_MASK 0x00000800 | ||
66 | #define UCODE_ID_VBIOS_MASK 0x00001000 | ||
67 | |||
68 | #define UCODE_FLAG_UNHALT_MASK 0x1 | ||
69 | |||
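/*
 * Editor's note (an assumption, not from the patch): the byte-order guard
 * below swaps each pair of adjacent 16-bit fields, presumably so that the
 * entry lands in memory with the little-endian 32-bit word layout the SMU
 * expects regardless of host endianness.
 */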
70 | struct SMU_Entry { | ||
71 | #ifndef __BIG_ENDIAN | ||
72 | uint16_t id; | ||
73 | uint16_t version; | ||
74 | uint32_t image_addr_high; | ||
75 | uint32_t image_addr_low; | ||
76 | uint32_t meta_data_addr_high; | ||
77 | uint32_t meta_data_addr_low; | ||
78 | uint32_t data_size_byte; | ||
79 | uint16_t flags; | ||
80 | uint16_t num_register_entries; | ||
81 | #else | ||
82 | uint16_t version; | ||
83 | uint16_t id; | ||
84 | uint32_t image_addr_high; | ||
85 | uint32_t image_addr_low; | ||
86 | uint32_t meta_data_addr_high; | ||
87 | uint32_t meta_data_addr_low; | ||
88 | uint32_t data_size_byte; | ||
89 | uint16_t num_register_entries; | ||
90 | uint16_t flags; | ||
91 | #endif | ||
92 | }; | ||
93 | |||
94 | struct SMU_DRAMData_TOC { | ||
95 | uint32_t structure_version; | ||
96 | uint32_t num_entries; | ||
97 | struct SMU_Entry entry[SMU_MAX_ENTRIES]; | ||
98 | }; | ||
99 | |||
100 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c new file mode 100644 index 000000000000..98bd707ac5dc --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/firmware.h> | ||
25 | #include "drmP.h" | ||
26 | #include "amdgpu.h" | ||
27 | #include "tonga_smumgr.h" | ||
28 | |||
29 | MODULE_FIRMWARE("radeon/tonga_smc.bin"); | ||
30 | |||
31 | static void tonga_dpm_set_funcs(struct amdgpu_device *adev); | ||
32 | |||
33 | static int tonga_dpm_early_init(struct amdgpu_device *adev) | ||
34 | { | ||
35 | tonga_dpm_set_funcs(adev); | ||
36 | |||
37 | return 0; | ||
38 | } | ||
39 | |||
40 | static int tonga_dpm_init_microcode(struct amdgpu_device *adev) | ||
41 | { | ||
42 | char fw_name[30] = "radeon/tonga_smc.bin"; | ||
43 | int err; | ||
44 | |||
45 | err = request_firmware(&adev->pm.fw, fw_name, adev->dev); | ||
46 | if (err) | ||
47 | goto out; | ||
48 | err = amdgpu_ucode_validate(adev->pm.fw); | ||
49 | |||
50 | out: | ||
51 | if (err) { | ||
52 | DRM_ERROR("Failed to load firmware \"%s\"", fw_name); | ||
53 | release_firmware(adev->pm.fw); | ||
54 | adev->pm.fw = NULL; | ||
55 | } | ||
56 | return err; | ||
57 | } | ||
58 | |||
59 | static int tonga_dpm_sw_init(struct amdgpu_device *adev) | ||
60 | { | ||
61 | int ret; | ||
62 | |||
63 | ret = tonga_dpm_init_microcode(adev); | ||
64 | if (ret) | ||
65 | return ret; | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static int tonga_dpm_sw_fini(struct amdgpu_device *adev) | ||
71 | { | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static int tonga_dpm_hw_init(struct amdgpu_device *adev) | ||
76 | { | ||
77 | int ret; | ||
78 | |||
79 | mutex_lock(&adev->pm.mutex); | ||
80 | |||
81 | ret = tonga_smu_init(adev); | ||
82 | if (ret) { | ||
83 | DRM_ERROR("SMU initialization failed\n"); | ||
84 | goto fail; | ||
85 | } | ||
86 | |||
87 | ret = tonga_smu_start(adev); | ||
88 | if (ret) { | ||
89 | DRM_ERROR("SMU start failed\n"); | ||
90 | goto fail; | ||
91 | } | ||
92 | |||
93 | mutex_unlock(&adev->pm.mutex); | ||
94 | return 0; | ||
95 | |||
96 | fail: | ||
97 | adev->firmware.smu_load = false; | ||
98 | mutex_unlock(&adev->pm.mutex); | ||
99 | return -EINVAL; | ||
100 | } | ||
101 | |||
102 | static int tonga_dpm_hw_fini(struct amdgpu_device *adev) | ||
103 | { | ||
104 | mutex_lock(&adev->pm.mutex); | ||
105 | tonga_smu_fini(adev); | ||
106 | mutex_unlock(&adev->pm.mutex); | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int tonga_dpm_suspend(struct amdgpu_device *adev) | ||
111 | { | ||
112 | tonga_dpm_hw_fini(adev); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static int tonga_dpm_resume(struct amdgpu_device *adev) | ||
118 | { | ||
119 | tonga_dpm_hw_init(adev); | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static int tonga_dpm_set_clockgating_state(struct amdgpu_device *adev, | ||
125 | enum amdgpu_clockgating_state state) | ||
126 | { | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | static int tonga_dpm_set_powergating_state(struct amdgpu_device *adev, | ||
131 | enum amdgpu_powergating_state state) | ||
132 | { | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | const struct amdgpu_ip_funcs tonga_dpm_ip_funcs = { | ||
137 | .early_init = tonga_dpm_early_init, | ||
138 | .late_init = NULL, | ||
139 | .sw_init = tonga_dpm_sw_init, | ||
140 | .sw_fini = tonga_dpm_sw_fini, | ||
141 | .hw_init = tonga_dpm_hw_init, | ||
142 | .hw_fini = tonga_dpm_hw_fini, | ||
143 | .suspend = tonga_dpm_suspend, | ||
144 | .resume = tonga_dpm_resume, | ||
145 | .is_idle = NULL, | ||
146 | .wait_for_idle = NULL, | ||
147 | .soft_reset = NULL, | ||
148 | .print_status = NULL, | ||
149 | .set_clockgating_state = tonga_dpm_set_clockgating_state, | ||
150 | .set_powergating_state = tonga_dpm_set_powergating_state, | ||
151 | }; | ||
152 | |||
153 | static const struct amdgpu_dpm_funcs tonga_dpm_funcs = { | ||
154 | .get_temperature = NULL, | ||
155 | .pre_set_power_state = NULL, | ||
156 | .set_power_state = NULL, | ||
157 | .post_set_power_state = NULL, | ||
158 | .display_configuration_changed = NULL, | ||
159 | .get_sclk = NULL, | ||
160 | .get_mclk = NULL, | ||
161 | .print_power_state = NULL, | ||
162 | .debugfs_print_current_performance_level = NULL, | ||
163 | .force_performance_level = NULL, | ||
164 | .vblank_too_short = NULL, | ||
165 | .powergate_uvd = NULL, | ||
166 | }; | ||
167 | |||
168 | static void tonga_dpm_set_funcs(struct amdgpu_device *adev) | ||
169 | { | ||
170 | if (NULL == adev->pm.funcs) | ||
171 | adev->pm.funcs = &tonga_dpm_funcs; | ||
172 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c new file mode 100644 index 000000000000..cff1b8bce6a4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c | |||
@@ -0,0 +1,458 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include "drmP.h" | ||
24 | #include "amdgpu.h" | ||
25 | #include "amdgpu_ih.h" | ||
26 | #include "vid.h" | ||
27 | |||
28 | #include "oss/oss_3_0_d.h" | ||
29 | #include "oss/oss_3_0_sh_mask.h" | ||
30 | |||
31 | #include "bif/bif_5_1_d.h" | ||
32 | #include "bif/bif_5_1_sh_mask.h" | ||
33 | |||
34 | /* | ||
35 | * Interrupts | ||
36 | * Starting with r6xx, interrupts are handled via a ring buffer. | ||
37 | * Ring buffers are areas of GPU accessible memory that the GPU | ||
38 | * writes interrupt vectors into and the host reads vectors out of. | ||
39 | * There is a rptr (read pointer) that determines where the | ||
40 | * host is currently reading, and a wptr (write pointer) | ||
41 | * which determines where the GPU has written. When the | ||
42 | * pointers are equal, the ring is idle. When the GPU | ||
43 | * writes vectors to the ring buffer, it increments the | ||
44 | * wptr. When there is an interrupt, the host then starts | ||
45 | * fetching commands and processing them until the pointers are | ||
46 | * equal again at which point it updates the rptr. | ||
47 | */ | ||
48 | |||
49 | static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev); | ||
50 | |||
51 | /** | ||
52 | * tonga_ih_enable_interrupts - Enable the interrupt ring buffer | ||
53 | * | ||
54 | * @adev: amdgpu_device pointer | ||
55 | * | ||
56 | * Enable the interrupt ring buffer (VI). | ||
57 | */ | ||
58 | static void tonga_ih_enable_interrupts(struct amdgpu_device *adev) | ||
59 | { | ||
60 | u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); | ||
61 | |||
62 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1); | ||
63 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1); | ||
64 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
65 | adev->irq.ih.enabled = true; | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * tonga_ih_disable_interrupts - Disable the interrupt ring buffer | ||
70 | * | ||
71 | * @adev: amdgpu_device pointer | ||
72 | * | ||
73 | * Disable the interrupt ring buffer (VI). | ||
74 | */ | ||
75 | static void tonga_ih_disable_interrupts(struct amdgpu_device *adev) | ||
76 | { | ||
77 | u32 ih_rb_cntl = RREG32(mmIH_RB_CNTL); | ||
78 | |||
79 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0); | ||
80 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0); | ||
81 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
82 | /* set rptr, wptr to 0 */ | ||
83 | WREG32(mmIH_RB_RPTR, 0); | ||
84 | WREG32(mmIH_RB_WPTR, 0); | ||
85 | adev->irq.ih.enabled = false; | ||
86 | adev->irq.ih.rptr = 0; | ||
87 | } | ||
88 | |||
89 | /** | ||
90 | * tonga_ih_irq_init - init and enable the interrupt ring | ||
91 | * | ||
92 | * @adev: amdgpu_device pointer | ||
93 | * | ||
94 | * Allocate a ring buffer for the interrupt controller, | ||
95 | * enable the RLC, disable interrupts, enable the IH | ||
96 | * ring buffer and enable it (VI). | ||
97 | * Called at device load and resume. | ||
98 | * Returns 0 for success, errors for failure. | ||
99 | */ | ||
100 | static int tonga_ih_irq_init(struct amdgpu_device *adev) | ||
101 | { | ||
102 | int ret = 0; | ||
103 | int rb_bufsz; | ||
104 | u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr; | ||
105 | u64 wptr_off; | ||
106 | |||
107 | /* disable irqs */ | ||
108 | tonga_ih_disable_interrupts(adev); | ||
109 | |||
110 | /* setup interrupt control */ | ||
111 | WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8); | ||
112 | interrupt_cntl = RREG32(mmINTERRUPT_CNTL); | ||
113 | /* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi | ||
114 | * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN | ||
115 | */ | ||
116 | interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0); | ||
117 | /* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ | ||
118 | interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0); | ||
119 | WREG32(mmINTERRUPT_CNTL, interrupt_cntl); | ||
120 | |||
121 | /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ | ||
122 | if (adev->irq.ih.use_bus_addr) | ||
123 | WREG32(mmIH_RB_BASE, adev->irq.ih.rb_dma_addr >> 8); | ||
124 | else | ||
125 | WREG32(mmIH_RB_BASE, adev->irq.ih.gpu_addr >> 8); | ||
126 | |||
127 | rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4); | ||
128 | ih_rb_cntl = REG_SET_FIELD(0, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); | ||
129 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz); | ||
130 | /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */ | ||
131 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1); | ||
132 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0); | ||
133 | |||
134 | if (adev->irq.msi_enabled) | ||
135 | ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1); | ||
136 | |||
137 | WREG32(mmIH_RB_CNTL, ih_rb_cntl); | ||
138 | |||
139 | /* set the writeback address whether it's enabled or not */ | ||
140 | if (adev->irq.ih.use_bus_addr) | ||
141 | wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4); | ||
142 | else | ||
143 | wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4); | ||
144 | WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off)); | ||
145 | WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF); | ||
146 | |||
147 | /* set rptr, wptr to 0 */ | ||
148 | WREG32(mmIH_RB_RPTR, 0); | ||
149 | WREG32(mmIH_RB_WPTR, 0); | ||
150 | |||
151 | ih_doorbell_rtpr = RREG32(mmIH_DOORBELL_RPTR); | ||
152 | if (adev->irq.ih.use_doorbell) { | ||
153 | ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, | ||
154 | OFFSET, adev->irq.ih.doorbell_index); | ||
155 | ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, | ||
156 | ENABLE, 1); | ||
157 | } else { | ||
158 | ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR, | ||
159 | ENABLE, 0); | ||
160 | } | ||
161 | WREG32(mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); | ||
162 | |||
163 | pci_set_master(adev->pdev); | ||
164 | |||
165 | /* enable interrupts */ | ||
166 | tonga_ih_enable_interrupts(adev); | ||
167 | |||
168 | return ret; | ||
169 | } | ||
170 | |||
171 | /** | ||
172 | * tonga_ih_irq_disable - disable interrupts | ||
173 | * | ||
174 | * @adev: amdgpu_device pointer | ||
175 | * | ||
176 | * Disable interrupts on the hw (VI). | ||
177 | */ | ||
178 | static void tonga_ih_irq_disable(struct amdgpu_device *adev) | ||
179 | { | ||
180 | tonga_ih_disable_interrupts(adev); | ||
181 | |||
182 | /* Wait and acknowledge irq */ | ||
183 | mdelay(1); | ||
184 | } | ||
185 | |||
186 | /** | ||
187 | * tonga_ih_get_wptr - get the IH ring buffer wptr | ||
188 | * | ||
189 | * @adev: amdgpu_device pointer | ||
190 | * | ||
191 | * Get the IH ring buffer wptr from either the register | ||
192 | * or the writeback memory buffer (VI). Also check for | ||
193 | * ring buffer overflow and deal with it. | ||
194 | * Used by the interrupt handler (VI). | ||
195 | * Returns the value of the wptr. | ||
196 | */ | ||
197 | static u32 tonga_ih_get_wptr(struct amdgpu_device *adev) | ||
198 | { | ||
199 | u32 wptr, tmp; | ||
200 | |||
201 | if (adev->irq.ih.use_bus_addr) | ||
202 | wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]); | ||
203 | else | ||
204 | wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]); | ||
205 | |||
206 | if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) { | ||
207 | wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0); | ||
208 | /* When a ring buffer overflow happens, start parsing interrupts | ||
209 | * from the last not-overwritten vector (wptr + 16). Hopefully | ||
210 | * this should allow us to catch up. | ||
211 | */ | ||
212 | dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n", | ||
213 | wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask); | ||
214 | adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask; | ||
215 | tmp = RREG32(mmIH_RB_CNTL); | ||
216 | tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); | ||
217 | WREG32(mmIH_RB_CNTL, tmp); | ||
218 | } | ||
219 | return (wptr & adev->irq.ih.ptr_mask); | ||
220 | } | ||
221 | |||
222 | /** | ||
223 | * tonga_ih_decode_iv - decode an interrupt vector | ||
224 | * | ||
225 | * @adev: amdgpu_device pointer | ||
226 | * @entry: IV entry to fill with the decoded vector | ||
227 | * Decodes the interrupt vector at the current rptr | ||
228 | * position and also advance the position. | ||
229 | */ | ||
230 | static void tonga_ih_decode_iv(struct amdgpu_device *adev, | ||
231 | struct amdgpu_iv_entry *entry) | ||
232 | { | ||
233 | /* wptr/rptr are in bytes! */ | ||
234 | u32 ring_index = adev->irq.ih.rptr >> 2; | ||
235 | uint32_t dw[4]; | ||
236 | |||
237 | dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); | ||
238 | dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); | ||
239 | dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); | ||
240 | dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); | ||
241 | |||
242 | entry->src_id = dw[0] & 0xff; | ||
243 | entry->src_data = dw[1] & 0xfffffff; | ||
244 | entry->ring_id = dw[2] & 0xff; | ||
245 | entry->vm_id = (dw[2] >> 8) & 0xff; | ||
246 | entry->pas_id = (dw[2] >> 16) & 0xffff; | ||
247 | |||
248 | /* wptr/rptr are in bytes! */ | ||
249 | adev->irq.ih.rptr += 16; | ||
250 | } | ||
251 | |||
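For reference, the 16-byte IV layout that tonga_ih_decode_iv pulls apart (field names per amdgpu_iv_entry; dw3 is not consumed here):

/* One VI IV entry as consumed above (offsets relative to rptr, in bytes):
 *
 *   dw0 bits  7:0   -> entry->src_id
 *   dw1 bits 27:0   -> entry->src_data
 *   dw2 bits  7:0   -> entry->ring_id
 *   dw2 bits 15:8   -> entry->vm_id
 *   dw2 bits 31:16  -> entry->pas_id
 *   dw3             -> not decoded here
 */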
252 | /** | ||
253 | * tonga_ih_set_rptr - set the IH ring buffer rptr | ||
254 | * | ||
255 | * @adev: amdgpu_device pointer | ||
256 | * | ||
257 | * Set the IH ring buffer rptr. | ||
258 | */ | ||
259 | static void tonga_ih_set_rptr(struct amdgpu_device *adev) | ||
260 | { | ||
261 | if (adev->irq.ih.use_doorbell) { | ||
262 | /* XXX check if swapping is necessary on BE */ | ||
263 | if (adev->irq.ih.use_bus_addr) | ||
264 | adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; | ||
265 | else | ||
266 | adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr; | ||
267 | WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr); | ||
268 | } else { | ||
269 | WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr); | ||
270 | } | ||
271 | } | ||
272 | |||
273 | static int tonga_ih_early_init(struct amdgpu_device *adev) | ||
274 | { | ||
275 | tonga_ih_set_interrupt_funcs(adev); | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | static int tonga_ih_sw_init(struct amdgpu_device *adev) | ||
280 | { | ||
281 | int r; | ||
282 | |||
283 | r = amdgpu_ih_ring_init(adev, 4 * 1024, true); | ||
284 | if (r) | ||
285 | return r; | ||
286 | |||
287 | adev->irq.ih.use_doorbell = true; | ||
288 | adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH; | ||
289 | |||
290 | r = amdgpu_irq_init(adev); | ||
291 | |||
292 | return r; | ||
293 | } | ||
294 | |||
295 | static int tonga_ih_sw_fini(struct amdgpu_device *adev) | ||
296 | { | ||
297 | amdgpu_irq_fini(adev); | ||
298 | amdgpu_ih_ring_fini(adev); | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int tonga_ih_hw_init(struct amdgpu_device *adev) | ||
304 | { | ||
305 | int r; | ||
306 | |||
307 | r = tonga_ih_irq_init(adev); | ||
308 | if (r) | ||
309 | return r; | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | static int tonga_ih_hw_fini(struct amdgpu_device *adev) | ||
315 | { | ||
316 | tonga_ih_irq_disable(adev); | ||
317 | |||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | static int tonga_ih_suspend(struct amdgpu_device *adev) | ||
322 | { | ||
323 | return tonga_ih_hw_fini(adev); | ||
324 | } | ||
325 | |||
326 | static int tonga_ih_resume(struct amdgpu_device *adev) | ||
327 | { | ||
328 | return tonga_ih_hw_init(adev); | ||
329 | } | ||
330 | |||
331 | static bool tonga_ih_is_idle(struct amdgpu_device *adev) | ||
332 | { | ||
333 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
334 | |||
335 | if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) | ||
336 | return false; | ||
337 | |||
338 | return true; | ||
339 | } | ||
340 | |||
341 | static int tonga_ih_wait_for_idle(struct amdgpu_device *adev) | ||
342 | { | ||
343 | unsigned i; | ||
344 | u32 tmp; | ||
345 | |||
346 | for (i = 0; i < adev->usec_timeout; i++) { | ||
347 | /* read SRBM_STATUS */ | ||
348 | tmp = RREG32(mmSRBM_STATUS); | ||
349 | if (!REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY)) | ||
350 | return 0; | ||
351 | udelay(1); | ||
352 | } | ||
353 | return -ETIMEDOUT; | ||
354 | } | ||
355 | |||
356 | static void tonga_ih_print_status(struct amdgpu_device *adev) | ||
357 | { | ||
358 | dev_info(adev->dev, "TONGA IH registers\n"); | ||
359 | dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", | ||
360 | RREG32(mmSRBM_STATUS)); | ||
361 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | ||
362 | RREG32(mmSRBM_STATUS2)); | ||
363 | dev_info(adev->dev, " INTERRUPT_CNTL=0x%08X\n", | ||
364 | RREG32(mmINTERRUPT_CNTL)); | ||
365 | dev_info(adev->dev, " INTERRUPT_CNTL2=0x%08X\n", | ||
366 | RREG32(mmINTERRUPT_CNTL2)); | ||
367 | dev_info(adev->dev, " IH_CNTL=0x%08X\n", | ||
368 | RREG32(mmIH_CNTL)); | ||
369 | dev_info(adev->dev, " IH_RB_CNTL=0x%08X\n", | ||
370 | RREG32(mmIH_RB_CNTL)); | ||
371 | dev_info(adev->dev, " IH_RB_BASE=0x%08X\n", | ||
372 | RREG32(mmIH_RB_BASE)); | ||
373 | dev_info(adev->dev, " IH_RB_WPTR_ADDR_LO=0x%08X\n", | ||
374 | RREG32(mmIH_RB_WPTR_ADDR_LO)); | ||
375 | dev_info(adev->dev, " IH_RB_WPTR_ADDR_HI=0x%08X\n", | ||
376 | RREG32(mmIH_RB_WPTR_ADDR_HI)); | ||
377 | dev_info(adev->dev, " IH_RB_RPTR=0x%08X\n", | ||
378 | RREG32(mmIH_RB_RPTR)); | ||
379 | dev_info(adev->dev, " IH_RB_WPTR=0x%08X\n", | ||
380 | RREG32(mmIH_RB_WPTR)); | ||
381 | } | ||
382 | |||
383 | static int tonga_ih_soft_reset(struct amdgpu_device *adev) | ||
384 | { | ||
385 | u32 srbm_soft_reset = 0; | ||
386 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
387 | |||
388 | if (tmp & SRBM_STATUS__IH_BUSY_MASK) | ||
389 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, | ||
390 | SOFT_RESET_IH, 1); | ||
391 | |||
392 | if (srbm_soft_reset) { | ||
393 | tonga_ih_print_status(adev); | ||
394 | |||
395 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
396 | tmp |= srbm_soft_reset; | ||
397 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
398 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
399 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
400 | |||
401 | udelay(50); | ||
402 | |||
403 | tmp &= ~srbm_soft_reset; | ||
404 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
405 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
406 | |||
407 | /* Wait a little for things to settle down */ | ||
408 | udelay(50); | ||
409 | |||
410 | tonga_ih_print_status(adev); | ||
411 | } | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static int tonga_ih_set_clockgating_state(struct amdgpu_device *adev, | ||
417 | enum amdgpu_clockgating_state state) | ||
418 | { | ||
419 | /* TODO */ | ||
420 | return 0; | ||
421 | } | ||
422 | |||
423 | static int tonga_ih_set_powergating_state(struct amdgpu_device *adev, | ||
424 | enum amdgpu_powergating_state state) | ||
425 | { | ||
426 | /* TODO */ | ||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | const struct amdgpu_ip_funcs tonga_ih_ip_funcs = { | ||
431 | .early_init = tonga_ih_early_init, | ||
432 | .late_init = NULL, | ||
433 | .sw_init = tonga_ih_sw_init, | ||
434 | .sw_fini = tonga_ih_sw_fini, | ||
435 | .hw_init = tonga_ih_hw_init, | ||
436 | .hw_fini = tonga_ih_hw_fini, | ||
437 | .suspend = tonga_ih_suspend, | ||
438 | .resume = tonga_ih_resume, | ||
439 | .is_idle = tonga_ih_is_idle, | ||
440 | .wait_for_idle = tonga_ih_wait_for_idle, | ||
441 | .soft_reset = tonga_ih_soft_reset, | ||
442 | .print_status = tonga_ih_print_status, | ||
443 | .set_clockgating_state = tonga_ih_set_clockgating_state, | ||
444 | .set_powergating_state = tonga_ih_set_powergating_state, | ||
445 | }; | ||
446 | |||
447 | static const struct amdgpu_ih_funcs tonga_ih_funcs = { | ||
448 | .get_wptr = tonga_ih_get_wptr, | ||
449 | .decode_iv = tonga_ih_decode_iv, | ||
450 | .set_rptr = tonga_ih_set_rptr | ||
451 | }; | ||
452 | |||
453 | static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev) | ||
454 | { | ||
455 | if (adev->irq.ih_funcs == NULL) | ||
456 | adev->irq.ih_funcs = &tonga_ih_funcs; | ||
457 | } | ||
458 | |||
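Before moving on to the headers, a sketch of how the three tonga_ih_funcs hooks are driven: the generic IH code (amdgpu_ih_process in amdgpu_ih.c) runs a loop of roughly this shape (simplified; error handling and re-checking for a new wptr are omitted):

/* Simplified sketch of the generic consumer loop that calls the
 * tonga_ih_funcs above; not the verbatim amdgpu_ih_process body.
 */
u32 wptr = adev->irq.ih_funcs->get_wptr(adev);       /* snapshot producer */

while (adev->irq.ih.rptr != wptr) {
	struct amdgpu_iv_entry entry;

	adev->irq.ih_funcs->decode_iv(adev, &entry); /* advances rptr by 16 */
	amdgpu_irq_dispatch(adev, &entry);           /* fan out to IRQ sources */
}
adev->irq.ih_funcs->set_rptr(adev);                  /* publish consumer position */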
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.h b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h new file mode 100644 index 000000000000..7c9bae87124e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __TONGA_IH_H__ | ||
25 | #define __TONGA_IH_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs tonga_ih_ip_funcs; | ||
28 | |||
29 | #endif /* __TONGA_IH_H__ */ | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h b/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h new file mode 100644 index 000000000000..811781f69482 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ppsmc.h | |||
@@ -0,0 +1,198 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef TONGA_PP_SMC_H | ||
25 | #define TONGA_PP_SMC_H | ||
26 | |||
27 | #pragma pack(push, 1) | ||
28 | |||
29 | #define PPSMC_SWSTATE_FLAG_DC 0x01 | ||
30 | #define PPSMC_SWSTATE_FLAG_UVD 0x02 | ||
31 | #define PPSMC_SWSTATE_FLAG_VCE 0x04 | ||
32 | #define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 | ||
33 | |||
34 | #define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 | ||
35 | #define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 | ||
36 | #define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff | ||
37 | |||
38 | #define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 | ||
39 | #define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 | ||
40 | #define PPSMC_SYSTEMFLAG_GDDR5 0x04 | ||
41 | |||
42 | #define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 | ||
43 | |||
44 | #define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 | ||
45 | #define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 | ||
46 | #define PPSMC_SYSTEMFLAG_12CHANNEL 0x40 | ||
47 | |||
48 | #define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 | ||
49 | #define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 | ||
50 | |||
51 | #define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 | ||
52 | #define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 | ||
53 | |||
54 | #define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10 | ||
55 | #define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20 | ||
56 | #define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40 | ||
57 | |||
58 | #define PPSMC_DPM2FLAGS_TDPCLMP 0x01 | ||
59 | #define PPSMC_DPM2FLAGS_PWRSHFT 0x02 | ||
60 | #define PPSMC_DPM2FLAGS_OCP 0x04 | ||
61 | |||
62 | #define PPSMC_DISPLAY_WATERMARK_LOW 0 | ||
63 | #define PPSMC_DISPLAY_WATERMARK_HIGH 1 | ||
64 | |||
65 | #define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 | ||
66 | #define PPSMC_STATEFLAG_POWERBOOST 0x02 | ||
67 | #define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 | ||
68 | #define PPSMC_STATEFLAG_POWERSHIFT 0x08 | ||
69 | #define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 | ||
70 | #define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 | ||
71 | #define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 | ||
72 | |||
73 | #define FDO_MODE_HARDWARE 0 | ||
74 | #define FDO_MODE_PIECE_WISE_LINEAR 1 | ||
75 | |||
76 | enum FAN_CONTROL { | ||
77 | FAN_CONTROL_FUZZY, | ||
78 | FAN_CONTROL_TABLE | ||
79 | }; | ||
80 | |||
81 | #define PPSMC_Result_OK ((uint16_t)0x01) | ||
82 | #define PPSMC_Result_NoMore ((uint16_t)0x02) | ||
83 | #define PPSMC_Result_NotNow ((uint16_t)0x03) | ||
84 | #define PPSMC_Result_Failed ((uint16_t)0xFF) | ||
85 | #define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) | ||
86 | #define PPSMC_Result_UnknownVT ((uint16_t)0xFD) | ||
87 | |||
88 | typedef uint16_t PPSMC_Result; | ||
89 | |||
90 | #define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) | ||
91 | |||
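Result codes at or above 0x80 are failures by construction, which is what PPSMC_isERROR() encodes; a hedged usage sketch:

/* Illustrative only: checking an SMC response. All failure codes
 * (0xFF, 0xFE, 0xFD) have bit 7 set, so PPSMC_isERROR() reduces to
 * testing that bit; the success codes (0x01..0x03) do not.
 */
PPSMC_Result res = PPSMC_Result_UnknownCmd;  /* e.g. read back from the SMC */

if (PPSMC_isERROR(res))
	pr_err("SMC message failed: 0x%04x\n", res);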
92 | #define PPSMC_MSG_Halt ((uint16_t)0x10) | ||
93 | #define PPSMC_MSG_Resume ((uint16_t)0x11) | ||
94 | #define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) | ||
95 | #define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) | ||
96 | #define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) | ||
97 | #define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) | ||
98 | #define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) | ||
99 | #define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) | ||
100 | #define PPSMC_MSG_LevelUp ((uint16_t)0x18) | ||
101 | #define PPSMC_MSG_LevelDown ((uint16_t)0x19) | ||
102 | #define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) | ||
103 | #define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) | ||
104 | #define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) | ||
105 | #define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) | ||
106 | #define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) | ||
107 | #define PPSMC_MSG_ForceHigh ((uint16_t)0x42) | ||
108 | #define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) | ||
109 | #define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) | ||
110 | #define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) | ||
111 | #define PPSMC_MSG_EnableCac ((uint16_t)0x53) | ||
112 | #define PPSMC_MSG_DisableCac ((uint16_t)0x54) | ||
113 | #define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) | ||
114 | #define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) | ||
115 | #define PPSMC_CACHistoryStart ((uint16_t)0x57) | ||
116 | #define PPSMC_CACHistoryStop ((uint16_t)0x58) | ||
117 | #define PPSMC_TDPClampingActive ((uint16_t)0x59) | ||
118 | #define PPSMC_TDPClampingInactive ((uint16_t)0x5A) | ||
119 | #define PPSMC_StartFanControl ((uint16_t)0x5B) | ||
120 | #define PPSMC_StopFanControl ((uint16_t)0x5C) | ||
121 | #define PPSMC_NoDisplay ((uint16_t)0x5D) | ||
122 | #define PPSMC_HasDisplay ((uint16_t)0x5E) | ||
123 | #define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) | ||
124 | #define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) | ||
125 | #define PPSMC_MSG_EnableULV ((uint16_t)0x62) | ||
126 | #define PPSMC_MSG_DisableULV ((uint16_t)0x63) | ||
127 | #define PPSMC_MSG_EnterULV ((uint16_t)0x64) | ||
128 | #define PPSMC_MSG_ExitULV ((uint16_t)0x65) | ||
129 | #define PPSMC_PowerShiftActive ((uint16_t)0x6A) | ||
130 | #define PPSMC_PowerShiftInactive ((uint16_t)0x6B) | ||
131 | #define PPSMC_OCPActive ((uint16_t)0x6C) | ||
132 | #define PPSMC_OCPInactive ((uint16_t)0x6D) | ||
133 | #define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) | ||
134 | #define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) | ||
135 | #define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) | ||
136 | #define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) | ||
137 | #define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) | ||
138 | #define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) | ||
139 | #define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) | ||
140 | #define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) | ||
141 | #define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) | ||
142 | #define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) | ||
143 | #define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) | ||
144 | #define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) | ||
145 | #define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) | ||
146 | #define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) | ||
147 | #define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) | ||
148 | #define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) | ||
149 | #define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) | ||
150 | #define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) | ||
151 | #define PPSMC_FlushDataCache ((uint16_t)0x80) | ||
152 | #define PPSMC_FlushInstrCache ((uint16_t)0x81) | ||
153 | #define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) | ||
154 | #define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) | ||
155 | #define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) | ||
156 | #define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) | ||
157 | #define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) | ||
158 | #define PPSMC_MSG_EnableDTE ((uint16_t)0x87) | ||
159 | #define PPSMC_MSG_DisableDTE ((uint16_t)0x88) | ||
160 | #define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) | ||
161 | #define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A) | ||
162 | #define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B) | ||
163 | #define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C) | ||
164 | #define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90) | ||
165 | #define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91) | ||
166 | #define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92) | ||
167 | #define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93) | ||
168 | #define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94) | ||
169 | #define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95) | ||
170 | #define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96) | ||
171 | #define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97) | ||
172 | #define PPSMC_MSG_GPIO17 ((uint16_t)0x98) | ||
173 | #define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99) | ||
174 | #define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A) | ||
175 | #define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B) | ||
176 | #define PPSMC_MSG_API_GetSvi2Volt_Vddc ((uint16_t)0x9C) | ||
177 | #define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D) | ||
178 | #define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E) | ||
179 | |||
180 | #define PPSMC_MSG_BREAK ((uint16_t)0xF8) | ||
181 | |||
182 | #define PPSMC_MSG_Test ((uint16_t)0x100) | ||
183 | #define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250) | ||
184 | #define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251) | ||
185 | #define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252) | ||
186 | #define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253) | ||
187 | #define PPSMC_MSG_LoadUcodes ((uint16_t)0x254) | ||
188 | |||
189 | typedef uint16_t PPSMC_Msg; | ||
190 | |||
191 | #define PPSMC_EVENT_STATUS_THERMAL 0x00000001 | ||
192 | #define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 | ||
193 | #define PPSMC_EVENT_STATUS_DC 0x00000004 | ||
194 | #define PPSMC_EVENT_STATUS_GPIO17 0x00000008 | ||
195 | |||
196 | #pragma pack(pop) | ||
197 | |||
198 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h new file mode 100644 index 000000000000..099b7b56113c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_sdma_pkt_open.h | |||
@@ -0,0 +1,2240 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included | ||
12 | * in all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS | ||
15 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN | ||
18 | * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
19 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __TONGA_SDMA_PKT_OPEN_H_ | ||
24 | #define __TONGA_SDMA_PKT_OPEN_H_ | ||
25 | |||
26 | #define SDMA_OP_NOP 0 | ||
27 | #define SDMA_OP_COPY 1 | ||
28 | #define SDMA_OP_WRITE 2 | ||
29 | #define SDMA_OP_INDIRECT 4 | ||
30 | #define SDMA_OP_FENCE 5 | ||
31 | #define SDMA_OP_TRAP 6 | ||
32 | #define SDMA_OP_SEM 7 | ||
33 | #define SDMA_OP_POLL_REGMEM 8 | ||
34 | #define SDMA_OP_COND_EXE 9 | ||
35 | #define SDMA_OP_ATOMIC 10 | ||
36 | #define SDMA_OP_CONST_FILL 11 | ||
37 | #define SDMA_OP_GEN_PTEPDE 12 | ||
38 | #define SDMA_OP_TIMESTAMP 13 | ||
39 | #define SDMA_OP_SRBM_WRITE 14 | ||
40 | #define SDMA_OP_PRE_EXE 15 | ||
41 | #define SDMA_SUBOP_TIMESTAMP_SET 0 | ||
42 | #define SDMA_SUBOP_TIMESTAMP_GET 1 | ||
43 | #define SDMA_SUBOP_TIMESTAMP_GET_GLOBAL 2 | ||
44 | #define SDMA_SUBOP_COPY_LINEAR 0 | ||
45 | #define SDMA_SUBOP_COPY_LINEAR_SUB_WIND 4 | ||
46 | #define SDMA_SUBOP_COPY_TILED 1 | ||
47 | #define SDMA_SUBOP_COPY_TILED_SUB_WIND 5 | ||
48 | #define SDMA_SUBOP_COPY_T2T_SUB_WIND 6 | ||
49 | #define SDMA_SUBOP_COPY_SOA 3 | ||
50 | #define SDMA_SUBOP_WRITE_LINEAR 0 | ||
51 | #define SDMA_SUBOP_WRITE_TILED 1 | ||
52 | |||
53 | /*define for op field*/ | ||
54 | #define SDMA_PKT_HEADER_op_offset 0 | ||
55 | #define SDMA_PKT_HEADER_op_mask 0x000000FF | ||
56 | #define SDMA_PKT_HEADER_op_shift 0 | ||
57 | #define SDMA_PKT_HEADER_OP(x) (((x) & SDMA_PKT_HEADER_op_mask) << SDMA_PKT_HEADER_op_shift) | ||
58 | |||
59 | /*define for sub_op field*/ | ||
60 | #define SDMA_PKT_HEADER_sub_op_offset 0 | ||
61 | #define SDMA_PKT_HEADER_sub_op_mask 0x000000FF | ||
62 | #define SDMA_PKT_HEADER_sub_op_shift 8 | ||
63 | #define SDMA_PKT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_HEADER_sub_op_mask) << SDMA_PKT_HEADER_sub_op_shift) | ||
64 | |||
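These two macros compose the first dword of every SDMA packet; for example, a linear-copy header is built as:

/* Illustrative only: header dword for a linear copy packet. */
u32 header = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
	     SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);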
65 | /* | ||
66 | ** Definitions for SDMA_PKT_COPY_LINEAR packet | ||
67 | */ | ||
68 | |||
69 | /*define for HEADER word*/ | ||
70 | /*define for op field*/ | ||
71 | #define SDMA_PKT_COPY_LINEAR_HEADER_op_offset 0 | ||
72 | #define SDMA_PKT_COPY_LINEAR_HEADER_op_mask 0x000000FF | ||
73 | #define SDMA_PKT_COPY_LINEAR_HEADER_op_shift 0 | ||
74 | #define SDMA_PKT_COPY_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_op_shift) | ||
75 | |||
76 | /*define for sub_op field*/ | ||
77 | #define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_offset 0 | ||
78 | #define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask 0x000000FF | ||
79 | #define SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift 8 | ||
80 | #define SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_HEADER_sub_op_shift) | ||
81 | |||
82 | /*define for broadcast field*/ | ||
83 | #define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_offset 0 | ||
84 | #define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask 0x00000001 | ||
85 | #define SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift 27 | ||
86 | #define SDMA_PKT_COPY_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_LINEAR_HEADER_broadcast_shift) | ||
87 | |||
88 | /*define for COUNT word*/ | ||
89 | /*define for count field*/ | ||
90 | #define SDMA_PKT_COPY_LINEAR_COUNT_count_offset 1 | ||
91 | #define SDMA_PKT_COPY_LINEAR_COUNT_count_mask 0x003FFFFF | ||
92 | #define SDMA_PKT_COPY_LINEAR_COUNT_count_shift 0 | ||
93 | #define SDMA_PKT_COPY_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_LINEAR_COUNT_count_shift) | ||
94 | |||
95 | /*define for PARAMETER word*/ | ||
96 | /*define for dst_sw field*/ | ||
97 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_offset 2 | ||
98 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask 0x00000003 | ||
99 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift 16 | ||
100 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_sw_shift) | ||
101 | |||
102 | /*define for dst_ha field*/ | ||
103 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_offset 2 | ||
104 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask 0x00000001 | ||
105 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift 22 | ||
106 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_dst_ha_shift) | ||
107 | |||
108 | /*define for src_sw field*/ | ||
109 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_offset 2 | ||
110 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask 0x00000003 | ||
111 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift 24 | ||
112 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_sw_shift) | ||
113 | |||
114 | /*define for src_ha field*/ | ||
115 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_offset 2 | ||
116 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask 0x00000001 | ||
117 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift 30 | ||
118 | #define SDMA_PKT_COPY_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_LINEAR_PARAMETER_src_ha_shift) | ||
119 | |||
120 | /*define for SRC_ADDR_LO word*/ | ||
121 | /*define for src_addr_31_0 field*/ | ||
122 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3 | ||
123 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF | ||
124 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift 0 | ||
125 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift) | ||
126 | |||
127 | /*define for SRC_ADDR_HI word*/ | ||
128 | /*define for src_addr_63_32 field*/ | ||
129 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4 | ||
130 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF | ||
131 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift 0 | ||
132 | #define SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift) | ||
133 | |||
134 | /*define for DST_ADDR_LO word*/ | ||
135 | /*define for dst_addr_31_0 field*/ | ||
136 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_offset 5 | ||
137 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
138 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
139 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_LO_dst_addr_31_0_shift) | ||
140 | |||
141 | /*define for DST_ADDR_HI word*/ | ||
142 | /*define for dst_addr_63_32 field*/ | ||
143 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_offset 6 | ||
144 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
145 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
146 | #define SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_DST_ADDR_HI_dst_addr_63_32_shift) | ||
147 | |||
148 | |||
149 | /* | ||
150 | ** Definitions for SDMA_PKT_COPY_BROADCAST_LINEAR packet | ||
151 | */ | ||
152 | |||
153 | /*define for HEADER word*/ | ||
154 | /*define for op field*/ | ||
155 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_offset 0 | ||
156 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask 0x000000FF | ||
157 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift 0 | ||
158 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_op_shift) | ||
159 | |||
160 | /*define for sub_op field*/ | ||
161 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_offset 0 | ||
162 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask 0x000000FF | ||
163 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift 8 | ||
164 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_sub_op_shift) | ||
165 | |||
166 | /*define for broadcast field*/ | ||
167 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_offset 0 | ||
168 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask 0x00000001 | ||
169 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift 27 | ||
170 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_HEADER_broadcast_shift) | ||
171 | |||
172 | /*define for COUNT word*/ | ||
173 | /*define for count field*/ | ||
174 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_offset 1 | ||
175 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask 0x003FFFFF | ||
176 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift 0 | ||
177 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_COUNT_count_shift) | ||
178 | |||
179 | /*define for PARAMETER word*/ | ||
180 | /*define for dst2_sw field*/ | ||
181 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_offset 2 | ||
182 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask 0x00000003 | ||
183 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift 8 | ||
184 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_sw_shift) | ||
185 | |||
186 | /*define for dst2_ha field*/ | ||
187 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_offset 2 | ||
188 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask 0x00000001 | ||
189 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift 14 | ||
190 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST2_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst2_ha_shift) | ||
191 | |||
192 | /*define for dst1_sw field*/ | ||
193 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_offset 2 | ||
194 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask 0x00000003 | ||
195 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift 16 | ||
196 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_sw_shift) | ||
197 | |||
198 | /*define for dst1_ha field*/ | ||
199 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_offset 2 | ||
200 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask 0x00000001 | ||
201 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift 22 | ||
202 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_DST1_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_dst1_ha_shift) | ||
203 | |||
204 | /*define for src_sw field*/ | ||
205 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_offset 2 | ||
206 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask 0x00000003 | ||
207 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift 24 | ||
208 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_SW(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_sw_shift) | ||
209 | |||
210 | /*define for src_ha field*/ | ||
211 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_offset 2 | ||
212 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask 0x00000001 | ||
213 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift 30 | ||
214 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_SRC_HA(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_PARAMETER_src_ha_shift) | ||
215 | |||
216 | /*define for SRC_ADDR_LO word*/ | ||
217 | /*define for src_addr_31_0 field*/ | ||
218 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_offset 3 | ||
219 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF | ||
220 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift 0 | ||
221 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_LO_src_addr_31_0_shift) | ||
222 | |||
223 | /*define for SRC_ADDR_HI word*/ | ||
224 | /*define for src_addr_63_32 field*/ | ||
225 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_offset 4 | ||
226 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF | ||
227 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift 0 | ||
228 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_SRC_ADDR_HI_src_addr_63_32_shift) | ||
229 | |||
230 | /*define for DST1_ADDR_LO word*/ | ||
231 | /*define for dst1_addr_31_0 field*/ | ||
232 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_offset 5 | ||
233 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask 0xFFFFFFFF | ||
234 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift 0 | ||
235 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_DST1_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_LO_dst1_addr_31_0_shift) | ||
236 | |||
237 | /*define for DST1_ADDR_HI word*/ | ||
238 | /*define for dst1_addr_63_32 field*/ | ||
239 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_offset 6 | ||
240 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask 0xFFFFFFFF | ||
241 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift 0 | ||
242 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_DST1_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST1_ADDR_HI_dst1_addr_63_32_shift) | ||
243 | |||
244 | /*define for DST2_ADDR_LO word*/ | ||
245 | /*define for dst2_addr_31_0 field*/ | ||
246 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_offset 7 | ||
247 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask 0xFFFFFFFF | ||
248 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift 0 | ||
249 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_DST2_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_LO_dst2_addr_31_0_shift) | ||
250 | |||
251 | /*define for DST2_ADDR_HI word*/ | ||
252 | /*define for dst2_addr_63_32 field*/ | ||
253 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_offset 8 | ||
254 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask 0xFFFFFFFF | ||
255 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift 0 | ||
256 | #define SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_DST2_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_mask) << SDMA_PKT_COPY_BROADCAST_LINEAR_DST2_ADDR_HI_dst2_addr_63_32_shift) | ||
257 | |||
258 | |||
259 | /* | ||
260 | ** Definitions for SDMA_PKT_COPY_LINEAR_SUBWIN packet | ||
261 | */ | ||
262 | |||
263 | /*define for HEADER word*/ | ||
264 | /*define for op field*/ | ||
265 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_offset 0 | ||
266 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask 0x000000FF | ||
267 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift 0 | ||
268 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_op_shift) | ||
269 | |||
270 | /*define for sub_op field*/ | ||
271 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_offset 0 | ||
272 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask 0x000000FF | ||
273 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift 8 | ||
274 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_sub_op_shift) | ||
275 | |||
276 | /*define for elementsize field*/ | ||
277 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_offset 0 | ||
278 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask 0x00000007 | ||
279 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift 29 | ||
280 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_ELEMENTSIZE(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_HEADER_elementsize_shift) | ||
281 | |||
282 | /*define for SRC_ADDR_LO word*/ | ||
283 | /*define for src_addr_31_0 field*/ | ||
284 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_offset 1 | ||
285 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF | ||
286 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift 0 | ||
287 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_LO_src_addr_31_0_shift) | ||
288 | |||
289 | /*define for SRC_ADDR_HI word*/ | ||
290 | /*define for src_addr_63_32 field*/ | ||
291 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_offset 2 | ||
292 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF | ||
293 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift 0 | ||
294 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_SRC_ADDR_HI_src_addr_63_32_shift) | ||
295 | |||
296 | /*define for DW_3 word*/ | ||
297 | /*define for src_x field*/ | ||
298 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_offset 3 | ||
299 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask 0x00003FFF | ||
300 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift 0 | ||
301 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_x_shift) | ||
302 | |||
303 | /*define for src_y field*/ | ||
304 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_offset 3 | ||
305 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask 0x00003FFF | ||
306 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift 16 | ||
307 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_3_src_y_shift) | ||
308 | |||
309 | /*define for DW_4 word*/ | ||
310 | /*define for src_z field*/ | ||
311 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_offset 4 | ||
312 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask 0x000007FF | ||
313 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift 0 | ||
314 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_z_shift) | ||
315 | |||
316 | /*define for src_pitch field*/ | ||
317 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_offset 4 | ||
318 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask 0x00003FFF | ||
319 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift 16 | ||
320 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_SRC_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_4_src_pitch_shift) | ||
321 | |||
322 | /*define for DW_5 word*/ | ||
323 | /*define for src_slice_pitch field*/ | ||
324 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_offset 5 | ||
325 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask 0x0FFFFFFF | ||
326 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift 0 | ||
327 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_5_src_slice_pitch_shift) | ||
328 | |||
329 | /*define for DST_ADDR_LO word*/ | ||
330 | /*define for dst_addr_31_0 field*/ | ||
331 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_offset 6 | ||
332 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
333 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
334 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_LO_dst_addr_31_0_shift) | ||
335 | |||
336 | /*define for DST_ADDR_HI word*/ | ||
337 | /*define for dst_addr_63_32 field*/ | ||
338 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_offset 7 | ||
339 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
340 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
341 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DST_ADDR_HI_dst_addr_63_32_shift) | ||
342 | |||
343 | /*define for DW_8 word*/ | ||
344 | /*define for dst_x field*/ | ||
345 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_offset 8 | ||
346 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask 0x00003FFF | ||
347 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift 0 | ||
348 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_x_shift) | ||
349 | |||
350 | /*define for dst_y field*/ | ||
351 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_offset 8 | ||
352 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask 0x00003FFF | ||
353 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift 16 | ||
354 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_DST_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_8_dst_y_shift) | ||
355 | |||
356 | /*define for DW_9 word*/ | ||
357 | /*define for dst_z field*/ | ||
358 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_offset 9 | ||
359 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask 0x000007FF | ||
360 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift 0 | ||
361 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_z_shift) | ||
362 | |||
363 | /*define for dst_pitch field*/ | ||
364 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_offset 9 | ||
365 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask 0x00003FFF | ||
366 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift 16 | ||
367 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_DST_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_9_dst_pitch_shift) | ||
368 | |||
369 | /*define for DW_10 word*/ | ||
370 | /*define for dst_slice_pitch field*/ | ||
371 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_offset 10 | ||
372 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask 0x0FFFFFFF | ||
373 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift 0 | ||
374 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_10_dst_slice_pitch_shift) | ||
375 | |||
376 | /*define for DW_11 word*/ | ||
377 | /*define for rect_x field*/ | ||
378 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_offset 11 | ||
379 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask 0x00003FFF | ||
380 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift 0 | ||
381 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_X(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_x_shift) | ||
382 | |||
383 | /*define for rect_y field*/ | ||
384 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_offset 11 | ||
385 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask 0x00003FFF | ||
386 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift 16 | ||
387 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_RECT_Y(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_11_rect_y_shift) | ||
388 | |||
389 | /*define for DW_12 word*/ | ||
390 | /*define for rect_z field*/ | ||
391 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_offset 12 | ||
392 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask 0x000007FF | ||
393 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift 0 | ||
394 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_RECT_Z(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_rect_z_shift) | ||
395 | |||
396 | /*define for dst_sw field*/ | ||
397 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_offset 12 | ||
398 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask 0x00000003 | ||
399 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift 16 | ||
400 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_sw_shift) | ||
401 | |||
402 | /*define for dst_ha field*/ | ||
403 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_offset 12 | ||
404 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask 0x00000001 | ||
405 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift 22 | ||
406 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_DST_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_dst_ha_shift) | ||
407 | |||
408 | /*define for src_sw field*/ | ||
409 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_offset 12 | ||
410 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask 0x00000003 | ||
411 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift 24 | ||
412 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_SW(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_sw_shift) | ||
413 | |||
414 | /*define for src_ha field*/ | ||
415 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_offset 12 | ||
416 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask 0x00000001 | ||
417 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift 30 | ||
418 | #define SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_SRC_HA(x) (((x) & SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_mask) << SDMA_PKT_COPY_LINEAR_SUBWIN_DW_12_src_ha_shift) | ||
419 | |||
420 | |||
421 | /* | ||
422 | ** Definitions for SDMA_PKT_COPY_TILED packet | ||
423 | */ | ||
424 | |||
425 | /*define for HEADER word*/ | ||
426 | /*define for op field*/ | ||
427 | #define SDMA_PKT_COPY_TILED_HEADER_op_offset 0 | ||
428 | #define SDMA_PKT_COPY_TILED_HEADER_op_mask 0x000000FF | ||
429 | #define SDMA_PKT_COPY_TILED_HEADER_op_shift 0 | ||
430 | #define SDMA_PKT_COPY_TILED_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_op_mask) << SDMA_PKT_COPY_TILED_HEADER_op_shift) | ||
431 | |||
432 | /*define for sub_op field*/ | ||
433 | #define SDMA_PKT_COPY_TILED_HEADER_sub_op_offset 0 | ||
434 | #define SDMA_PKT_COPY_TILED_HEADER_sub_op_mask 0x000000FF | ||
435 | #define SDMA_PKT_COPY_TILED_HEADER_sub_op_shift 8 | ||
436 | #define SDMA_PKT_COPY_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_HEADER_sub_op_shift) | ||
437 | |||
438 | /*define for detile field*/ | ||
439 | #define SDMA_PKT_COPY_TILED_HEADER_detile_offset 0 | ||
440 | #define SDMA_PKT_COPY_TILED_HEADER_detile_mask 0x00000001 | ||
441 | #define SDMA_PKT_COPY_TILED_HEADER_detile_shift 31 | ||
442 | #define SDMA_PKT_COPY_TILED_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_HEADER_detile_shift) | ||
443 | |||
444 | /*define for TILED_ADDR_LO word*/ | ||
445 | /*define for tiled_addr_31_0 field*/ | ||
446 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_offset 1 | ||
447 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask 0xFFFFFFFF | ||
448 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift 0 | ||
449 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_LO_tiled_addr_31_0_shift) | ||
450 | |||
451 | /*define for TILED_ADDR_HI word*/ | ||
452 | /*define for tiled_addr_63_32 field*/ | ||
453 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_offset 2 | ||
454 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask 0xFFFFFFFF | ||
455 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift 0 | ||
456 | #define SDMA_PKT_COPY_TILED_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_TILED_ADDR_HI_tiled_addr_63_32_shift) | ||
457 | |||
458 | /*define for DW_3 word*/ | ||
459 | /*define for pitch_in_tile field*/ | ||
460 | #define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_offset 3 | ||
461 | #define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask 0x000007FF | ||
462 | #define SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift 0 | ||
463 | #define SDMA_PKT_COPY_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_DW_3_pitch_in_tile_shift) | ||
464 | |||
465 | /*define for height field*/ | ||
466 | #define SDMA_PKT_COPY_TILED_DW_3_height_offset 3 | ||
467 | #define SDMA_PKT_COPY_TILED_DW_3_height_mask 0x00003FFF | ||
468 | #define SDMA_PKT_COPY_TILED_DW_3_height_shift 16 | ||
469 | #define SDMA_PKT_COPY_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_COPY_TILED_DW_3_height_mask) << SDMA_PKT_COPY_TILED_DW_3_height_shift) | ||
470 | |||
471 | /*define for DW_4 word*/ | ||
472 | /*define for slice_pitch field*/ | ||
473 | #define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_offset 4 | ||
474 | #define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask 0x003FFFFF | ||
475 | #define SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift 0 | ||
476 | #define SDMA_PKT_COPY_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_COPY_TILED_DW_4_slice_pitch_shift) | ||
477 | |||
478 | /*define for DW_5 word*/ | ||
479 | /*define for element_size field*/ | ||
480 | #define SDMA_PKT_COPY_TILED_DW_5_element_size_offset 5 | ||
481 | #define SDMA_PKT_COPY_TILED_DW_5_element_size_mask 0x00000007 | ||
482 | #define SDMA_PKT_COPY_TILED_DW_5_element_size_shift 0 | ||
483 | #define SDMA_PKT_COPY_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_element_size_mask) << SDMA_PKT_COPY_TILED_DW_5_element_size_shift) | ||
484 | |||
485 | /*define for array_mode field*/ | ||
486 | #define SDMA_PKT_COPY_TILED_DW_5_array_mode_offset 5 | ||
487 | #define SDMA_PKT_COPY_TILED_DW_5_array_mode_mask 0x0000000F | ||
488 | #define SDMA_PKT_COPY_TILED_DW_5_array_mode_shift 3 | ||
489 | #define SDMA_PKT_COPY_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_array_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_array_mode_shift) | ||
490 | |||
491 | /*define for mit_mode field*/ | ||
492 | #define SDMA_PKT_COPY_TILED_DW_5_mit_mode_offset 5 | ||
493 | #define SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask 0x00000007 | ||
494 | #define SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift 8 | ||
495 | #define SDMA_PKT_COPY_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mit_mode_mask) << SDMA_PKT_COPY_TILED_DW_5_mit_mode_shift) | ||
496 | |||
497 | /*define for tilesplit_size field*/ | ||
498 | #define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_offset 5 | ||
499 | #define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask 0x00000007 | ||
500 | #define SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift 11 | ||
501 | #define SDMA_PKT_COPY_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_DW_5_tilesplit_size_shift) | ||
502 | |||
503 | /*define for bank_w field*/ | ||
504 | #define SDMA_PKT_COPY_TILED_DW_5_bank_w_offset 5 | ||
505 | #define SDMA_PKT_COPY_TILED_DW_5_bank_w_mask 0x00000003 | ||
506 | #define SDMA_PKT_COPY_TILED_DW_5_bank_w_shift 15 | ||
507 | #define SDMA_PKT_COPY_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_w_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_w_shift) | ||
508 | |||
509 | /*define for bank_h field*/ | ||
510 | #define SDMA_PKT_COPY_TILED_DW_5_bank_h_offset 5 | ||
511 | #define SDMA_PKT_COPY_TILED_DW_5_bank_h_mask 0x00000003 | ||
512 | #define SDMA_PKT_COPY_TILED_DW_5_bank_h_shift 18 | ||
513 | #define SDMA_PKT_COPY_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_bank_h_mask) << SDMA_PKT_COPY_TILED_DW_5_bank_h_shift) | ||
514 | |||
515 | /*define for num_bank field*/ | ||
516 | #define SDMA_PKT_COPY_TILED_DW_5_num_bank_offset 5 | ||
517 | #define SDMA_PKT_COPY_TILED_DW_5_num_bank_mask 0x00000003 | ||
518 | #define SDMA_PKT_COPY_TILED_DW_5_num_bank_shift 21 | ||
519 | #define SDMA_PKT_COPY_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_num_bank_mask) << SDMA_PKT_COPY_TILED_DW_5_num_bank_shift) | ||
520 | |||
521 | /*define for mat_aspt field*/ | ||
522 | #define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_offset 5 | ||
523 | #define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask 0x00000003 | ||
524 | #define SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift 24 | ||
525 | #define SDMA_PKT_COPY_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_COPY_TILED_DW_5_mat_aspt_shift) | ||
526 | |||
527 | /*define for pipe_config field*/ | ||
528 | #define SDMA_PKT_COPY_TILED_DW_5_pipe_config_offset 5 | ||
529 | #define SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask 0x0000001F | ||
530 | #define SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift 26 | ||
531 | #define SDMA_PKT_COPY_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_DW_5_pipe_config_mask) << SDMA_PKT_COPY_TILED_DW_5_pipe_config_shift) | ||
532 | |||
533 | /*define for DW_6 word*/ | ||
534 | /*define for x field*/ | ||
535 | #define SDMA_PKT_COPY_TILED_DW_6_x_offset 6 | ||
536 | #define SDMA_PKT_COPY_TILED_DW_6_x_mask 0x00003FFF | ||
537 | #define SDMA_PKT_COPY_TILED_DW_6_x_shift 0 | ||
538 | #define SDMA_PKT_COPY_TILED_DW_6_X(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_x_mask) << SDMA_PKT_COPY_TILED_DW_6_x_shift) | ||
539 | |||
540 | /*define for y field*/ | ||
541 | #define SDMA_PKT_COPY_TILED_DW_6_y_offset 6 | ||
542 | #define SDMA_PKT_COPY_TILED_DW_6_y_mask 0x00003FFF | ||
543 | #define SDMA_PKT_COPY_TILED_DW_6_y_shift 16 | ||
544 | #define SDMA_PKT_COPY_TILED_DW_6_Y(x) (((x) & SDMA_PKT_COPY_TILED_DW_6_y_mask) << SDMA_PKT_COPY_TILED_DW_6_y_shift) | ||
545 | |||
546 | /*define for DW_7 word*/ | ||
547 | /*define for z field*/ | ||
548 | #define SDMA_PKT_COPY_TILED_DW_7_z_offset 7 | ||
549 | #define SDMA_PKT_COPY_TILED_DW_7_z_mask 0x00000FFF | ||
550 | #define SDMA_PKT_COPY_TILED_DW_7_z_shift 0 | ||
551 | #define SDMA_PKT_COPY_TILED_DW_7_Z(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_z_mask) << SDMA_PKT_COPY_TILED_DW_7_z_shift) | ||
552 | |||
553 | /*define for linear_sw field*/ | ||
554 | #define SDMA_PKT_COPY_TILED_DW_7_linear_sw_offset 7 | ||
555 | #define SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask 0x00000003 | ||
556 | #define SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift 16 | ||
557 | #define SDMA_PKT_COPY_TILED_DW_7_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_linear_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_linear_sw_shift) | ||
558 | |||
559 | /*define for tile_sw field*/ | ||
560 | #define SDMA_PKT_COPY_TILED_DW_7_tile_sw_offset 7 | ||
561 | #define SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask 0x00000003 | ||
562 | #define SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift 24 | ||
563 | #define SDMA_PKT_COPY_TILED_DW_7_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_DW_7_tile_sw_mask) << SDMA_PKT_COPY_TILED_DW_7_tile_sw_shift) | ||
564 | |||
565 | /*define for LINEAR_ADDR_LO word*/ | ||
566 | /*define for linear_addr_31_0 field*/ | ||
567 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_offset 8 | ||
568 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF | ||
569 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 | ||
570 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_LO_linear_addr_31_0_shift) | ||
571 | |||
572 | /*define for LINEAR_ADDR_HI word*/ | ||
573 | /*define for linear_addr_63_32 field*/ | ||
574 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_offset 9 | ||
575 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF | ||
576 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 | ||
577 | #define SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_LINEAR_ADDR_HI_linear_addr_63_32_shift) | ||
578 | |||
579 | /*define for LINEAR_PITCH word*/ | ||
580 | /*define for linear_pitch field*/ | ||
581 | #define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_offset 10 | ||
582 | #define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask 0x0007FFFF | ||
583 | #define SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift 0 | ||
584 | #define SDMA_PKT_COPY_TILED_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_TILED_LINEAR_PITCH_linear_pitch_shift) | ||
585 | |||
586 | /*define for COUNT word*/ | ||
587 | /*define for count field*/ | ||
588 | #define SDMA_PKT_COPY_TILED_COUNT_count_offset 11 | ||
589 | #define SDMA_PKT_COPY_TILED_COUNT_count_mask 0x000FFFFF | ||
590 | #define SDMA_PKT_COPY_TILED_COUNT_count_shift 0 | ||
591 | #define SDMA_PKT_COPY_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_TILED_COUNT_count_mask) << SDMA_PKT_COPY_TILED_COUNT_count_shift) | ||
592 | |||
593 | |||
594 | /* | ||
595 | ** Definitions for SDMA_PKT_COPY_L2T_BROADCAST packet | ||
596 | */ | ||
597 | |||
598 | /*define for HEADER word*/ | ||
599 | /*define for op field*/ | ||
600 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_offset 0 | ||
601 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask 0x000000FF | ||
602 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift 0 | ||
603 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_op_shift) | ||
604 | |||
605 | /*define for sub_op field*/ | ||
606 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_offset 0 | ||
607 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask 0x000000FF | ||
608 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift 8 | ||
609 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_sub_op_shift) | ||
610 | |||
611 | /*define for videocopy field*/ | ||
612 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_offset 0 | ||
613 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask 0x00000001 | ||
614 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift 26 | ||
615 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_VIDEOCOPY(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_videocopy_shift) | ||
616 | |||
617 | /*define for broadcast field*/ | ||
618 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_offset 0 | ||
619 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask 0x00000001 | ||
620 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift 27 | ||
621 | #define SDMA_PKT_COPY_L2T_BROADCAST_HEADER_BROADCAST(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_mask) << SDMA_PKT_COPY_L2T_BROADCAST_HEADER_broadcast_shift) | ||
622 | |||
623 | /*define for TILED_ADDR_LO_0 word*/ | ||
624 | /*define for tiled_addr0_31_0 field*/ | ||
625 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_offset 1 | ||
626 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask 0xFFFFFFFF | ||
627 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift 0 | ||
628 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_TILED_ADDR0_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_0_tiled_addr0_31_0_shift) | ||
629 | |||
630 | /*define for TILED_ADDR_HI_0 word*/ | ||
631 | /*define for tiled_addr0_63_32 field*/ | ||
632 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_offset 2 | ||
633 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask 0xFFFFFFFF | ||
634 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift 0 | ||
635 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_TILED_ADDR0_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_0_tiled_addr0_63_32_shift) | ||
636 | |||
637 | /*define for TILED_ADDR_LO_1 word*/ | ||
638 | /*define for tiled_addr1_31_0 field*/ | ||
639 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_offset 3 | ||
640 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask 0xFFFFFFFF | ||
641 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift 0 | ||
642 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_TILED_ADDR1_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_LO_1_tiled_addr1_31_0_shift) | ||
643 | |||
644 | /*define for TILED_ADDR_HI_1 word*/ | ||
645 | /*define for tiled_addr1_63_32 field*/ | ||
646 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_offset 4 | ||
647 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask 0xFFFFFFFF | ||
648 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift 0 | ||
649 | #define SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_TILED_ADDR1_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_TILED_ADDR_HI_1_tiled_addr1_63_32_shift) | ||
650 | |||
651 | /*define for DW_5 word*/ | ||
652 | /*define for pitch_in_tile field*/ | ||
653 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_offset 5 | ||
654 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask 0x000007FF | ||
655 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift 0 | ||
656 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_pitch_in_tile_shift) | ||
657 | |||
658 | /*define for height field*/ | ||
659 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_offset 5 | ||
660 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask 0x00003FFF | ||
661 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift 16 | ||
662 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_5_HEIGHT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_5_height_shift) | ||
663 | |||
664 | /*define for DW_6 word*/ | ||
665 | /*define for slice_pitch field*/ | ||
666 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_offset 6 | ||
667 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask 0x003FFFFF | ||
668 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift 0 | ||
669 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_6_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_6_slice_pitch_shift) | ||
670 | |||
671 | /*define for DW_7 word*/ | ||
672 | /*define for element_size field*/ | ||
673 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_offset 7 | ||
674 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask 0x00000007 | ||
675 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift 0 | ||
676 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_element_size_shift) | ||
677 | |||
678 | /*define for array_mode field*/ | ||
679 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_offset 7 | ||
680 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask 0x0000000F | ||
681 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift 3 | ||
682 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_array_mode_shift) | ||
683 | |||
684 | /*define for mit_mode field*/ | ||
685 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_offset 7 | ||
686 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask 0x00000007 | ||
687 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift 8 | ||
688 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MIT_MODE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mit_mode_shift) | ||
689 | |||
690 | /*define for tilesplit_size field*/ | ||
691 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_offset 7 | ||
692 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask 0x00000007 | ||
693 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift 11 | ||
694 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_tilesplit_size_shift) | ||
695 | |||
696 | /*define for bank_w field*/ | ||
697 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_offset 7 | ||
698 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask 0x00000003 | ||
699 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift 15 | ||
700 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_W(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_w_shift) | ||
701 | |||
702 | /*define for bank_h field*/ | ||
703 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_offset 7 | ||
704 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask 0x00000003 | ||
705 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift 18 | ||
706 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_BANK_H(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_bank_h_shift) | ||
707 | |||
708 | /*define for num_bank field*/ | ||
709 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_offset 7 | ||
710 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask 0x00000003 | ||
711 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift 21 | ||
712 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_NUM_BANK(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_num_bank_shift) | ||
713 | |||
714 | /*define for mat_aspt field*/ | ||
715 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_offset 7 | ||
716 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask 0x00000003 | ||
717 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift 24 | ||
718 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_mat_aspt_shift) | ||
719 | |||
720 | /*define for pipe_config field*/ | ||
721 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_offset 7 | ||
722 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask 0x0000001F | ||
723 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift 26 | ||
724 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_7_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_7_pipe_config_shift) | ||
725 | |||
726 | /*define for DW_8 word*/ | ||
727 | /*define for x field*/ | ||
728 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_offset 8 | ||
729 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask 0x00003FFF | ||
730 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift 0 | ||
731 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_X(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_x_shift) | ||
732 | |||
733 | /*define for y field*/ | ||
734 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_offset 8 | ||
735 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask 0x00003FFF | ||
736 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift 16 | ||
737 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_8_Y(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_8_y_shift) | ||
738 | |||
739 | /*define for DW_9 word*/ | ||
740 | /*define for z field*/ | ||
741 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_offset 9 | ||
742 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask 0x00000FFF | ||
743 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift 0 | ||
744 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_9_Z(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_9_z_shift) | ||
745 | |||
746 | /*define for DW_10 word*/ | ||
747 | /*define for dst2_sw field*/ | ||
748 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_offset 10 | ||
749 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask 0x00000003 | ||
750 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift 8 | ||
751 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_sw_shift) | ||
752 | |||
753 | /*define for dst2_ha field*/ | ||
754 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_offset 10 | ||
755 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask 0x00000001 | ||
756 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift 14 | ||
757 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_DST2_HA(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_dst2_ha_shift) | ||
758 | |||
759 | /*define for linear_sw field*/ | ||
760 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_offset 10 | ||
761 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask 0x00000003 | ||
762 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift 16 | ||
763 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_linear_sw_shift) | ||
764 | |||
765 | /*define for tile_sw field*/ | ||
766 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_offset 10 | ||
767 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask 0x00000003 | ||
768 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift 24 | ||
769 | #define SDMA_PKT_COPY_L2T_BROADCAST_DW_10_TILE_SW(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_mask) << SDMA_PKT_COPY_L2T_BROADCAST_DW_10_tile_sw_shift) | ||
770 | |||
771 | /*define for LINEAR_ADDR_LO word*/ | ||
772 | /*define for linear_addr_31_0 field*/ | ||
773 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_offset 11 | ||
774 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF | ||
775 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 | ||
776 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_LO_linear_addr_31_0_shift) | ||
777 | |||
778 | /*define for LINEAR_ADDR_HI word*/ | ||
779 | /*define for linear_addr_63_32 field*/ | ||
780 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_offset 12 | ||
781 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF | ||
782 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 | ||
783 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_ADDR_HI_linear_addr_63_32_shift) | ||
784 | |||
785 | /*define for LINEAR_PITCH word*/ | ||
786 | /*define for linear_pitch field*/ | ||
787 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_offset 13 | ||
788 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask 0x0007FFFF | ||
789 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift 0 | ||
790 | #define SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_mask) << SDMA_PKT_COPY_L2T_BROADCAST_LINEAR_PITCH_linear_pitch_shift) | ||
791 | |||
792 | /*define for COUNT word*/ | ||
793 | /*define for count field*/ | ||
794 | #define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_offset 14 | ||
795 | #define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask 0x000FFFFF | ||
796 | #define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift 0 | ||
797 | #define SDMA_PKT_COPY_L2T_BROADCAST_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_mask) << SDMA_PKT_COPY_L2T_BROADCAST_COUNT_count_shift) | ||
798 | |||
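In the broadcast form, one linear source feeds two tiled destinations (the TILED_ADDR_LO/HI_0 and _1 pairs above). A hedged sketch of building just the HEADER dword; the actual op/sub_op values would come from the opcode constants defined elsewhere in this header, so they are taken as parameters here rather than asserted:

static u32 l2t_broadcast_header(u32 op, u32 sub_op)
{
	/* broadcast=1 selects the dual-destination form of the packet */
	return SDMA_PKT_COPY_L2T_BROADCAST_HEADER_OP(op) |
	       SDMA_PKT_COPY_L2T_BROADCAST_HEADER_SUB_OP(sub_op) |
	       SDMA_PKT_COPY_L2T_BROADCAST_HEADER_BROADCAST(1);
}
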
799 | |||
800 | /* | ||
801 | ** Definitions for SDMA_PKT_COPY_T2T packet | ||
802 | */ | ||
803 | |||
804 | /*define for HEADER word*/ | ||
805 | /*define for op field*/ | ||
806 | #define SDMA_PKT_COPY_T2T_HEADER_op_offset 0 | ||
807 | #define SDMA_PKT_COPY_T2T_HEADER_op_mask 0x000000FF | ||
808 | #define SDMA_PKT_COPY_T2T_HEADER_op_shift 0 | ||
809 | #define SDMA_PKT_COPY_T2T_HEADER_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_op_mask) << SDMA_PKT_COPY_T2T_HEADER_op_shift) | ||
810 | |||
811 | /*define for sub_op field*/ | ||
812 | #define SDMA_PKT_COPY_T2T_HEADER_sub_op_offset 0 | ||
813 | #define SDMA_PKT_COPY_T2T_HEADER_sub_op_mask 0x000000FF | ||
814 | #define SDMA_PKT_COPY_T2T_HEADER_sub_op_shift 8 | ||
815 | #define SDMA_PKT_COPY_T2T_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_T2T_HEADER_sub_op_mask) << SDMA_PKT_COPY_T2T_HEADER_sub_op_shift) | ||
816 | |||
817 | /*define for SRC_ADDR_LO word*/ | ||
818 | /*define for src_addr_31_0 field*/ | ||
819 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_offset 1 | ||
820 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask 0xFFFFFFFF | ||
821 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift 0 | ||
822 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_LO_SRC_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_LO_src_addr_31_0_shift) | ||
823 | |||
824 | /*define for SRC_ADDR_HI word*/ | ||
825 | /*define for src_addr_63_32 field*/ | ||
826 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_offset 2 | ||
827 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask 0xFFFFFFFF | ||
828 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift 0 | ||
829 | #define SDMA_PKT_COPY_T2T_SRC_ADDR_HI_SRC_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_mask) << SDMA_PKT_COPY_T2T_SRC_ADDR_HI_src_addr_63_32_shift) | ||
830 | |||
831 | /*define for DW_3 word*/ | ||
832 | /*define for src_x field*/ | ||
833 | #define SDMA_PKT_COPY_T2T_DW_3_src_x_offset 3 | ||
834 | #define SDMA_PKT_COPY_T2T_DW_3_src_x_mask 0x00003FFF | ||
835 | #define SDMA_PKT_COPY_T2T_DW_3_src_x_shift 0 | ||
836 | #define SDMA_PKT_COPY_T2T_DW_3_SRC_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_x_mask) << SDMA_PKT_COPY_T2T_DW_3_src_x_shift) | ||
837 | |||
838 | /*define for src_y field*/ | ||
839 | #define SDMA_PKT_COPY_T2T_DW_3_src_y_offset 3 | ||
840 | #define SDMA_PKT_COPY_T2T_DW_3_src_y_mask 0x00003FFF | ||
841 | #define SDMA_PKT_COPY_T2T_DW_3_src_y_shift 16 | ||
842 | #define SDMA_PKT_COPY_T2T_DW_3_SRC_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_3_src_y_mask) << SDMA_PKT_COPY_T2T_DW_3_src_y_shift) | ||
843 | |||
844 | /*define for DW_4 word*/ | ||
845 | /*define for src_z field*/ | ||
846 | #define SDMA_PKT_COPY_T2T_DW_4_src_z_offset 4 | ||
847 | #define SDMA_PKT_COPY_T2T_DW_4_src_z_mask 0x000007FF | ||
848 | #define SDMA_PKT_COPY_T2T_DW_4_src_z_shift 0 | ||
849 | #define SDMA_PKT_COPY_T2T_DW_4_SRC_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_z_mask) << SDMA_PKT_COPY_T2T_DW_4_src_z_shift) | ||
850 | |||
851 | /*define for src_pitch_in_tile field*/ | ||
852 | #define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_offset 4 | ||
853 | #define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask 0x00000FFF | ||
854 | #define SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift 16 | ||
855 | #define SDMA_PKT_COPY_T2T_DW_4_SRC_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_4_src_pitch_in_tile_shift) | ||
856 | |||
857 | /*define for DW_5 word*/ | ||
858 | /*define for src_slice_pitch field*/ | ||
859 | #define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_offset 5 | ||
860 | #define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask 0x003FFFFF | ||
861 | #define SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift 0 | ||
862 | #define SDMA_PKT_COPY_T2T_DW_5_SRC_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_5_src_slice_pitch_shift) | ||
863 | |||
864 | /*define for DW_6 word*/ | ||
865 | /*define for src_element_size field*/ | ||
866 | #define SDMA_PKT_COPY_T2T_DW_6_src_element_size_offset 6 | ||
867 | #define SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask 0x00000007 | ||
868 | #define SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift 0 | ||
869 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_element_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_element_size_shift) | ||
870 | |||
871 | /*define for src_array_mode field*/ | ||
872 | #define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_offset 6 | ||
873 | #define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask 0x0000000F | ||
874 | #define SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift 3 | ||
875 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_array_mode_shift) | ||
876 | |||
877 | /*define for src_mit_mode field*/ | ||
878 | #define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_offset 6 | ||
879 | #define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask 0x00000007 | ||
880 | #define SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift 8 | ||
881 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mit_mode_shift) | ||
882 | |||
883 | /*define for src_tilesplit_size field*/ | ||
884 | #define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_offset 6 | ||
885 | #define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask 0x00000007 | ||
886 | #define SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift 11 | ||
887 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_6_src_tilesplit_size_shift) | ||
888 | |||
889 | /*define for src_bank_w field*/ | ||
890 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_offset 6 | ||
891 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask 0x00000003 | ||
892 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift 15 | ||
893 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_w_shift) | ||
894 | |||
895 | /*define for src_bank_h field*/ | ||
896 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_offset 6 | ||
897 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask 0x00000003 | ||
898 | #define SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift 18 | ||
899 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_6_src_bank_h_shift) | ||
900 | |||
901 | /*define for src_num_bank field*/ | ||
902 | #define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_offset 6 | ||
903 | #define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask 0x00000003 | ||
904 | #define SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift 21 | ||
905 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_6_src_num_bank_shift) | ||
906 | |||
907 | /*define for src_mat_aspt field*/ | ||
908 | #define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_offset 6 | ||
909 | #define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask 0x00000003 | ||
910 | #define SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift 24 | ||
911 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_6_src_mat_aspt_shift) | ||
912 | |||
913 | /*define for src_pipe_config field*/ | ||
914 | #define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_offset 6 | ||
915 | #define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask 0x0000001F | ||
916 | #define SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift 26 | ||
917 | #define SDMA_PKT_COPY_T2T_DW_6_SRC_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_6_src_pipe_config_shift) | ||
918 | |||
919 | /*define for DST_ADDR_LO word*/ | ||
920 | /*define for dst_addr_31_0 field*/ | ||
921 | #define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_offset 7 | ||
922 | #define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
923 | #define SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
924 | #define SDMA_PKT_COPY_T2T_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_LO_dst_addr_31_0_shift) | ||
925 | |||
926 | /*define for DST_ADDR_HI word*/ | ||
927 | /*define for dst_addr_63_32 field*/ | ||
928 | #define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_offset 8 | ||
929 | #define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
930 | #define SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
931 | #define SDMA_PKT_COPY_T2T_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_COPY_T2T_DST_ADDR_HI_dst_addr_63_32_shift) | ||
932 | |||
933 | /*define for DW_9 word*/ | ||
934 | /*define for dst_x field*/ | ||
935 | #define SDMA_PKT_COPY_T2T_DW_9_dst_x_offset 9 | ||
936 | #define SDMA_PKT_COPY_T2T_DW_9_dst_x_mask 0x00003FFF | ||
937 | #define SDMA_PKT_COPY_T2T_DW_9_dst_x_shift 0 | ||
938 | #define SDMA_PKT_COPY_T2T_DW_9_DST_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_x_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_x_shift) | ||
939 | |||
940 | /*define for dst_y field*/ | ||
941 | #define SDMA_PKT_COPY_T2T_DW_9_dst_y_offset 9 | ||
942 | #define SDMA_PKT_COPY_T2T_DW_9_dst_y_mask 0x00003FFF | ||
943 | #define SDMA_PKT_COPY_T2T_DW_9_dst_y_shift 16 | ||
944 | #define SDMA_PKT_COPY_T2T_DW_9_DST_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_9_dst_y_mask) << SDMA_PKT_COPY_T2T_DW_9_dst_y_shift) | ||
945 | |||
946 | /*define for DW_10 word*/ | ||
947 | /*define for dst_z field*/ | ||
948 | #define SDMA_PKT_COPY_T2T_DW_10_dst_z_offset 10 | ||
949 | #define SDMA_PKT_COPY_T2T_DW_10_dst_z_mask 0x000007FF | ||
950 | #define SDMA_PKT_COPY_T2T_DW_10_dst_z_shift 0 | ||
951 | #define SDMA_PKT_COPY_T2T_DW_10_DST_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_z_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_z_shift) | ||
952 | |||
953 | /*define for dst_pitch_in_tile field*/ | ||
954 | #define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_offset 10 | ||
955 | #define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask 0x00000FFF | ||
956 | #define SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift 16 | ||
957 | #define SDMA_PKT_COPY_T2T_DW_10_DST_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_mask) << SDMA_PKT_COPY_T2T_DW_10_dst_pitch_in_tile_shift) | ||
958 | |||
959 | /*define for DW_11 word*/ | ||
960 | /*define for dst_slice_pitch field*/ | ||
961 | #define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_offset 11 | ||
962 | #define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask 0x003FFFFF | ||
963 | #define SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift 0 | ||
964 | #define SDMA_PKT_COPY_T2T_DW_11_DST_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_mask) << SDMA_PKT_COPY_T2T_DW_11_dst_slice_pitch_shift) | ||
965 | |||
966 | /*define for DW_12 word*/ | ||
967 | /*define for dst_array_mode field*/ | ||
968 | #define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_offset 12 | ||
969 | #define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask 0x0000000F | ||
970 | #define SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift 3 | ||
971 | #define SDMA_PKT_COPY_T2T_DW_12_DST_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_array_mode_shift) | ||
972 | |||
973 | /*define for dst_mit_mode field*/ | ||
974 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_offset 12 | ||
975 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask 0x00000007 | ||
976 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift 8 | ||
977 | #define SDMA_PKT_COPY_T2T_DW_12_DST_MIT_MODE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mit_mode_shift) | ||
978 | |||
979 | /*define for dst_tilesplit_size field*/ | ||
980 | #define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_offset 12 | ||
981 | #define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask 0x00000007 | ||
982 | #define SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift 11 | ||
983 | #define SDMA_PKT_COPY_T2T_DW_12_DST_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_tilesplit_size_shift) | ||
984 | |||
985 | /*define for dst_bank_w field*/ | ||
986 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_offset 12 | ||
987 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask 0x00000003 | ||
988 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift 15 | ||
989 | #define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_W(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_w_shift) | ||
990 | |||
991 | /*define for dst_bank_h field*/ | ||
992 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_offset 12 | ||
993 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask 0x00000003 | ||
994 | #define SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift 18 | ||
995 | #define SDMA_PKT_COPY_T2T_DW_12_DST_BANK_H(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_bank_h_shift) | ||
996 | |||
997 | /*define for dst_num_bank field*/ | ||
998 | #define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_offset 12 | ||
999 | #define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask 0x00000003 | ||
1000 | #define SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift 21 | ||
1001 | #define SDMA_PKT_COPY_T2T_DW_12_DST_NUM_BANK(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_num_bank_shift) | ||
1002 | |||
1003 | /*define for dst_mat_aspt field*/ | ||
1004 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_offset 12 | ||
1005 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask 0x00000003 | ||
1006 | #define SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift 24 | ||
1007 | #define SDMA_PKT_COPY_T2T_DW_12_DST_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_mat_aspt_shift) | ||
1008 | |||
1009 | /*define for dst_pipe_config field*/ | ||
1010 | #define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_offset 12 | ||
1011 | #define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask 0x0000001F | ||
1012 | #define SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift 26 | ||
1013 | #define SDMA_PKT_COPY_T2T_DW_12_DST_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_mask) << SDMA_PKT_COPY_T2T_DW_12_dst_pipe_config_shift) | ||
1014 | |||
1015 | /*define for DW_13 word*/ | ||
1016 | /*define for rect_x field*/ | ||
1017 | #define SDMA_PKT_COPY_T2T_DW_13_rect_x_offset 13 | ||
1018 | #define SDMA_PKT_COPY_T2T_DW_13_rect_x_mask 0x00003FFF | ||
1019 | #define SDMA_PKT_COPY_T2T_DW_13_rect_x_shift 0 | ||
1020 | #define SDMA_PKT_COPY_T2T_DW_13_RECT_X(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_x_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_x_shift) | ||
1021 | |||
1022 | /*define for rect_y field*/ | ||
1023 | #define SDMA_PKT_COPY_T2T_DW_13_rect_y_offset 13 | ||
1024 | #define SDMA_PKT_COPY_T2T_DW_13_rect_y_mask 0x00003FFF | ||
1025 | #define SDMA_PKT_COPY_T2T_DW_13_rect_y_shift 16 | ||
1026 | #define SDMA_PKT_COPY_T2T_DW_13_RECT_Y(x) (((x) & SDMA_PKT_COPY_T2T_DW_13_rect_y_mask) << SDMA_PKT_COPY_T2T_DW_13_rect_y_shift) | ||
1027 | |||
1028 | /*define for DW_14 word*/ | ||
1029 | /*define for rect_z field*/ | ||
1030 | #define SDMA_PKT_COPY_T2T_DW_14_rect_z_offset 14 | ||
1031 | #define SDMA_PKT_COPY_T2T_DW_14_rect_z_mask 0x000007FF | ||
1032 | #define SDMA_PKT_COPY_T2T_DW_14_rect_z_shift 0 | ||
1033 | #define SDMA_PKT_COPY_T2T_DW_14_RECT_Z(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_rect_z_mask) << SDMA_PKT_COPY_T2T_DW_14_rect_z_shift) | ||
1034 | |||
1035 | /*define for dst_sw field*/ | ||
1036 | #define SDMA_PKT_COPY_T2T_DW_14_dst_sw_offset 14 | ||
1037 | #define SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask 0x00000003 | ||
1038 | #define SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift 16 | ||
1039 | #define SDMA_PKT_COPY_T2T_DW_14_DST_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_dst_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_dst_sw_shift) | ||
1040 | |||
1041 | /*define for src_sw field*/ | ||
1042 | #define SDMA_PKT_COPY_T2T_DW_14_src_sw_offset 14 | ||
1043 | #define SDMA_PKT_COPY_T2T_DW_14_src_sw_mask 0x00000003 | ||
1044 | #define SDMA_PKT_COPY_T2T_DW_14_src_sw_shift 24 | ||
1045 | #define SDMA_PKT_COPY_T2T_DW_14_SRC_SW(x) (((x) & SDMA_PKT_COPY_T2T_DW_14_src_sw_mask) << SDMA_PKT_COPY_T2T_DW_14_src_sw_shift) | ||
1046 | |||
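DW_6 and DW_12 carry the same set of tiling parameters for the source and destination surface respectively, so a sketch of one side is enough; the values (element size, array mode, bank geometry, pipe config) would come from the surface's tiling description, which is outside this header:

static u32 t2t_src_tiling_dword(u32 elem_size, u32 array_mode,
				u32 num_bank, u32 pipe_config)
{
	/* remaining DW_6 fields (mit mode, tile split, bank w/h, macro
	 * aspect) are left zero in this sketch */
	return SDMA_PKT_COPY_T2T_DW_6_SRC_ELEMENT_SIZE(elem_size) |
	       SDMA_PKT_COPY_T2T_DW_6_SRC_ARRAY_MODE(array_mode) |
	       SDMA_PKT_COPY_T2T_DW_6_SRC_NUM_BANK(num_bank) |
	       SDMA_PKT_COPY_T2T_DW_6_SRC_PIPE_CONFIG(pipe_config);
}
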
1047 | |||
1048 | /* | ||
1049 | ** Definitions for SDMA_PKT_COPY_TILED_SUBWIN packet | ||
1050 | */ | ||
1051 | |||
1052 | /*define for HEADER word*/ | ||
1053 | /*define for op field*/ | ||
1054 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_offset 0 | ||
1055 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask 0x000000FF | ||
1056 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift 0 | ||
1057 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_op_shift) | ||
1058 | |||
1059 | /*define for sub_op field*/ | ||
1060 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_offset 0 | ||
1061 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask 0x000000FF | ||
1062 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift 8 | ||
1063 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_sub_op_shift) | ||
1064 | |||
1065 | /*define for detile field*/ | ||
1066 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_offset 0 | ||
1067 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask 0x00000001 | ||
1068 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift 31 | ||
1069 | #define SDMA_PKT_COPY_TILED_SUBWIN_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_HEADER_detile_shift) | ||
1070 | |||
1071 | /*define for TILED_ADDR_LO word*/ | ||
1072 | /*define for tiled_addr_31_0 field*/ | ||
1073 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_offset 1 | ||
1074 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask 0xFFFFFFFF | ||
1075 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift 0 | ||
1076 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_TILED_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_LO_tiled_addr_31_0_shift) | ||
1077 | |||
1078 | /*define for TILED_ADDR_HI word*/ | ||
1079 | /*define for tiled_addr_63_32 field*/ | ||
1080 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_offset 2 | ||
1081 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask 0xFFFFFFFF | ||
1082 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift 0 | ||
1083 | #define SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_TILED_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_TILED_ADDR_HI_tiled_addr_63_32_shift) | ||
1084 | |||
1085 | /*define for DW_3 word*/ | ||
1086 | /*define for tiled_x field*/ | ||
1087 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_offset 3 | ||
1088 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask 0x00003FFF | ||
1089 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift 0 | ||
1090 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_x_shift) | ||
1091 | |||
1092 | /*define for tiled_y field*/ | ||
1093 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_offset 3 | ||
1094 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask 0x00003FFF | ||
1095 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift 16 | ||
1096 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_3_TILED_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_3_tiled_y_shift) | ||
1097 | |||
1098 | /*define for DW_4 word*/ | ||
1099 | /*define for tiled_z field*/ | ||
1100 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_offset 4 | ||
1101 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask 0x000007FF | ||
1102 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift 0 | ||
1103 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_TILED_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_tiled_z_shift) | ||
1104 | |||
1105 | /*define for pitch_in_tile field*/ | ||
1106 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_offset 4 | ||
1107 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask 0x00000FFF | ||
1108 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift 16 | ||
1109 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_4_PITCH_IN_TILE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_4_pitch_in_tile_shift) | ||
1110 | |||
1111 | /*define for DW_5 word*/ | ||
1112 | /*define for slice_pitch field*/ | ||
1113 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_offset 5 | ||
1114 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask 0x003FFFFF | ||
1115 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift 0 | ||
1116 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_5_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_5_slice_pitch_shift) | ||
1117 | |||
1118 | /*define for DW_6 word*/ | ||
1119 | /*define for element_size field*/ | ||
1120 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_offset 6 | ||
1121 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask 0x00000007 | ||
1122 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift 0 | ||
1123 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ELEMENT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_element_size_shift) | ||
1124 | |||
1125 | /*define for array_mode field*/ | ||
1126 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_offset 6 | ||
1127 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask 0x0000000F | ||
1128 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift 3 | ||
1129 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_ARRAY_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_array_mode_shift) | ||
1130 | |||
1131 | /*define for mit_mode field*/ | ||
1132 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_offset 6 | ||
1133 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask 0x00000007 | ||
1134 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift 8 | ||
1135 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MIT_MODE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mit_mode_shift) | ||
1136 | |||
1137 | /*define for tilesplit_size field*/ | ||
1138 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_offset 6 | ||
1139 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask 0x00000007 | ||
1140 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift 11 | ||
1141 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_tilesplit_size_shift) | ||
1142 | |||
1143 | /*define for bank_w field*/ | ||
1144 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_offset 6 | ||
1145 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask 0x00000003 | ||
1146 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift 15 | ||
1147 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_W(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_w_shift) | ||
1148 | |||
1149 | /*define for bank_h field*/ | ||
1150 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_offset 6 | ||
1151 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask 0x00000003 | ||
1152 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift 18 | ||
1153 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_BANK_H(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_bank_h_shift) | ||
1154 | |||
1155 | /*define for num_bank field*/ | ||
1156 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_offset 6 | ||
1157 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask 0x00000003 | ||
1158 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift 21 | ||
1159 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_NUM_BANK(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_num_bank_shift) | ||
1160 | |||
1161 | /*define for mat_aspt field*/ | ||
1162 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_offset 6 | ||
1163 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask 0x00000003 | ||
1164 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift 24 | ||
1165 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_MAT_ASPT(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_mat_aspt_shift) | ||
1166 | |||
1167 | /*define for pipe_config field*/ | ||
1168 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_offset 6 | ||
1169 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask 0x0000001F | ||
1170 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift 26 | ||
1171 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_6_PIPE_CONFIG(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_6_pipe_config_shift) | ||
1172 | |||
1173 | /*define for LINEAR_ADDR_LO word*/ | ||
1174 | /*define for linear_addr_31_0 field*/ | ||
1175 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_offset 7 | ||
1176 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF | ||
1177 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 | ||
1178 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_LO_linear_addr_31_0_shift) | ||
1179 | |||
1180 | /*define for LINEAR_ADDR_HI word*/ | ||
1181 | /*define for linear_addr_63_32 field*/ | ||
1182 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_offset 8 | ||
1183 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF | ||
1184 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 | ||
1185 | #define SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_TILED_SUBWIN_LINEAR_ADDR_HI_linear_addr_63_32_shift) | ||
1186 | |||
1187 | /*define for DW_9 word*/ | ||
1188 | /*define for linear_x field*/ | ||
1189 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_offset 9 | ||
1190 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask 0x00003FFF | ||
1191 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift 0 | ||
1192 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_x_shift) | ||
1193 | |||
1194 | /*define for linear_y field*/ | ||
1195 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_offset 9 | ||
1196 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask 0x00003FFF | ||
1197 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift 16 | ||
1198 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_9_LINEAR_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_9_linear_y_shift) | ||
1199 | |||
1200 | /*define for DW_10 word*/ | ||
1201 | /*define for linear_z field*/ | ||
1202 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_offset 10 | ||
1203 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask 0x000007FF | ||
1204 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift 0 | ||
1205 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_z_shift) | ||
1206 | |||
1207 | /*define for linear_pitch field*/ | ||
1208 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_offset 10 | ||
1209 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask 0x00003FFF | ||
1210 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift 16 | ||
1211 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_10_LINEAR_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_10_linear_pitch_shift) | ||
1212 | |||
1213 | /*define for DW_11 word*/ | ||
1214 | /*define for linear_slice_pitch field*/ | ||
1215 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_offset 11 | ||
1216 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask 0x0FFFFFFF | ||
1217 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift 0 | ||
1218 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_11_LINEAR_SLICE_PITCH(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_11_linear_slice_pitch_shift) | ||
1219 | |||
1220 | /*define for DW_12 word*/ | ||
1221 | /*define for rect_x field*/ | ||
1222 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_offset 12 | ||
1223 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask 0x00003FFF | ||
1224 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift 0 | ||
1225 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_X(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_shift) | ||
1226 | |||
1227 | /*define for rect_y field*/ | ||
1228 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_offset 12 | ||
1229 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask 0x00003FFF | ||
1230 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift 16 | ||
1231 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_Y(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_y_shift) | ||
1232 | |||
1233 | /*define for DW_13 word*/ | ||
1234 | /*define for rect_z field*/ | ||
1235 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_offset 13 | ||
1236 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask 0x000007FF | ||
1237 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift 0 | ||
1238 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_RECT_Z(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_shift) | ||
1239 | |||
1240 | /*define for linear_sw field*/ | ||
1241 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_offset 13 | ||
1242 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask 0x00000003 | ||
1243 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift 16 | ||
1244 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_linear_sw_shift) | ||
1245 | |||
1246 | /*define for tile_sw field*/ | ||
1247 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_offset 13 | ||
1248 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask 0x00000003 | ||
1249 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift 24 | ||
1250 | #define SDMA_PKT_COPY_TILED_SUBWIN_DW_13_TILE_SW(x) (((x) & SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_mask) << SDMA_PKT_COPY_TILED_SUBWIN_DW_13_tile_sw_shift) | ||
1251 | |||
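The detile bit in the header picks the direction of the sub-window copy (set when reading the tiled window out to linear memory, per the usual SDMA convention). A sketch of the rectangle dwords, assuming rect_x/y/z describe the copied window extent:

static void tiled_subwin_pack_rect(u32 *pkt, u32 rect_x, u32 rect_y,
				   u32 rect_z, u32 linear_sw, u32 tile_sw)
{
	pkt[SDMA_PKT_COPY_TILED_SUBWIN_DW_12_rect_x_offset] =
		SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_X(rect_x) |
		SDMA_PKT_COPY_TILED_SUBWIN_DW_12_RECT_Y(rect_y);
	/* DW_13 packs rect_z together with both swizzle selects */
	pkt[SDMA_PKT_COPY_TILED_SUBWIN_DW_13_rect_z_offset] =
		SDMA_PKT_COPY_TILED_SUBWIN_DW_13_RECT_Z(rect_z) |
		SDMA_PKT_COPY_TILED_SUBWIN_DW_13_LINEAR_SW(linear_sw) |
		SDMA_PKT_COPY_TILED_SUBWIN_DW_13_TILE_SW(tile_sw);
}
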
1252 | |||
1253 | /* | ||
1254 | ** Definitions for SDMA_PKT_COPY_STRUCT packet | ||
1255 | */ | ||
1256 | |||
1257 | /*define for HEADER word*/ | ||
1258 | /*define for op field*/ | ||
1259 | #define SDMA_PKT_COPY_STRUCT_HEADER_op_offset 0 | ||
1260 | #define SDMA_PKT_COPY_STRUCT_HEADER_op_mask 0x000000FF | ||
1261 | #define SDMA_PKT_COPY_STRUCT_HEADER_op_shift 0 | ||
1262 | #define SDMA_PKT_COPY_STRUCT_HEADER_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_op_shift) | ||
1263 | |||
1264 | /*define for sub_op field*/ | ||
1265 | #define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_offset 0 | ||
1266 | #define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask 0x000000FF | ||
1267 | #define SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift 8 | ||
1268 | #define SDMA_PKT_COPY_STRUCT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_sub_op_mask) << SDMA_PKT_COPY_STRUCT_HEADER_sub_op_shift) | ||
1269 | |||
1270 | /*define for detile field*/ | ||
1271 | #define SDMA_PKT_COPY_STRUCT_HEADER_detile_offset 0 | ||
1272 | #define SDMA_PKT_COPY_STRUCT_HEADER_detile_mask 0x00000001 | ||
1273 | #define SDMA_PKT_COPY_STRUCT_HEADER_detile_shift 31 | ||
1274 | #define SDMA_PKT_COPY_STRUCT_HEADER_DETILE(x) (((x) & SDMA_PKT_COPY_STRUCT_HEADER_detile_mask) << SDMA_PKT_COPY_STRUCT_HEADER_detile_shift) | ||
1275 | |||
1276 | /*define for SB_ADDR_LO word*/ | ||
1277 | /*define for sb_addr_31_0 field*/ | ||
1278 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_offset 1 | ||
1279 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask 0xFFFFFFFF | ||
1280 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift 0 | ||
1281 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_SB_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_LO_sb_addr_31_0_shift) | ||
1282 | |||
1283 | /*define for SB_ADDR_HI word*/ | ||
1284 | /*define for sb_addr_63_32 field*/ | ||
1285 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_offset 2 | ||
1286 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask 0xFFFFFFFF | ||
1287 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift 0 | ||
1288 | #define SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_SB_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_SB_ADDR_HI_sb_addr_63_32_shift) | ||
1289 | |||
1290 | /*define for START_INDEX word*/ | ||
1291 | /*define for start_index field*/ | ||
1292 | #define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_offset 3 | ||
1293 | #define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask 0xFFFFFFFF | ||
1294 | #define SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift 0 | ||
1295 | #define SDMA_PKT_COPY_STRUCT_START_INDEX_START_INDEX(x) (((x) & SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_mask) << SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_shift) | ||
1296 | |||
1297 | /*define for COUNT word*/ | ||
1298 | /*define for count field*/ | ||
1299 | #define SDMA_PKT_COPY_STRUCT_COUNT_count_offset 4 | ||
1300 | #define SDMA_PKT_COPY_STRUCT_COUNT_count_mask 0xFFFFFFFF | ||
1301 | #define SDMA_PKT_COPY_STRUCT_COUNT_count_shift 0 | ||
1302 | #define SDMA_PKT_COPY_STRUCT_COUNT_COUNT(x) (((x) & SDMA_PKT_COPY_STRUCT_COUNT_count_mask) << SDMA_PKT_COPY_STRUCT_COUNT_count_shift) | ||
1303 | |||
1304 | /*define for DW_5 word*/ | ||
1305 | /*define for stride field*/ | ||
1306 | #define SDMA_PKT_COPY_STRUCT_DW_5_stride_offset 5 | ||
1307 | #define SDMA_PKT_COPY_STRUCT_DW_5_stride_mask 0x000007FF | ||
1308 | #define SDMA_PKT_COPY_STRUCT_DW_5_stride_shift 0 | ||
1309 | #define SDMA_PKT_COPY_STRUCT_DW_5_STRIDE(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_stride_mask) << SDMA_PKT_COPY_STRUCT_DW_5_stride_shift) | ||
1310 | |||
1311 | /*define for struct_sw field*/ | ||
1312 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_offset 5 | ||
1313 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask 0x00000003 | ||
1314 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift 16 | ||
1315 | #define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_sw_shift) | ||
1316 | |||
1317 | /*define for struct_ha field*/ | ||
1318 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_offset 5 | ||
1319 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask 0x00000001 | ||
1320 | #define SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift 22 | ||
1321 | #define SDMA_PKT_COPY_STRUCT_DW_5_STRUCT_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_struct_ha_shift) | ||
1322 | |||
1323 | /*define for linear_sw field*/ | ||
1324 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_offset 5 | ||
1325 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask 0x00000003 | ||
1326 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift 24 | ||
1327 | #define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_SW(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_sw_shift) | ||
1328 | |||
1329 | /*define for linear_ha field*/ | ||
1330 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_offset 5 | ||
1331 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask 0x00000001 | ||
1332 | #define SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift 30 | ||
1333 | #define SDMA_PKT_COPY_STRUCT_DW_5_LINEAR_HA(x) (((x) & SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_mask) << SDMA_PKT_COPY_STRUCT_DW_5_linear_ha_shift) | ||
1334 | |||
1335 | /*define for LINEAR_ADDR_LO word*/ | ||
1336 | /*define for linear_addr_31_0 field*/ | ||
1337 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_offset 6 | ||
1338 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask 0xFFFFFFFF | ||
1339 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift 0 | ||
1340 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_LINEAR_ADDR_31_0(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_LO_linear_addr_31_0_shift) | ||
1341 | |||
1342 | /*define for LINEAR_ADDR_HI word*/ | ||
1343 | /*define for linear_addr_63_32 field*/ | ||
1344 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_offset 7 | ||
1345 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask 0xFFFFFFFF | ||
1346 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift 0 | ||
1347 | #define SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_LINEAR_ADDR_63_32(x) (((x) & SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_mask) << SDMA_PKT_COPY_STRUCT_LINEAR_ADDR_HI_linear_addr_63_32_shift) | ||
1348 | |||
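COPY_STRUCT moves count elements of a structured buffer (SB_ADDR) to or from linear memory, starting at start_index, with detile again selecting the direction. A sketch of the index/size dwords; the units of stride are whatever the hardware expects (the mask allows 11 bits), which is not stated in this header:

static void copy_struct_pack_range(u32 *pkt, u32 start_index, u32 count,
				   u32 stride)
{
	pkt[SDMA_PKT_COPY_STRUCT_START_INDEX_start_index_offset] =
		SDMA_PKT_COPY_STRUCT_START_INDEX_START_INDEX(start_index);
	pkt[SDMA_PKT_COPY_STRUCT_COUNT_count_offset] =
		SDMA_PKT_COPY_STRUCT_COUNT_COUNT(count);
	/* stride shares DW_5 with the struct/linear swizzle fields,
	 * which are left zero here */
	pkt[SDMA_PKT_COPY_STRUCT_DW_5_stride_offset] =
		SDMA_PKT_COPY_STRUCT_DW_5_STRIDE(stride);
}
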
1349 | |||
1350 | /* | ||
1351 | ** Definitions for SDMA_PKT_WRITE_UNTILED packet | ||
1352 | */ | ||
1353 | |||
1354 | /*define for HEADER word*/ | ||
1355 | /*define for op field*/ | ||
1356 | #define SDMA_PKT_WRITE_UNTILED_HEADER_op_offset 0 | ||
1357 | #define SDMA_PKT_WRITE_UNTILED_HEADER_op_mask 0x000000FF | ||
1358 | #define SDMA_PKT_WRITE_UNTILED_HEADER_op_shift 0 | ||
1359 | #define SDMA_PKT_WRITE_UNTILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_op_shift) | ||
1360 | |||
1361 | /*define for sub_op field*/ | ||
1362 | #define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_offset 0 | ||
1363 | #define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask 0x000000FF | ||
1364 | #define SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift 8 | ||
1365 | #define SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_UNTILED_HEADER_sub_op_shift) | ||
1366 | |||
1367 | /*define for DST_ADDR_LO word*/ | ||
1368 | /*define for dst_addr_31_0 field*/ | ||
1369 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_offset 1 | ||
1370 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
1371 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
1372 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_dst_addr_31_0_shift) | ||
1373 | |||
1374 | /*define for DST_ADDR_HI word*/ | ||
1375 | /*define for dst_addr_63_32 field*/ | ||
1376 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_offset 2 | ||
1377 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
1378 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
1379 | #define SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_dst_addr_63_32_shift) | ||
1380 | |||
1381 | /*define for DW_3 word*/ | ||
1382 | /*define for count field*/ | ||
1383 | #define SDMA_PKT_WRITE_UNTILED_DW_3_count_offset 3 | ||
1384 | #define SDMA_PKT_WRITE_UNTILED_DW_3_count_mask 0x003FFFFF | ||
1385 | #define SDMA_PKT_WRITE_UNTILED_DW_3_count_shift 0 | ||
1386 | #define SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_count_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_count_shift) | ||
1387 | |||
1388 | /*define for sw field*/ | ||
1389 | #define SDMA_PKT_WRITE_UNTILED_DW_3_sw_offset 3 | ||
1390 | #define SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask 0x00000003 | ||
1391 | #define SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift 24 | ||
1392 | #define SDMA_PKT_WRITE_UNTILED_DW_3_SW(x) (((x) & SDMA_PKT_WRITE_UNTILED_DW_3_sw_mask) << SDMA_PKT_WRITE_UNTILED_DW_3_sw_shift) | ||
1393 | |||
1394 | /*define for DATA0 word*/ | ||
1395 | /*define for data0 field*/ | ||
1396 | #define SDMA_PKT_WRITE_UNTILED_DATA0_data0_offset 4 | ||
1397 | #define SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask 0xFFFFFFFF | ||
1398 | #define SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift 0 | ||
1399 | #define SDMA_PKT_WRITE_UNTILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_UNTILED_DATA0_data0_mask) << SDMA_PKT_WRITE_UNTILED_DATA0_data0_shift) | ||
1400 | |||
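Each packet field above follows one pattern: _offset is the dword index of the field within the packet, _mask and _shift locate its bits inside that dword, and the uppercase macro packs a caller-supplied value into position. As a hedged illustration (not part of the patch), a WRITE_UNTILED packet could be assembled as below; SDMA_OP_WRITE and SDMA_SUBOP_WRITE_LINEAR are assumed to be defined earlier in this header, and the exact encoding of the count field (e.g. any minus-one bias) should be checked against the hardware documentation.

    /* Illustrative sketch: pack an SDMA_PKT_WRITE_UNTILED into an IB. */
    static void example_emit_write_untiled(u32 *ib, u64 dst_addr,
                                           const u32 *payload, u32 ndw)
    {
            u32 i;

            /* HEADER: opcode and sub-opcode share dword 0 */
            ib[0] = SDMA_PKT_WRITE_UNTILED_HEADER_OP(SDMA_OP_WRITE) |
                    SDMA_PKT_WRITE_UNTILED_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
            ib[1] = SDMA_PKT_WRITE_UNTILED_DST_ADDR_LO_DST_ADDR_31_0(lower_32_bits(dst_addr));
            ib[2] = SDMA_PKT_WRITE_UNTILED_DST_ADDR_HI_DST_ADDR_63_32(upper_32_bits(dst_addr));
            ib[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(ndw);
            /* payload dwords follow at DATA0 (dword 4) */
            for (i = 0; i < ndw; i++)
                    ib[4 + i] = payload[i];
    }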
1401 | |||
1402 | /* | ||
1403 | ** Definitions for SDMA_PKT_WRITE_TILED packet | ||
1404 | */ | ||
1405 | |||
1406 | /*define for HEADER word*/ | ||
1407 | /*define for op field*/ | ||
1408 | #define SDMA_PKT_WRITE_TILED_HEADER_op_offset 0 | ||
1409 | #define SDMA_PKT_WRITE_TILED_HEADER_op_mask 0x000000FF | ||
1410 | #define SDMA_PKT_WRITE_TILED_HEADER_op_shift 0 | ||
1411 | #define SDMA_PKT_WRITE_TILED_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_op_shift) | ||
1412 | |||
1413 | /*define for sub_op field*/ | ||
1414 | #define SDMA_PKT_WRITE_TILED_HEADER_sub_op_offset 0 | ||
1415 | #define SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask 0x000000FF | ||
1416 | #define SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift 8 | ||
1417 | #define SDMA_PKT_WRITE_TILED_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_TILED_HEADER_sub_op_mask) << SDMA_PKT_WRITE_TILED_HEADER_sub_op_shift) | ||
1418 | |||
1419 | /*define for DST_ADDR_LO word*/ | ||
1420 | /*define for dst_addr_31_0 field*/ | ||
1421 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_offset 1 | ||
1422 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
1423 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
1424 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_LO_dst_addr_31_0_shift) | ||
1425 | |||
1426 | /*define for DST_ADDR_HI word*/ | ||
1427 | /*define for dst_addr_63_32 field*/ | ||
1428 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_offset 2 | ||
1429 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
1430 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
1431 | #define SDMA_PKT_WRITE_TILED_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_TILED_DST_ADDR_HI_dst_addr_63_32_shift) | ||
1432 | |||
1433 | /*define for DW_3 word*/ | ||
1434 | /*define for pitch_in_tile field*/ | ||
1435 | #define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_offset 3 | ||
1436 | #define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask 0x000007FF | ||
1437 | #define SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift 0 | ||
1438 | #define SDMA_PKT_WRITE_TILED_DW_3_PITCH_IN_TILE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_mask) << SDMA_PKT_WRITE_TILED_DW_3_pitch_in_tile_shift) | ||
1439 | |||
1440 | /*define for height field*/ | ||
1441 | #define SDMA_PKT_WRITE_TILED_DW_3_height_offset 3 | ||
1442 | #define SDMA_PKT_WRITE_TILED_DW_3_height_mask 0x00003FFF | ||
1443 | #define SDMA_PKT_WRITE_TILED_DW_3_height_shift 16 | ||
1444 | #define SDMA_PKT_WRITE_TILED_DW_3_HEIGHT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_3_height_mask) << SDMA_PKT_WRITE_TILED_DW_3_height_shift) | ||
1445 | |||
1446 | /*define for DW_4 word*/ | ||
1447 | /*define for slice_pitch field*/ | ||
1448 | #define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_offset 4 | ||
1449 | #define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask 0x003FFFFF | ||
1450 | #define SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift 0 | ||
1451 | #define SDMA_PKT_WRITE_TILED_DW_4_SLICE_PITCH(x) (((x) & SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_mask) << SDMA_PKT_WRITE_TILED_DW_4_slice_pitch_shift) | ||
1452 | |||
1453 | /*define for DW_5 word*/ | ||
1454 | /*define for element_size field*/ | ||
1455 | #define SDMA_PKT_WRITE_TILED_DW_5_element_size_offset 5 | ||
1456 | #define SDMA_PKT_WRITE_TILED_DW_5_element_size_mask 0x00000007 | ||
1457 | #define SDMA_PKT_WRITE_TILED_DW_5_element_size_shift 0 | ||
1458 | #define SDMA_PKT_WRITE_TILED_DW_5_ELEMENT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_element_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_element_size_shift) | ||
1459 | |||
1460 | /*define for array_mode field*/ | ||
1461 | #define SDMA_PKT_WRITE_TILED_DW_5_array_mode_offset 5 | ||
1462 | #define SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask 0x0000000F | ||
1463 | #define SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift 3 | ||
1464 | #define SDMA_PKT_WRITE_TILED_DW_5_ARRAY_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_array_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_array_mode_shift) | ||
1465 | |||
1466 | /*define for mit_mode field*/ | ||
1467 | #define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_offset 5 | ||
1468 | #define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask 0x00000007 | ||
1469 | #define SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift 8 | ||
1470 | #define SDMA_PKT_WRITE_TILED_DW_5_MIT_MODE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mit_mode_mask) << SDMA_PKT_WRITE_TILED_DW_5_mit_mode_shift) | ||
1471 | |||
1472 | /*define for tilesplit_size field*/ | ||
1473 | #define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_offset 5 | ||
1474 | #define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask 0x00000007 | ||
1475 | #define SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift 11 | ||
1476 | #define SDMA_PKT_WRITE_TILED_DW_5_TILESPLIT_SIZE(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_mask) << SDMA_PKT_WRITE_TILED_DW_5_tilesplit_size_shift) | ||
1477 | |||
1478 | /*define for bank_w field*/ | ||
1479 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_w_offset 5 | ||
1480 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask 0x00000003 | ||
1481 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift 15 | ||
1482 | #define SDMA_PKT_WRITE_TILED_DW_5_BANK_W(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_w_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_w_shift) | ||
1483 | |||
1484 | /*define for bank_h field*/ | ||
1485 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_h_offset 5 | ||
1486 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask 0x00000003 | ||
1487 | #define SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift 18 | ||
1488 | #define SDMA_PKT_WRITE_TILED_DW_5_BANK_H(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_bank_h_mask) << SDMA_PKT_WRITE_TILED_DW_5_bank_h_shift) | ||
1489 | |||
1490 | /*define for num_bank field*/ | ||
1491 | #define SDMA_PKT_WRITE_TILED_DW_5_num_bank_offset 5 | ||
1492 | #define SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask 0x00000003 | ||
1493 | #define SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift 21 | ||
1494 | #define SDMA_PKT_WRITE_TILED_DW_5_NUM_BANK(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_num_bank_mask) << SDMA_PKT_WRITE_TILED_DW_5_num_bank_shift) | ||
1495 | |||
1496 | /*define for mat_aspt field*/ | ||
1497 | #define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_offset 5 | ||
1498 | #define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask 0x00000003 | ||
1499 | #define SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift 24 | ||
1500 | #define SDMA_PKT_WRITE_TILED_DW_5_MAT_ASPT(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_mask) << SDMA_PKT_WRITE_TILED_DW_5_mat_aspt_shift) | ||
1501 | |||
1502 | /*define for pipe_config field*/ | ||
1503 | #define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_offset 5 | ||
1504 | #define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask 0x0000001F | ||
1505 | #define SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift 26 | ||
1506 | #define SDMA_PKT_WRITE_TILED_DW_5_PIPE_CONFIG(x) (((x) & SDMA_PKT_WRITE_TILED_DW_5_pipe_config_mask) << SDMA_PKT_WRITE_TILED_DW_5_pipe_config_shift) | ||
1507 | |||
1508 | /*define for DW_6 word*/ | ||
1509 | /*define for x field*/ | ||
1510 | #define SDMA_PKT_WRITE_TILED_DW_6_x_offset 6 | ||
1511 | #define SDMA_PKT_WRITE_TILED_DW_6_x_mask 0x00003FFF | ||
1512 | #define SDMA_PKT_WRITE_TILED_DW_6_x_shift 0 | ||
1513 | #define SDMA_PKT_WRITE_TILED_DW_6_X(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_x_mask) << SDMA_PKT_WRITE_TILED_DW_6_x_shift) | ||
1514 | |||
1515 | /*define for y field*/ | ||
1516 | #define SDMA_PKT_WRITE_TILED_DW_6_y_offset 6 | ||
1517 | #define SDMA_PKT_WRITE_TILED_DW_6_y_mask 0x00003FFF | ||
1518 | #define SDMA_PKT_WRITE_TILED_DW_6_y_shift 16 | ||
1519 | #define SDMA_PKT_WRITE_TILED_DW_6_Y(x) (((x) & SDMA_PKT_WRITE_TILED_DW_6_y_mask) << SDMA_PKT_WRITE_TILED_DW_6_y_shift) | ||
1520 | |||
1521 | /*define for DW_7 word*/ | ||
1522 | /*define for z field*/ | ||
1523 | #define SDMA_PKT_WRITE_TILED_DW_7_z_offset 7 | ||
1524 | #define SDMA_PKT_WRITE_TILED_DW_7_z_mask 0x00000FFF | ||
1525 | #define SDMA_PKT_WRITE_TILED_DW_7_z_shift 0 | ||
1526 | #define SDMA_PKT_WRITE_TILED_DW_7_Z(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_z_mask) << SDMA_PKT_WRITE_TILED_DW_7_z_shift) | ||
1527 | |||
1528 | /*define for sw field*/ | ||
1529 | #define SDMA_PKT_WRITE_TILED_DW_7_sw_offset 7 | ||
1530 | #define SDMA_PKT_WRITE_TILED_DW_7_sw_mask 0x00000003 | ||
1531 | #define SDMA_PKT_WRITE_TILED_DW_7_sw_shift 24 | ||
1532 | #define SDMA_PKT_WRITE_TILED_DW_7_SW(x) (((x) & SDMA_PKT_WRITE_TILED_DW_7_sw_mask) << SDMA_PKT_WRITE_TILED_DW_7_sw_shift) | ||
1533 | |||
1534 | /*define for COUNT word*/ | ||
1535 | /*define for count field*/ | ||
1536 | #define SDMA_PKT_WRITE_TILED_COUNT_count_offset 8 | ||
1537 | #define SDMA_PKT_WRITE_TILED_COUNT_count_mask 0x003FFFFF | ||
1538 | #define SDMA_PKT_WRITE_TILED_COUNT_count_shift 0 | ||
1539 | #define SDMA_PKT_WRITE_TILED_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_TILED_COUNT_count_mask) << SDMA_PKT_WRITE_TILED_COUNT_count_shift) | ||
1540 | |||
1541 | /*define for DATA0 word*/ | ||
1542 | /*define for data0 field*/ | ||
1543 | #define SDMA_PKT_WRITE_TILED_DATA0_data0_offset 9 | ||
1544 | #define SDMA_PKT_WRITE_TILED_DATA0_data0_mask 0xFFFFFFFF | ||
1545 | #define SDMA_PKT_WRITE_TILED_DATA0_data0_shift 0 | ||
1546 | #define SDMA_PKT_WRITE_TILED_DATA0_DATA0(x) (((x) & SDMA_PKT_WRITE_TILED_DATA0_data0_mask) << SDMA_PKT_WRITE_TILED_DATA0_data0_shift) | ||
1547 | |||
1548 | |||
1549 | /* | ||
1550 | ** Definitions for SDMA_PKT_WRITE_INCR packet | ||
1551 | */ | ||
1552 | |||
1553 | /*define for HEADER word*/ | ||
1554 | /*define for op field*/ | ||
1555 | #define SDMA_PKT_WRITE_INCR_HEADER_op_offset 0 | ||
1556 | #define SDMA_PKT_WRITE_INCR_HEADER_op_mask 0x000000FF | ||
1557 | #define SDMA_PKT_WRITE_INCR_HEADER_op_shift 0 | ||
1558 | #define SDMA_PKT_WRITE_INCR_HEADER_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_op_shift) | ||
1559 | |||
1560 | /*define for sub_op field*/ | ||
1561 | #define SDMA_PKT_WRITE_INCR_HEADER_sub_op_offset 0 | ||
1562 | #define SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask 0x000000FF | ||
1563 | #define SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift 8 | ||
1564 | #define SDMA_PKT_WRITE_INCR_HEADER_SUB_OP(x) (((x) & SDMA_PKT_WRITE_INCR_HEADER_sub_op_mask) << SDMA_PKT_WRITE_INCR_HEADER_sub_op_shift) | ||
1565 | |||
1566 | /*define for DST_ADDR_LO word*/ | ||
1567 | /*define for dst_addr_31_0 field*/ | ||
1568 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_offset 1 | ||
1569 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
1570 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
1571 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_LO_dst_addr_31_0_shift) | ||
1572 | |||
1573 | /*define for DST_ADDR_HI word*/ | ||
1574 | /*define for dst_addr_63_32 field*/ | ||
1575 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_offset 2 | ||
1576 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
1577 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
1578 | #define SDMA_PKT_WRITE_INCR_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_WRITE_INCR_DST_ADDR_HI_dst_addr_63_32_shift) | ||
1579 | |||
1580 | /*define for MASK_DW0 word*/ | ||
1581 | /*define for mask_dw0 field*/ | ||
1582 | #define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_offset 3 | ||
1583 | #define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask 0xFFFFFFFF | ||
1584 | #define SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift 0 | ||
1585 | #define SDMA_PKT_WRITE_INCR_MASK_DW0_MASK_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_mask) << SDMA_PKT_WRITE_INCR_MASK_DW0_mask_dw0_shift) | ||
1586 | |||
1587 | /*define for MASK_DW1 word*/ | ||
1588 | /*define for mask_dw1 field*/ | ||
1589 | #define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_offset 4 | ||
1590 | #define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask 0xFFFFFFFF | ||
1591 | #define SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift 0 | ||
1592 | #define SDMA_PKT_WRITE_INCR_MASK_DW1_MASK_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_mask) << SDMA_PKT_WRITE_INCR_MASK_DW1_mask_dw1_shift) | ||
1593 | |||
1594 | /*define for INIT_DW0 word*/ | ||
1595 | /*define for init_dw0 field*/ | ||
1596 | #define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_offset 5 | ||
1597 | #define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask 0xFFFFFFFF | ||
1598 | #define SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift 0 | ||
1599 | #define SDMA_PKT_WRITE_INCR_INIT_DW0_INIT_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_mask) << SDMA_PKT_WRITE_INCR_INIT_DW0_init_dw0_shift) | ||
1600 | |||
1601 | /*define for INIT_DW1 word*/ | ||
1602 | /*define for init_dw1 field*/ | ||
1603 | #define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_offset 6 | ||
1604 | #define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask 0xFFFFFFFF | ||
1605 | #define SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift 0 | ||
1606 | #define SDMA_PKT_WRITE_INCR_INIT_DW1_INIT_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_mask) << SDMA_PKT_WRITE_INCR_INIT_DW1_init_dw1_shift) | ||
1607 | |||
1608 | /*define for INCR_DW0 word*/ | ||
1609 | /*define for incr_dw0 field*/ | ||
1610 | #define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_offset 7 | ||
1611 | #define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask 0xFFFFFFFF | ||
1612 | #define SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift 0 | ||
1613 | #define SDMA_PKT_WRITE_INCR_INCR_DW0_INCR_DW0(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_mask) << SDMA_PKT_WRITE_INCR_INCR_DW0_incr_dw0_shift) | ||
1614 | |||
1615 | /*define for INCR_DW1 word*/ | ||
1616 | /*define for incr_dw1 field*/ | ||
1617 | #define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_offset 8 | ||
1618 | #define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask 0xFFFFFFFF | ||
1619 | #define SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift 0 | ||
1620 | #define SDMA_PKT_WRITE_INCR_INCR_DW1_INCR_DW1(x) (((x) & SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_mask) << SDMA_PKT_WRITE_INCR_INCR_DW1_incr_dw1_shift) | ||
1621 | |||
1622 | /*define for COUNT word*/ | ||
1623 | /*define for count field*/ | ||
1624 | #define SDMA_PKT_WRITE_INCR_COUNT_count_offset 9 | ||
1625 | #define SDMA_PKT_WRITE_INCR_COUNT_count_mask 0x0007FFFF | ||
1626 | #define SDMA_PKT_WRITE_INCR_COUNT_count_shift 0 | ||
1627 | #define SDMA_PKT_WRITE_INCR_COUNT_COUNT(x) (((x) & SDMA_PKT_WRITE_INCR_COUNT_count_mask) << SDMA_PKT_WRITE_INCR_COUNT_count_shift) | ||
1628 | |||
1629 | |||
1630 | /* | ||
1631 | ** Definitions for SDMA_PKT_INDIRECT packet | ||
1632 | */ | ||
1633 | |||
1634 | /*define for HEADER word*/ | ||
1635 | /*define for op field*/ | ||
1636 | #define SDMA_PKT_INDIRECT_HEADER_op_offset 0 | ||
1637 | #define SDMA_PKT_INDIRECT_HEADER_op_mask 0x000000FF | ||
1638 | #define SDMA_PKT_INDIRECT_HEADER_op_shift 0 | ||
1639 | #define SDMA_PKT_INDIRECT_HEADER_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_op_mask) << SDMA_PKT_INDIRECT_HEADER_op_shift) | ||
1640 | |||
1641 | /*define for sub_op field*/ | ||
1642 | #define SDMA_PKT_INDIRECT_HEADER_sub_op_offset 0 | ||
1643 | #define SDMA_PKT_INDIRECT_HEADER_sub_op_mask 0x000000FF | ||
1644 | #define SDMA_PKT_INDIRECT_HEADER_sub_op_shift 8 | ||
1645 | #define SDMA_PKT_INDIRECT_HEADER_SUB_OP(x) (((x) & SDMA_PKT_INDIRECT_HEADER_sub_op_mask) << SDMA_PKT_INDIRECT_HEADER_sub_op_shift) | ||
1646 | |||
1647 | /*define for vmid field*/ | ||
1648 | #define SDMA_PKT_INDIRECT_HEADER_vmid_offset 0 | ||
1649 | #define SDMA_PKT_INDIRECT_HEADER_vmid_mask 0x0000000F | ||
1650 | #define SDMA_PKT_INDIRECT_HEADER_vmid_shift 16 | ||
1651 | #define SDMA_PKT_INDIRECT_HEADER_VMID(x) (((x) & SDMA_PKT_INDIRECT_HEADER_vmid_mask) << SDMA_PKT_INDIRECT_HEADER_vmid_shift) | ||
1652 | |||
1653 | /*define for BASE_LO word*/ | ||
1654 | /*define for ib_base_31_0 field*/ | ||
1655 | #define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_offset 1 | ||
1656 | #define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask 0xFFFFFFFF | ||
1657 | #define SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift 0 | ||
1658 | #define SDMA_PKT_INDIRECT_BASE_LO_IB_BASE_31_0(x) (((x) & SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_mask) << SDMA_PKT_INDIRECT_BASE_LO_ib_base_31_0_shift) | ||
1659 | |||
1660 | /*define for BASE_HI word*/ | ||
1661 | /*define for ib_base_63_32 field*/ | ||
1662 | #define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_offset 2 | ||
1663 | #define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask 0xFFFFFFFF | ||
1664 | #define SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift 0 | ||
1665 | #define SDMA_PKT_INDIRECT_BASE_HI_IB_BASE_63_32(x) (((x) & SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_mask) << SDMA_PKT_INDIRECT_BASE_HI_ib_base_63_32_shift) | ||
1666 | |||
1667 | /*define for IB_SIZE word*/ | ||
1668 | /*define for ib_size field*/ | ||
1669 | #define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_offset 3 | ||
1670 | #define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask 0x000FFFFF | ||
1671 | #define SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift 0 | ||
1672 | #define SDMA_PKT_INDIRECT_IB_SIZE_IB_SIZE(x) (((x) & SDMA_PKT_INDIRECT_IB_SIZE_ib_size_mask) << SDMA_PKT_INDIRECT_IB_SIZE_ib_size_shift) | ||
1673 | |||
1674 | /*define for CSA_ADDR_LO word*/ | ||
1675 | /*define for csa_addr_31_0 field*/ | ||
1676 | #define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_offset 4 | ||
1677 | #define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask 0xFFFFFFFF | ||
1678 | #define SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift 0 | ||
1679 | #define SDMA_PKT_INDIRECT_CSA_ADDR_LO_CSA_ADDR_31_0(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_LO_csa_addr_31_0_shift) | ||
1680 | |||
1681 | /*define for CSA_ADDR_HI word*/ | ||
1682 | /*define for csa_addr_63_32 field*/ | ||
1683 | #define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_offset 5 | ||
1684 | #define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask 0xFFFFFFFF | ||
1685 | #define SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift 0 | ||
1686 | #define SDMA_PKT_INDIRECT_CSA_ADDR_HI_CSA_ADDR_63_32(x) (((x) & SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_mask) << SDMA_PKT_INDIRECT_CSA_ADDR_HI_csa_addr_63_32_shift) | ||
1687 | |||
1688 | |||
1689 | /* | ||
1690 | ** Definitions for SDMA_PKT_SEMAPHORE packet | ||
1691 | */ | ||
1692 | |||
1693 | /*define for HEADER word*/ | ||
1694 | /*define for op field*/ | ||
1695 | #define SDMA_PKT_SEMAPHORE_HEADER_op_offset 0 | ||
1696 | #define SDMA_PKT_SEMAPHORE_HEADER_op_mask 0x000000FF | ||
1697 | #define SDMA_PKT_SEMAPHORE_HEADER_op_shift 0 | ||
1698 | #define SDMA_PKT_SEMAPHORE_HEADER_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_op_shift) | ||
1699 | |||
1700 | /*define for sub_op field*/ | ||
1701 | #define SDMA_PKT_SEMAPHORE_HEADER_sub_op_offset 0 | ||
1702 | #define SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask 0x000000FF | ||
1703 | #define SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift 8 | ||
1704 | #define SDMA_PKT_SEMAPHORE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_sub_op_mask) << SDMA_PKT_SEMAPHORE_HEADER_sub_op_shift) | ||
1705 | |||
1706 | /*define for write_one field*/ | ||
1707 | #define SDMA_PKT_SEMAPHORE_HEADER_write_one_offset 0 | ||
1708 | #define SDMA_PKT_SEMAPHORE_HEADER_write_one_mask 0x00000001 | ||
1709 | #define SDMA_PKT_SEMAPHORE_HEADER_write_one_shift 29 | ||
1710 | #define SDMA_PKT_SEMAPHORE_HEADER_WRITE_ONE(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_write_one_mask) << SDMA_PKT_SEMAPHORE_HEADER_write_one_shift) | ||
1711 | |||
1712 | /*define for signal field*/ | ||
1713 | #define SDMA_PKT_SEMAPHORE_HEADER_signal_offset 0 | ||
1714 | #define SDMA_PKT_SEMAPHORE_HEADER_signal_mask 0x00000001 | ||
1715 | #define SDMA_PKT_SEMAPHORE_HEADER_signal_shift 30 | ||
1716 | #define SDMA_PKT_SEMAPHORE_HEADER_SIGNAL(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_signal_mask) << SDMA_PKT_SEMAPHORE_HEADER_signal_shift) | ||
1717 | |||
1718 | /*define for mailbox field*/ | ||
1719 | #define SDMA_PKT_SEMAPHORE_HEADER_mailbox_offset 0 | ||
1720 | #define SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask 0x00000001 | ||
1721 | #define SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift 31 | ||
1722 | #define SDMA_PKT_SEMAPHORE_HEADER_MAILBOX(x) (((x) & SDMA_PKT_SEMAPHORE_HEADER_mailbox_mask) << SDMA_PKT_SEMAPHORE_HEADER_mailbox_shift) | ||
1723 | |||
1724 | /*define for ADDR_LO word*/ | ||
1725 | /*define for addr_31_0 field*/ | ||
1726 | #define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_offset 1 | ||
1727 | #define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
1728 | #define SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift 0 | ||
1729 | #define SDMA_PKT_SEMAPHORE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_SEMAPHORE_ADDR_LO_addr_31_0_shift) | ||
1730 | |||
1731 | /*define for ADDR_HI word*/ | ||
1732 | /*define for addr_63_32 field*/ | ||
1733 | #define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_offset 2 | ||
1734 | #define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
1735 | #define SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift 0 | ||
1736 | #define SDMA_PKT_SEMAPHORE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_SEMAPHORE_ADDR_HI_addr_63_32_shift) | ||
1737 | |||
1738 | |||
1739 | /* | ||
1740 | ** Definitions for SDMA_PKT_FENCE packet | ||
1741 | */ | ||
1742 | |||
1743 | /*define for HEADER word*/ | ||
1744 | /*define for op field*/ | ||
1745 | #define SDMA_PKT_FENCE_HEADER_op_offset 0 | ||
1746 | #define SDMA_PKT_FENCE_HEADER_op_mask 0x000000FF | ||
1747 | #define SDMA_PKT_FENCE_HEADER_op_shift 0 | ||
1748 | #define SDMA_PKT_FENCE_HEADER_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_op_mask) << SDMA_PKT_FENCE_HEADER_op_shift) | ||
1749 | |||
1750 | /*define for sub_op field*/ | ||
1751 | #define SDMA_PKT_FENCE_HEADER_sub_op_offset 0 | ||
1752 | #define SDMA_PKT_FENCE_HEADER_sub_op_mask 0x000000FF | ||
1753 | #define SDMA_PKT_FENCE_HEADER_sub_op_shift 8 | ||
1754 | #define SDMA_PKT_FENCE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_FENCE_HEADER_sub_op_mask) << SDMA_PKT_FENCE_HEADER_sub_op_shift) | ||
1755 | |||
1756 | /*define for ADDR_LO word*/ | ||
1757 | /*define for addr_31_0 field*/ | ||
1758 | #define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_offset 1 | ||
1759 | #define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
1760 | #define SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift 0 | ||
1761 | #define SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_FENCE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_FENCE_ADDR_LO_addr_31_0_shift) | ||
1762 | |||
1763 | /*define for ADDR_HI word*/ | ||
1764 | /*define for addr_63_32 field*/ | ||
1765 | #define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_offset 2 | ||
1766 | #define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
1767 | #define SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift 0 | ||
1768 | #define SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_FENCE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_FENCE_ADDR_HI_addr_63_32_shift) | ||
1769 | |||
1770 | /*define for DATA word*/ | ||
1771 | /*define for data field*/ | ||
1772 | #define SDMA_PKT_FENCE_DATA_data_offset 3 | ||
1773 | #define SDMA_PKT_FENCE_DATA_data_mask 0xFFFFFFFF | ||
1774 | #define SDMA_PKT_FENCE_DATA_data_shift 0 | ||
1775 | #define SDMA_PKT_FENCE_DATA_DATA(x) (((x) & SDMA_PKT_FENCE_DATA_data_mask) << SDMA_PKT_FENCE_DATA_data_shift) | ||
1776 | |||
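The fence packet is the basic completion-writeback primitive: one 32-bit DATA value is written to ADDR when the engine reaches the packet. A minimal hedged sketch, assuming SDMA_OP_FENCE is defined earlier in this header:

    /* Illustrative sketch: write "seq" to "addr" at packet execution. */
    static void example_emit_fence(u32 *ib, u64 addr, u32 seq)
    {
            ib[0] = SDMA_PKT_FENCE_HEADER_OP(SDMA_OP_FENCE);
            ib[1] = SDMA_PKT_FENCE_ADDR_LO_ADDR_31_0(lower_32_bits(addr));
            ib[2] = SDMA_PKT_FENCE_ADDR_HI_ADDR_63_32(upper_32_bits(addr));
            ib[3] = SDMA_PKT_FENCE_DATA_DATA(seq);
    }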
1777 | |||
1778 | /* | ||
1779 | ** Definitions for SDMA_PKT_SRBM_WRITE packet | ||
1780 | */ | ||
1781 | |||
1782 | /*define for HEADER word*/ | ||
1783 | /*define for op field*/ | ||
1784 | #define SDMA_PKT_SRBM_WRITE_HEADER_op_offset 0 | ||
1785 | #define SDMA_PKT_SRBM_WRITE_HEADER_op_mask 0x000000FF | ||
1786 | #define SDMA_PKT_SRBM_WRITE_HEADER_op_shift 0 | ||
1787 | #define SDMA_PKT_SRBM_WRITE_HEADER_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_op_shift) | ||
1788 | |||
1789 | /*define for sub_op field*/ | ||
1790 | #define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_offset 0 | ||
1791 | #define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask 0x000000FF | ||
1792 | #define SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift 8 | ||
1793 | #define SDMA_PKT_SRBM_WRITE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_sub_op_mask) << SDMA_PKT_SRBM_WRITE_HEADER_sub_op_shift) | ||
1794 | |||
1795 | /*define for byte_en field*/ | ||
1796 | #define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_offset 0 | ||
1797 | #define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask 0x0000000F | ||
1798 | #define SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift 28 | ||
1799 | #define SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(x) (((x) & SDMA_PKT_SRBM_WRITE_HEADER_byte_en_mask) << SDMA_PKT_SRBM_WRITE_HEADER_byte_en_shift) | ||
1800 | |||
1801 | /*define for ADDR word*/ | ||
1802 | /*define for addr field*/ | ||
1803 | #define SDMA_PKT_SRBM_WRITE_ADDR_addr_offset 1 | ||
1804 | #define SDMA_PKT_SRBM_WRITE_ADDR_addr_mask 0x0000FFFF | ||
1805 | #define SDMA_PKT_SRBM_WRITE_ADDR_addr_shift 0 | ||
1806 | #define SDMA_PKT_SRBM_WRITE_ADDR_ADDR(x) (((x) & SDMA_PKT_SRBM_WRITE_ADDR_addr_mask) << SDMA_PKT_SRBM_WRITE_ADDR_addr_shift) | ||
1807 | |||
1808 | /*define for DATA word*/ | ||
1809 | /*define for data field*/ | ||
1810 | #define SDMA_PKT_SRBM_WRITE_DATA_data_offset 2 | ||
1811 | #define SDMA_PKT_SRBM_WRITE_DATA_data_mask 0xFFFFFFFF | ||
1812 | #define SDMA_PKT_SRBM_WRITE_DATA_data_shift 0 | ||
1813 | #define SDMA_PKT_SRBM_WRITE_DATA_DATA(x) (((x) & SDMA_PKT_SRBM_WRITE_DATA_data_mask) << SDMA_PKT_SRBM_WRITE_DATA_data_shift) | ||
1814 | |||
1815 | |||
1816 | /* | ||
1817 | ** Definitions for SDMA_PKT_PRE_EXE packet | ||
1818 | */ | ||
1819 | |||
1820 | /*define for HEADER word*/ | ||
1821 | /*define for op field*/ | ||
1822 | #define SDMA_PKT_PRE_EXE_HEADER_op_offset 0 | ||
1823 | #define SDMA_PKT_PRE_EXE_HEADER_op_mask 0x000000FF | ||
1824 | #define SDMA_PKT_PRE_EXE_HEADER_op_shift 0 | ||
1825 | #define SDMA_PKT_PRE_EXE_HEADER_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_op_mask) << SDMA_PKT_PRE_EXE_HEADER_op_shift) | ||
1826 | |||
1827 | /*define for sub_op field*/ | ||
1828 | #define SDMA_PKT_PRE_EXE_HEADER_sub_op_offset 0 | ||
1829 | #define SDMA_PKT_PRE_EXE_HEADER_sub_op_mask 0x000000FF | ||
1830 | #define SDMA_PKT_PRE_EXE_HEADER_sub_op_shift 8 | ||
1831 | #define SDMA_PKT_PRE_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_sub_op_mask) << SDMA_PKT_PRE_EXE_HEADER_sub_op_shift) | ||
1832 | |||
1833 | /*define for dev_sel field*/ | ||
1834 | #define SDMA_PKT_PRE_EXE_HEADER_dev_sel_offset 0 | ||
1835 | #define SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask 0x000000FF | ||
1836 | #define SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift 16 | ||
1837 | #define SDMA_PKT_PRE_EXE_HEADER_DEV_SEL(x) (((x) & SDMA_PKT_PRE_EXE_HEADER_dev_sel_mask) << SDMA_PKT_PRE_EXE_HEADER_dev_sel_shift) | ||
1838 | |||
1839 | /*define for EXEC_COUNT word*/ | ||
1840 | /*define for exec_count field*/ | ||
1841 | #define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_offset 1 | ||
1842 | #define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask 0x00003FFF | ||
1843 | #define SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift 0 | ||
1844 | #define SDMA_PKT_PRE_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_PRE_EXE_EXEC_COUNT_exec_count_shift) | ||
1845 | |||
1846 | |||
1847 | /* | ||
1848 | ** Definitions for SDMA_PKT_COND_EXE packet | ||
1849 | */ | ||
1850 | |||
1851 | /*define for HEADER word*/ | ||
1852 | /*define for op field*/ | ||
1853 | #define SDMA_PKT_COND_EXE_HEADER_op_offset 0 | ||
1854 | #define SDMA_PKT_COND_EXE_HEADER_op_mask 0x000000FF | ||
1855 | #define SDMA_PKT_COND_EXE_HEADER_op_shift 0 | ||
1856 | #define SDMA_PKT_COND_EXE_HEADER_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_op_mask) << SDMA_PKT_COND_EXE_HEADER_op_shift) | ||
1857 | |||
1858 | /*define for sub_op field*/ | ||
1859 | #define SDMA_PKT_COND_EXE_HEADER_sub_op_offset 0 | ||
1860 | #define SDMA_PKT_COND_EXE_HEADER_sub_op_mask 0x000000FF | ||
1861 | #define SDMA_PKT_COND_EXE_HEADER_sub_op_shift 8 | ||
1862 | #define SDMA_PKT_COND_EXE_HEADER_SUB_OP(x) (((x) & SDMA_PKT_COND_EXE_HEADER_sub_op_mask) << SDMA_PKT_COND_EXE_HEADER_sub_op_shift) | ||
1863 | |||
1864 | /*define for ADDR_LO word*/ | ||
1865 | /*define for addr_31_0 field*/ | ||
1866 | #define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_offset 1 | ||
1867 | #define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
1868 | #define SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift 0 | ||
1869 | #define SDMA_PKT_COND_EXE_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_mask) << SDMA_PKT_COND_EXE_ADDR_LO_addr_31_0_shift) | ||
1870 | |||
1871 | /*define for ADDR_HI word*/ | ||
1872 | /*define for addr_63_32 field*/ | ||
1873 | #define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_offset 2 | ||
1874 | #define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
1875 | #define SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift 0 | ||
1876 | #define SDMA_PKT_COND_EXE_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_mask) << SDMA_PKT_COND_EXE_ADDR_HI_addr_63_32_shift) | ||
1877 | |||
1878 | /*define for REFERENCE word*/ | ||
1879 | /*define for reference field*/ | ||
1880 | #define SDMA_PKT_COND_EXE_REFERENCE_reference_offset 3 | ||
1881 | #define SDMA_PKT_COND_EXE_REFERENCE_reference_mask 0xFFFFFFFF | ||
1882 | #define SDMA_PKT_COND_EXE_REFERENCE_reference_shift 0 | ||
1883 | #define SDMA_PKT_COND_EXE_REFERENCE_REFERENCE(x) (((x) & SDMA_PKT_COND_EXE_REFERENCE_reference_mask) << SDMA_PKT_COND_EXE_REFERENCE_reference_shift) | ||
1884 | |||
1885 | /*define for EXEC_COUNT word*/ | ||
1886 | /*define for exec_count field*/ | ||
1887 | #define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_offset 4 | ||
1888 | #define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask 0x00003FFF | ||
1889 | #define SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift 0 | ||
1890 | #define SDMA_PKT_COND_EXE_EXEC_COUNT_EXEC_COUNT(x) (((x) & SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_mask) << SDMA_PKT_COND_EXE_EXEC_COUNT_exec_count_shift) | ||
1891 | |||
1892 | |||
1893 | /* | ||
1894 | ** Definitions for SDMA_PKT_CONSTANT_FILL packet | ||
1895 | */ | ||
1896 | |||
1897 | /*define for HEADER word*/ | ||
1898 | /*define for op field*/ | ||
1899 | #define SDMA_PKT_CONSTANT_FILL_HEADER_op_offset 0 | ||
1900 | #define SDMA_PKT_CONSTANT_FILL_HEADER_op_mask 0x000000FF | ||
1901 | #define SDMA_PKT_CONSTANT_FILL_HEADER_op_shift 0 | ||
1902 | #define SDMA_PKT_CONSTANT_FILL_HEADER_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_op_shift) | ||
1903 | |||
1904 | /*define for sub_op field*/ | ||
1905 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_offset 0 | ||
1906 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask 0x000000FF | ||
1907 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift 8 | ||
1908 | #define SDMA_PKT_CONSTANT_FILL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sub_op_shift) | ||
1909 | |||
1910 | /*define for sw field*/ | ||
1911 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sw_offset 0 | ||
1912 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask 0x00000003 | ||
1913 | #define SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift 16 | ||
1914 | #define SDMA_PKT_CONSTANT_FILL_HEADER_SW(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_sw_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_sw_shift) | ||
1915 | |||
1916 | /*define for fillsize field*/ | ||
1917 | #define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_offset 0 | ||
1918 | #define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask 0x00000003 | ||
1919 | #define SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift 30 | ||
1920 | #define SDMA_PKT_CONSTANT_FILL_HEADER_FILLSIZE(x) (((x) & SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_mask) << SDMA_PKT_CONSTANT_FILL_HEADER_fillsize_shift) | ||
1921 | |||
1922 | /*define for DST_ADDR_LO word*/ | ||
1923 | /*define for dst_addr_31_0 field*/ | ||
1924 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_offset 1 | ||
1925 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask 0xFFFFFFFF | ||
1926 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift 0 | ||
1927 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_DST_ADDR_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_dst_addr_31_0_shift) | ||
1928 | |||
1929 | /*define for DST_ADDR_HI word*/ | ||
1930 | /*define for dst_addr_63_32 field*/ | ||
1931 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_offset 2 | ||
1932 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask 0xFFFFFFFF | ||
1933 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift 0 | ||
1934 | #define SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_DST_ADDR_63_32(x) (((x) & SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_mask) << SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_dst_addr_63_32_shift) | ||
1935 | |||
1936 | /*define for DATA word*/ | ||
1937 | /*define for src_data_31_0 field*/ | ||
1938 | #define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_offset 3 | ||
1939 | #define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask 0xFFFFFFFF | ||
1940 | #define SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift 0 | ||
1941 | #define SDMA_PKT_CONSTANT_FILL_DATA_SRC_DATA_31_0(x) (((x) & SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_mask) << SDMA_PKT_CONSTANT_FILL_DATA_src_data_31_0_shift) | ||
1942 | |||
1943 | /*define for COUNT word*/ | ||
1944 | /*define for count field*/ | ||
1945 | #define SDMA_PKT_CONSTANT_FILL_COUNT_count_offset 4 | ||
1946 | #define SDMA_PKT_CONSTANT_FILL_COUNT_count_mask 0x003FFFFF | ||
1947 | #define SDMA_PKT_CONSTANT_FILL_COUNT_count_shift 0 | ||
1948 | #define SDMA_PKT_CONSTANT_FILL_COUNT_COUNT(x) (((x) & SDMA_PKT_CONSTANT_FILL_COUNT_count_mask) << SDMA_PKT_CONSTANT_FILL_COUNT_count_shift) | ||
1949 | |||
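A hedged sketch of a constant-fill emission follows. SDMA_OP_CONST_FILL is assumed to be defined earlier in this header; fillsize selects the width of the repeated pattern (2 is assumed here to mean a 32-bit pattern), and the units and any bias of the count field should be verified against the hardware documentation rather than taken from this example.

    /* Illustrative sketch: fill memory at "dst" with a 32-bit pattern. */
    static void example_emit_constant_fill(u32 *ib, u64 dst,
                                           u32 pattern, u32 count)
    {
            ib[0] = SDMA_PKT_CONSTANT_FILL_HEADER_OP(SDMA_OP_CONST_FILL) |
                    SDMA_PKT_CONSTANT_FILL_HEADER_FILLSIZE(2);
            ib[1] = SDMA_PKT_CONSTANT_FILL_DST_ADDR_LO_DST_ADDR_31_0(lower_32_bits(dst));
            ib[2] = SDMA_PKT_CONSTANT_FILL_DST_ADDR_HI_DST_ADDR_63_32(upper_32_bits(dst));
            ib[3] = SDMA_PKT_CONSTANT_FILL_DATA_SRC_DATA_31_0(pattern);
            ib[4] = SDMA_PKT_CONSTANT_FILL_COUNT_COUNT(count);
    }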
1950 | |||
1951 | /* | ||
1952 | ** Definitions for SDMA_PKT_POLL_REGMEM packet | ||
1953 | */ | ||
1954 | |||
1955 | /*define for HEADER word*/ | ||
1956 | /*define for op field*/ | ||
1957 | #define SDMA_PKT_POLL_REGMEM_HEADER_op_offset 0 | ||
1958 | #define SDMA_PKT_POLL_REGMEM_HEADER_op_mask 0x000000FF | ||
1959 | #define SDMA_PKT_POLL_REGMEM_HEADER_op_shift 0 | ||
1960 | #define SDMA_PKT_POLL_REGMEM_HEADER_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_op_shift) | ||
1961 | |||
1962 | /*define for sub_op field*/ | ||
1963 | #define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_offset 0 | ||
1964 | #define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask 0x000000FF | ||
1965 | #define SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift 8 | ||
1966 | #define SDMA_PKT_POLL_REGMEM_HEADER_SUB_OP(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_sub_op_mask) << SDMA_PKT_POLL_REGMEM_HEADER_sub_op_shift) | ||
1967 | |||
1968 | /*define for hdp_flush field*/ | ||
1969 | #define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_offset 0 | ||
1970 | #define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask 0x00000001 | ||
1971 | #define SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift 26 | ||
1972 | #define SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_mask) << SDMA_PKT_POLL_REGMEM_HEADER_hdp_flush_shift) | ||
1973 | |||
1974 | /*define for func field*/ | ||
1975 | #define SDMA_PKT_POLL_REGMEM_HEADER_func_offset 0 | ||
1976 | #define SDMA_PKT_POLL_REGMEM_HEADER_func_mask 0x00000007 | ||
1977 | #define SDMA_PKT_POLL_REGMEM_HEADER_func_shift 28 | ||
1978 | #define SDMA_PKT_POLL_REGMEM_HEADER_FUNC(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_func_mask) << SDMA_PKT_POLL_REGMEM_HEADER_func_shift) | ||
1979 | |||
1980 | /*define for mem_poll field*/ | ||
1981 | #define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_offset 0 | ||
1982 | #define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask 0x00000001 | ||
1983 | #define SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift 31 | ||
1984 | #define SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(x) (((x) & SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_mask) << SDMA_PKT_POLL_REGMEM_HEADER_mem_poll_shift) | ||
1985 | |||
1986 | /*define for ADDR_LO word*/ | ||
1987 | /*define for addr_31_0 field*/ | ||
1988 | #define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_offset 1 | ||
1989 | #define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
1990 | #define SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift 0 | ||
1991 | #define SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_mask) << SDMA_PKT_POLL_REGMEM_ADDR_LO_addr_31_0_shift) | ||
1992 | |||
1993 | /*define for ADDR_HI word*/ | ||
1994 | /*define for addr_63_32 field*/ | ||
1995 | #define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_offset 2 | ||
1996 | #define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
1997 | #define SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift 0 | ||
1998 | #define SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_mask) << SDMA_PKT_POLL_REGMEM_ADDR_HI_addr_63_32_shift) | ||
1999 | |||
2000 | /*define for VALUE word*/ | ||
2001 | /*define for value field*/ | ||
2002 | #define SDMA_PKT_POLL_REGMEM_VALUE_value_offset 3 | ||
2003 | #define SDMA_PKT_POLL_REGMEM_VALUE_value_mask 0xFFFFFFFF | ||
2004 | #define SDMA_PKT_POLL_REGMEM_VALUE_value_shift 0 | ||
2005 | #define SDMA_PKT_POLL_REGMEM_VALUE_VALUE(x) (((x) & SDMA_PKT_POLL_REGMEM_VALUE_value_mask) << SDMA_PKT_POLL_REGMEM_VALUE_value_shift) | ||
2006 | |||
2007 | /*define for MASK word*/ | ||
2008 | /*define for mask field*/ | ||
2009 | #define SDMA_PKT_POLL_REGMEM_MASK_mask_offset 4 | ||
2010 | #define SDMA_PKT_POLL_REGMEM_MASK_mask_mask 0xFFFFFFFF | ||
2011 | #define SDMA_PKT_POLL_REGMEM_MASK_mask_shift 0 | ||
2012 | #define SDMA_PKT_POLL_REGMEM_MASK_MASK(x) (((x) & SDMA_PKT_POLL_REGMEM_MASK_mask_mask) << SDMA_PKT_POLL_REGMEM_MASK_mask_shift) | ||
2013 | |||
2014 | /*define for DW5 word*/ | ||
2015 | /*define for interval field*/ | ||
2016 | #define SDMA_PKT_POLL_REGMEM_DW5_interval_offset 5 | ||
2017 | #define SDMA_PKT_POLL_REGMEM_DW5_interval_mask 0x0000FFFF | ||
2018 | #define SDMA_PKT_POLL_REGMEM_DW5_interval_shift 0 | ||
2019 | #define SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_interval_mask) << SDMA_PKT_POLL_REGMEM_DW5_interval_shift) | ||
2020 | |||
2021 | /*define for retry_count field*/ | ||
2022 | #define SDMA_PKT_POLL_REGMEM_DW5_retry_count_offset 5 | ||
2023 | #define SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask 0x00000FFF | ||
2024 | #define SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift 16 | ||
2025 | #define SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(x) (((x) & SDMA_PKT_POLL_REGMEM_DW5_retry_count_mask) << SDMA_PKT_POLL_REGMEM_DW5_retry_count_shift) | ||
2026 | |||
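POLL_REGMEM makes the engine stall until (fetched value & MASK) compares against VALUE per the func field. A hedged sketch of a memory poll follows; SDMA_OP_POLL_REGMEM is assumed to be defined earlier in this header, func = 3 is assumed to encode "equal", and the interval/retry values are placeholders, so all three should be confirmed against the hardware documentation.

    /* Illustrative sketch: wait until (*(u32 *)addr & mask) == value. */
    static void example_emit_poll_mem(u32 *ib, u64 addr, u32 value, u32 mask)
    {
            ib[0] = SDMA_PKT_POLL_REGMEM_HEADER_OP(SDMA_OP_POLL_REGMEM) |
                    SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) |      /* assumed: equal */
                    SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1);   /* memory, not register */
            ib[1] = SDMA_PKT_POLL_REGMEM_ADDR_LO_ADDR_31_0(lower_32_bits(addr));
            ib[2] = SDMA_PKT_POLL_REGMEM_ADDR_HI_ADDR_63_32(upper_32_bits(addr));
            ib[3] = SDMA_PKT_POLL_REGMEM_VALUE_VALUE(value);
            ib[4] = SDMA_PKT_POLL_REGMEM_MASK_MASK(mask);
            ib[5] = SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(0xa) |
                    SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff);
    }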
2027 | |||
2028 | /* | ||
2029 | ** Definitions for SDMA_PKT_ATOMIC packet | ||
2030 | */ | ||
2031 | |||
2032 | /*define for HEADER word*/ | ||
2033 | /*define for op field*/ | ||
2034 | #define SDMA_PKT_ATOMIC_HEADER_op_offset 0 | ||
2035 | #define SDMA_PKT_ATOMIC_HEADER_op_mask 0x000000FF | ||
2036 | #define SDMA_PKT_ATOMIC_HEADER_op_shift 0 | ||
2037 | #define SDMA_PKT_ATOMIC_HEADER_OP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_op_mask) << SDMA_PKT_ATOMIC_HEADER_op_shift) | ||
2038 | |||
2039 | /*define for loop field*/ | ||
2040 | #define SDMA_PKT_ATOMIC_HEADER_loop_offset 0 | ||
2041 | #define SDMA_PKT_ATOMIC_HEADER_loop_mask 0x00000001 | ||
2042 | #define SDMA_PKT_ATOMIC_HEADER_loop_shift 16 | ||
2043 | #define SDMA_PKT_ATOMIC_HEADER_LOOP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_loop_mask) << SDMA_PKT_ATOMIC_HEADER_loop_shift) | ||
2044 | |||
2045 | /*define for atomic_op field*/ | ||
2046 | #define SDMA_PKT_ATOMIC_HEADER_atomic_op_offset 0 | ||
2047 | #define SDMA_PKT_ATOMIC_HEADER_atomic_op_mask 0x0000007F | ||
2048 | #define SDMA_PKT_ATOMIC_HEADER_atomic_op_shift 25 | ||
2049 | #define SDMA_PKT_ATOMIC_HEADER_ATOMIC_OP(x) (((x) & SDMA_PKT_ATOMIC_HEADER_atomic_op_mask) << SDMA_PKT_ATOMIC_HEADER_atomic_op_shift) | ||
2050 | |||
2051 | /*define for ADDR_LO word*/ | ||
2052 | /*define for addr_31_0 field*/ | ||
2053 | #define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_offset 1 | ||
2054 | #define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_mask 0xFFFFFFFF | ||
2055 | #define SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_shift 0 | ||
2056 | #define SDMA_PKT_ATOMIC_ADDR_LO_ADDR_31_0(x) (((x) & SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_mask) << SDMA_PKT_ATOMIC_ADDR_LO_addr_31_0_shift) | ||
2057 | |||
2058 | /*define for ADDR_HI word*/ | ||
2059 | /*define for addr_63_32 field*/ | ||
2060 | #define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_offset 2 | ||
2061 | #define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_mask 0xFFFFFFFF | ||
2062 | #define SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_shift 0 | ||
2063 | #define SDMA_PKT_ATOMIC_ADDR_HI_ADDR_63_32(x) (((x) & SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_mask) << SDMA_PKT_ATOMIC_ADDR_HI_addr_63_32_shift) | ||
2064 | |||
2065 | /*define for SRC_DATA_LO word*/ | ||
2066 | /*define for src_data_31_0 field*/ | ||
2067 | #define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_offset 3 | ||
2068 | #define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_mask 0xFFFFFFFF | ||
2069 | #define SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_shift 0 | ||
2070 | #define SDMA_PKT_ATOMIC_SRC_DATA_LO_SRC_DATA_31_0(x) (((x) & SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_mask) << SDMA_PKT_ATOMIC_SRC_DATA_LO_src_data_31_0_shift) | ||
2071 | |||
2072 | /*define for SRC_DATA_HI word*/ | ||
2073 | /*define for src_data_63_32 field*/ | ||
2074 | #define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_offset 4 | ||
2075 | #define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_mask 0xFFFFFFFF | ||
2076 | #define SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_shift 0 | ||
2077 | #define SDMA_PKT_ATOMIC_SRC_DATA_HI_SRC_DATA_63_32(x) (((x) & SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_mask) << SDMA_PKT_ATOMIC_SRC_DATA_HI_src_data_63_32_shift) | ||
2078 | |||
2079 | /*define for CMP_DATA_LO word*/ | ||
2080 | /*define for cmp_data_31_0 field*/ | ||
2081 | #define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_offset 5 | ||
2082 | #define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_mask 0xFFFFFFFF | ||
2083 | #define SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_shift 0 | ||
2084 | #define SDMA_PKT_ATOMIC_CMP_DATA_LO_CMP_DATA_31_0(x) (((x) & SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_mask) << SDMA_PKT_ATOMIC_CMP_DATA_LO_cmp_data_31_0_shift) | ||
2085 | |||
2086 | /*define for CMP_DATA_HI word*/ | ||
2087 | /*define for cmp_data_63_32 field*/ | ||
2088 | #define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_offset 6 | ||
2089 | #define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_mask 0xFFFFFFFF | ||
2090 | #define SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_shift 0 | ||
2091 | #define SDMA_PKT_ATOMIC_CMP_DATA_HI_CMP_DATA_63_32(x) (((x) & SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_mask) << SDMA_PKT_ATOMIC_CMP_DATA_HI_cmp_data_63_32_shift) | ||
2092 | |||
2093 | /*define for LOOP_INTERVAL word*/ | ||
2094 | /*define for loop_interval field*/ | ||
2095 | #define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_offset 7 | ||
2096 | #define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_mask 0x00001FFF | ||
2097 | #define SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_shift 0 | ||
2098 | #define SDMA_PKT_ATOMIC_LOOP_INTERVAL_LOOP_INTERVAL(x) (((x) & SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_mask) << SDMA_PKT_ATOMIC_LOOP_INTERVAL_loop_interval_shift) | ||
2099 | |||
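A hedged sketch of an atomic emission follows. SDMA_OP_ATOMIC is assumed to be defined earlier in this header; the atomic_op encoding comes from the GPU's atomic opcode table and is passed through untouched here, and loop = 1 is assumed to make the engine retry every loop_interval until the operation's compare succeeds.

    /* Illustrative sketch: looping atomic on a 64-bit location. */
    static void example_emit_atomic(u32 *ib, u64 addr, u32 atomic_op,
                                    u64 src_data, u64 cmp_data)
    {
            ib[0] = SDMA_PKT_ATOMIC_HEADER_OP(SDMA_OP_ATOMIC) |
                    SDMA_PKT_ATOMIC_HEADER_LOOP(1) |
                    SDMA_PKT_ATOMIC_HEADER_ATOMIC_OP(atomic_op);
            ib[1] = SDMA_PKT_ATOMIC_ADDR_LO_ADDR_31_0(lower_32_bits(addr));
            ib[2] = SDMA_PKT_ATOMIC_ADDR_HI_ADDR_63_32(upper_32_bits(addr));
            ib[3] = SDMA_PKT_ATOMIC_SRC_DATA_LO_SRC_DATA_31_0(lower_32_bits(src_data));
            ib[4] = SDMA_PKT_ATOMIC_SRC_DATA_HI_SRC_DATA_63_32(upper_32_bits(src_data));
            ib[5] = SDMA_PKT_ATOMIC_CMP_DATA_LO_CMP_DATA_31_0(lower_32_bits(cmp_data));
            ib[6] = SDMA_PKT_ATOMIC_CMP_DATA_HI_CMP_DATA_63_32(upper_32_bits(cmp_data));
            ib[7] = SDMA_PKT_ATOMIC_LOOP_INTERVAL_LOOP_INTERVAL(0xa);
    }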
2100 | |||
2101 | /* | ||
2102 | ** Definitions for SDMA_PKT_TIMESTAMP_SET packet | ||
2103 | */ | ||
2104 | |||
2105 | /*define for HEADER word*/ | ||
2106 | /*define for op field*/ | ||
2107 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_op_offset 0 | ||
2108 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask 0x000000FF | ||
2109 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift 0 | ||
2110 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_op_shift) | ||
2111 | |||
2112 | /*define for sub_op field*/ | ||
2113 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_offset 0 | ||
2114 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask 0x000000FF | ||
2115 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift 8 | ||
2116 | #define SDMA_PKT_TIMESTAMP_SET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_SET_HEADER_sub_op_shift) | ||
2117 | |||
2118 | /*define for INIT_DATA_LO word*/ | ||
2119 | /*define for init_data_31_0 field*/ | ||
2120 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_offset 1 | ||
2121 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask 0xFFFFFFFF | ||
2122 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift 0 | ||
2123 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_INIT_DATA_31_0(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_LO_init_data_31_0_shift) | ||
2124 | |||
2125 | /*define for INIT_DATA_HI word*/ | ||
2126 | /*define for init_data_63_32 field*/ | ||
2127 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_offset 2 | ||
2128 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask 0xFFFFFFFF | ||
2129 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift 0 | ||
2130 | #define SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_INIT_DATA_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_mask) << SDMA_PKT_TIMESTAMP_SET_INIT_DATA_HI_init_data_63_32_shift) | ||
2131 | |||
2132 | |||
2133 | /* | ||
2134 | ** Definitions for SDMA_PKT_TIMESTAMP_GET packet | ||
2135 | */ | ||
2136 | |||
2137 | /*define for HEADER word*/ | ||
2138 | /*define for op field*/ | ||
2139 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_op_offset 0 | ||
2140 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask 0x000000FF | ||
2141 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift 0 | ||
2142 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_op_shift) | ||
2143 | |||
2144 | /*define for sub_op field*/ | ||
2145 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_offset 0 | ||
2146 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask 0x000000FF | ||
2147 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift 8 | ||
2148 | #define SDMA_PKT_TIMESTAMP_GET_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_HEADER_sub_op_shift) | ||
2149 | |||
2150 | /*define for WRITE_ADDR_LO word*/ | ||
2151 | /*define for write_addr_31_3 field*/ | ||
2152 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_offset 1 | ||
2153 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask 0x1FFFFFFF | ||
2154 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift 3 | ||
2155 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_LO_write_addr_31_3_shift) | ||
2156 | |||
2157 | /*define for WRITE_ADDR_HI word*/ | ||
2158 | /*define for write_addr_63_32 field*/ | ||
2159 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_offset 2 | ||
2160 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask 0xFFFFFFFF | ||
2161 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift 0 | ||
2162 | #define SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_WRITE_ADDR_HI_write_addr_63_32_shift) | ||
2163 | |||
2164 | |||
2165 | /* | ||
2166 | ** Definitions for SDMA_PKT_TIMESTAMP_GET_GLOBAL packet | ||
2167 | */ | ||
2168 | |||
2169 | /*define for HEADER word*/ | ||
2170 | /*define for op field*/ | ||
2171 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_offset 0 | ||
2172 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask 0x000000FF | ||
2173 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift 0 | ||
2174 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_op_shift) | ||
2175 | |||
2176 | /*define for sub_op field*/ | ||
2177 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_offset 0 | ||
2178 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask 0x000000FF | ||
2179 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift 8 | ||
2180 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_HEADER_sub_op_shift) | ||
2181 | |||
2182 | /*define for WRITE_ADDR_LO word*/ | ||
2183 | /*define for write_addr_31_3 field*/ | ||
2184 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_offset 1 | ||
2185 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask 0x1FFFFFFF | ||
2186 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift 3 | ||
2187 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_WRITE_ADDR_31_3(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_LO_write_addr_31_3_shift) | ||
2188 | |||
2189 | /*define for WRITE_ADDR_HI word*/ | ||
2190 | /*define for write_addr_63_32 field*/ | ||
2191 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_offset 2 | ||
2192 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask 0xFFFFFFFF | ||
2193 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift 0 | ||
2194 | #define SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_WRITE_ADDR_63_32(x) (((x) & SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_mask) << SDMA_PKT_TIMESTAMP_GET_GLOBAL_WRITE_ADDR_HI_write_addr_63_32_shift) | ||
2195 | |||
2196 | |||
2197 | /* | ||
2198 | ** Definitions for SDMA_PKT_TRAP packet | ||
2199 | */ | ||
2200 | |||
2201 | /*define for HEADER word*/ | ||
2202 | /*define for op field*/ | ||
2203 | #define SDMA_PKT_TRAP_HEADER_op_offset 0 | ||
2204 | #define SDMA_PKT_TRAP_HEADER_op_mask 0x000000FF | ||
2205 | #define SDMA_PKT_TRAP_HEADER_op_shift 0 | ||
2206 | #define SDMA_PKT_TRAP_HEADER_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_op_mask) << SDMA_PKT_TRAP_HEADER_op_shift) | ||
2207 | |||
2208 | /*define for sub_op field*/ | ||
2209 | #define SDMA_PKT_TRAP_HEADER_sub_op_offset 0 | ||
2210 | #define SDMA_PKT_TRAP_HEADER_sub_op_mask 0x000000FF | ||
2211 | #define SDMA_PKT_TRAP_HEADER_sub_op_shift 8 | ||
2212 | #define SDMA_PKT_TRAP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_TRAP_HEADER_sub_op_mask) << SDMA_PKT_TRAP_HEADER_sub_op_shift) | ||
2213 | |||
2214 | /*define for INT_CONTEXT word*/ | ||
2215 | /*define for int_context field*/ | ||
2216 | #define SDMA_PKT_TRAP_INT_CONTEXT_int_context_offset 1 | ||
2217 | #define SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask 0x0FFFFFFF | ||
2218 | #define SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift 0 | ||
2219 | #define SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(x) (((x) & SDMA_PKT_TRAP_INT_CONTEXT_int_context_mask) << SDMA_PKT_TRAP_INT_CONTEXT_int_context_shift) | ||
2220 | |||
2221 | |||
2222 | /* | ||
2223 | ** Definitions for SDMA_PKT_NOP packet | ||
2224 | */ | ||
2225 | |||
2226 | /*define for HEADER word*/ | ||
2227 | /*define for op field*/ | ||
2228 | #define SDMA_PKT_NOP_HEADER_op_offset 0 | ||
2229 | #define SDMA_PKT_NOP_HEADER_op_mask 0x000000FF | ||
2230 | #define SDMA_PKT_NOP_HEADER_op_shift 0 | ||
2231 | #define SDMA_PKT_NOP_HEADER_OP(x) (((x) & SDMA_PKT_NOP_HEADER_op_mask) << SDMA_PKT_NOP_HEADER_op_shift) | ||
2232 | |||
2233 | /*define for sub_op field*/ | ||
2234 | #define SDMA_PKT_NOP_HEADER_sub_op_offset 0 | ||
2235 | #define SDMA_PKT_NOP_HEADER_sub_op_mask 0x000000FF | ||
2236 | #define SDMA_PKT_NOP_HEADER_sub_op_shift 8 | ||
2237 | #define SDMA_PKT_NOP_HEADER_SUB_OP(x) (((x) & SDMA_PKT_NOP_HEADER_sub_op_mask) << SDMA_PKT_NOP_HEADER_sub_op_shift) | ||
2238 | |||
2239 | |||
2240 | #endif /* __TONGA_SDMA_PKT_OPEN_H_ */ | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c new file mode 100644 index 000000000000..5fc53a40c7ac --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c | |||
@@ -0,0 +1,852 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/firmware.h> | ||
25 | #include "drmP.h" | ||
26 | #include "amdgpu.h" | ||
27 | #include "tonga_ppsmc.h" | ||
28 | #include "tonga_smumgr.h" | ||
29 | #include "smu_ucode_xfer_vi.h" | ||
30 | #include "amdgpu_ucode.h" | ||
31 | |||
32 | #include "smu/smu_7_1_2_d.h" | ||
33 | #include "smu/smu_7_1_2_sh_mask.h" | ||
34 | |||
35 | #define TONGA_SMC_SIZE 0x20000 | ||
36 | |||
37 | static int tonga_set_smc_sram_address(struct amdgpu_device *adev, uint32_t smc_address, uint32_t limit) | ||
38 | { | ||
39 | uint32_t val; | ||
40 | |||
41 | if (smc_address & 3) | ||
42 | return -EINVAL; | ||
43 | |||
44 | if ((smc_address + 3) > limit) | ||
45 | return -EINVAL; | ||
46 | |||
47 | WREG32(mmSMC_IND_INDEX_0, smc_address); | ||
48 | |||
49 | val = RREG32(mmSMC_IND_ACCESS_CNTL); | ||
50 | val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); | ||
51 | WREG32(mmSMC_IND_ACCESS_CNTL, val); | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
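tonga_set_smc_sram_address() programs the indirect index register (with auto-increment disabled) so that subsequent accesses to mmSMC_IND_DATA_0 hit SMC SRAM. The patch only adds the write path; as a hedged sketch (not in the patch), the matching read path through the same index/data pair would look like this, with the caller holding adev->smc_idx_lock as tonga_copy_bytes_to_smc() below does:

    /* Illustrative sketch: read one dword of SMC SRAM. */
    static int example_read_smc_sram_dword(struct amdgpu_device *adev,
                                           uint32_t smc_address,
                                           uint32_t *value, uint32_t limit)
    {
            int result = tonga_set_smc_sram_address(adev, smc_address, limit);

            if (result == 0)
                    *value = RREG32(mmSMC_IND_DATA_0);
            return result;
    }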
56 | static int tonga_copy_bytes_to_smc(struct amdgpu_device *adev, uint32_t smc_start_address, const uint8_t *src, uint32_t byte_count, uint32_t limit) | ||
57 | { | ||
58 | uint32_t addr; | ||
59 | uint32_t data, orig_data; | ||
60 | int result = 0; | ||
61 | uint32_t extra_shift; | ||
62 | unsigned long flags; | ||
63 | |||
64 | if (smc_start_address & 3) | ||
65 | return -EINVAL; | ||
66 | |||
67 | if ((smc_start_address + byte_count) > limit) | ||
68 | return -EINVAL; | ||
69 | |||
70 | addr = smc_start_address; | ||
71 | |||
72 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
73 | while (byte_count >= 4) { | ||
74 | /* Bytes are written into the SMC address space with the MSB first */ | ||
75 | data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; | ||
76 | |||
77 | result = tonga_set_smc_sram_address(adev, addr, limit); | ||
78 | |||
79 | if (result) | ||
80 | goto out; | ||
81 | |||
82 | WREG32(mmSMC_IND_DATA_0, data); | ||
83 | |||
84 | src += 4; | ||
85 | byte_count -= 4; | ||
86 | addr += 4; | ||
87 | } | ||
88 | |||
89 | if (byte_count) { | ||
90 | /* Now write the remaining bytes with a read-modify-write cycle */ | ||
91 | data = 0; | ||
92 | |||
93 | result = tonga_set_smc_sram_address(adev, addr, limit); | ||
94 | if (result) | ||
95 | goto out; | ||
96 | |||
97 | orig_data = RREG32(mmSMC_IND_DATA_0); | ||
98 | extra_shift = 8 * (4 - byte_count); | ||
99 | |||
100 | while (byte_count > 0) { | ||
101 | data = (data << 8) + *src++; | ||
102 | byte_count--; | ||
103 | } | ||
104 | |||
105 | data <<= extra_shift; | ||
106 | data |= (orig_data & ~((~0UL) << extra_shift)); | ||
107 | |||
108 | result = tonga_set_smc_sram_address(adev, addr, limit); | ||
109 | if (result) | ||
110 | goto out; | ||
111 | |||
112 | WREG32(mmSMC_IND_DATA_0, data); | ||
113 | } | ||
114 | |||
115 | out: | ||
116 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
117 | return result; | ||
118 | } | ||
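
For clarity, the tail handling above can be checked in isolation: the remaining one to three bytes are accumulated MSB first, left-aligned, and merged with the low bytes of the word already in SRAM. A minimal host-side sketch (pack_tail() and the sample values are illustrative, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors the read-modify-write tail of tonga_copy_bytes_to_smc();
	 * "old" stands in for the RREG32(mmSMC_IND_DATA_0) read. */
	static uint32_t pack_tail(uint32_t old, const uint8_t *src, unsigned count)
	{
		unsigned extra_shift = 8 * (4 - count);
		uint32_t data = 0;

		while (count--)
			data = (data << 8) + *src++;

		data <<= extra_shift;                  /* left-align the new bytes */
		data |= old & ~(~0UL << extra_shift);  /* keep the old low bytes */
		return data;
	}

	int main(void)
	{
		const uint8_t tail[2] = { 0xAA, 0xBB };

		/* 0xAABB lands in the two high bytes, 0x3344 survives below */
		printf("0x%08X\n", pack_tail(0x11223344, tail, 2)); /* 0xAABB3344 */
		return 0;
	}
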
119 | |||
120 | static int tonga_program_jump_on_start(struct amdgpu_device *adev) | ||
121 | { | ||
122 | static const unsigned char data[] = {0xE0, 0x00, 0x80, 0x40}; | ||
123 | | ||
124 | /* propagate any copy failure instead of ignoring it */ | ||
125 | return tonga_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); | ||
126 | } | ||
127 | |||
128 | static bool tonga_is_smc_ram_running(struct amdgpu_device *adev) | ||
129 | { | ||
130 | uint32_t val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
131 | val = REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable); | ||
132 | |||
133 | return ((val == 0) && (RREG32_SMC(ixSMC_PC_C) >= 0x20100)); | ||
134 | } | ||
135 | |||
136 | static int wait_smu_response(struct amdgpu_device *adev) | ||
137 | { | ||
138 | int i; | ||
139 | uint32_t val; | ||
140 | |||
141 | for (i = 0; i < adev->usec_timeout; i++) { | ||
142 | val = RREG32(mmSMC_RESP_0); | ||
143 | if (REG_GET_FIELD(val, SMC_RESP_0, SMC_RESP)) | ||
144 | break; | ||
145 | udelay(1); | ||
146 | } | ||
147 | |||
148 | if (i == adev->usec_timeout) | ||
149 | return -EINVAL; | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static int tonga_send_msg_to_smc_offset(struct amdgpu_device *adev) | ||
155 | { | ||
156 | if (wait_smu_response(adev)) { | ||
157 | DRM_ERROR("Failed to send previous message\n"); | ||
158 | return -EINVAL; | ||
159 | } | ||
160 | |||
161 | WREG32(mmSMC_MSG_ARG_0, 0x20000); | ||
162 | WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_Test); | ||
163 | |||
164 | if (wait_smu_response(adev)) { | ||
165 | DRM_ERROR("Failed to send message\n"); | ||
166 | return -EINVAL; | ||
167 | } | ||
168 | |||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) | ||
173 | { | ||
174 | if (!tonga_is_smc_ram_running(adev)) | ||
175 | return -EINVAL; | ||
178 | |||
179 | if (wait_smu_response(adev)) { | ||
180 | DRM_ERROR("Failed to send previous message\n"); | ||
181 | return -EINVAL; | ||
182 | } | ||
183 | |||
184 | WREG32(mmSMC_MESSAGE_0, msg); | ||
185 | |||
186 | if (wait_smu_response(adev)) { | ||
187 | DRM_ERROR("Failed to send message\n"); | ||
188 | return -EINVAL; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static int tonga_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, | ||
195 | PPSMC_Msg msg) | ||
196 | { | ||
197 | if (wait_smu_response(adev)) { | ||
198 | DRM_ERROR("Failed to send previous message\n"); | ||
199 | return -EINVAL; | ||
200 | } | ||
201 | |||
202 | WREG32(mmSMC_MESSAGE_0, msg); | ||
203 | |||
204 | return 0; | ||
205 | } | ||
206 | |||
207 | static int tonga_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, | ||
208 | PPSMC_Msg msg, | ||
209 | uint32_t parameter) | ||
210 | { | ||
211 | if (!tonga_is_smc_ram_running(adev)) | ||
212 | return -EINVAL; | ||
213 | |||
214 | if (wait_smu_response(adev)) { | ||
215 | DRM_ERROR("Failed to send previous message\n"); | ||
216 | return -EINVAL; | ||
217 | } | ||
218 | |||
219 | WREG32(mmSMC_MSG_ARG_0, parameter); | ||
220 | |||
221 | return tonga_send_msg_to_smc(adev, msg); | ||
222 | } | ||
223 | |||
224 | static int tonga_send_msg_to_smc_with_parameter_without_waiting( | ||
225 | struct amdgpu_device *adev, | ||
226 | PPSMC_Msg msg, uint32_t parameter) | ||
227 | { | ||
228 | if (wait_smu_response(adev)) { | ||
229 | DRM_ERROR("Failed to send previous message\n"); | ||
230 | return -EINVAL; | ||
231 | } | ||
232 | |||
233 | WREG32(mmSMC_MSG_ARG_0, parameter); | ||
234 | |||
235 | return tonga_send_msg_to_smc_without_waiting(adev, msg); | ||
236 | } | ||
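
All of the tonga_send_msg_* variants above follow the same mailbox handshake: poll SMC_RESP_0 until the previous message is acknowledged, write the argument to SMC_MSG_ARG_0, write the message id to SMC_MESSAGE_0, and optionally poll again. A condensed model of that protocol (the fake_* variables and instant acknowledgement are stand-ins for the real MMIO accessors, not driver code):

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	static uint32_t fake_arg, fake_msg, fake_resp = 1;	/* toy register file */

	/* mirrors wait_smu_response(): the real code polls with udelay(1)
	 * for up to adev->usec_timeout iterations before giving up */
	static bool smc_ready(void)
	{
		return fake_resp != 0;
	}

	/* mirrors tonga_send_msg_to_smc_with_parameter(): argument first,
	 * then the message id, then wait for the acknowledgement */
	static int smc_send(uint32_t msg, uint32_t arg)
	{
		if (!smc_ready())
			return -1;	/* previous message still pending */
		fake_arg = arg;		/* WREG32(mmSMC_MSG_ARG_0, parameter) */
		fake_msg = msg;		/* WREG32(mmSMC_MESSAGE_0, msg) */
		return smc_ready() ? 0 : -1;
	}

	int main(void)
	{
		int r = smc_send(0x1, 0x20000);

		printf("send: %d (msg=0x%X arg=0x%X)\n",
		       r, (unsigned)fake_msg, (unsigned)fake_arg);
		return 0;
	}
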
237 | |||
238 | #if 0 /* not used yet */ | ||
239 | static int tonga_wait_for_smc_inactive(struct amdgpu_device *adev) | ||
240 | { | ||
241 | int i; | ||
242 | uint32_t val; | ||
243 | |||
244 | if (!tonga_is_smc_ram_running(adev)) | ||
245 | return -EINVAL; | ||
246 | |||
247 | for (i = 0; i < adev->usec_timeout; i++) { | ||
248 | val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
249 | if (REG_GET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, cken) == 0) | ||
250 | break; | ||
251 | udelay(1); | ||
252 | } | ||
253 | |||
254 | if (i == adev->usec_timeout) | ||
255 | return -EINVAL; | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | #endif | ||
260 | |||
261 | static int tonga_smu_upload_firmware_image(struct amdgpu_device *adev) | ||
262 | { | ||
263 | const struct smc_firmware_header_v1_0 *hdr; | ||
264 | uint32_t ucode_size; | ||
265 | uint32_t ucode_start_address; | ||
266 | const uint8_t *src; | ||
267 | uint32_t val; | ||
268 | uint32_t byte_count; | ||
269 | uint32_t *data; | ||
270 | unsigned long flags; | ||
271 | |||
272 | if (!adev->pm.fw) | ||
273 | return -EINVAL; | ||
274 | |||
275 | hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; | ||
276 | amdgpu_ucode_print_smc_hdr(&hdr->header); | ||
277 | |||
278 | adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); | ||
279 | ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); | ||
280 | ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); | ||
281 | src = (const uint8_t *) | ||
282 | (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
283 | |||
284 | if (ucode_size & 3) { | ||
285 | DRM_ERROR("SMC ucode is not 4 bytes aligned\n"); | ||
286 | return -EINVAL; | ||
287 | } | ||
288 | |||
289 | if (ucode_size > TONGA_SMC_SIZE) { | ||
290 | DRM_ERROR("SMC address is beyond the SMC RAM area\n"); | ||
291 | return -EINVAL; | ||
292 | } | ||
293 | |||
294 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
295 | WREG32(mmSMC_IND_INDEX_0, ucode_start_address); | ||
296 | |||
297 | val = RREG32(mmSMC_IND_ACCESS_CNTL); | ||
298 | val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1); | ||
299 | WREG32(mmSMC_IND_ACCESS_CNTL, val); | ||
300 | |||
301 | byte_count = ucode_size; | ||
302 | data = (uint32_t *)src; | ||
303 | for (; byte_count >= 4; data++, byte_count -= 4) | ||
304 | WREG32(mmSMC_IND_DATA_0, data[0]); | ||
305 | |||
306 | val = RREG32(mmSMC_IND_ACCESS_CNTL); | ||
307 | val = REG_SET_FIELD(val, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0); | ||
308 | WREG32(mmSMC_IND_ACCESS_CNTL, val); | ||
309 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | #if 0 /* not used yet */ | ||
315 | static int tonga_read_smc_sram_dword(struct amdgpu_device *adev, | ||
316 | uint32_t smc_address, | ||
317 | uint32_t *value, | ||
318 | uint32_t limit) | ||
319 | { | ||
320 | int result; | ||
321 | unsigned long flags; | ||
322 | |||
323 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
324 | result = tonga_set_smc_sram_address(adev, smc_address, limit); | ||
325 | if (result == 0) | ||
326 | *value = RREG32(mmSMC_IND_DATA_0); | ||
327 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
328 | return result; | ||
329 | } | ||
330 | |||
331 | static int tonga_write_smc_sram_dword(struct amdgpu_device *adev, | ||
332 | uint32_t smc_address, | ||
333 | uint32_t value, | ||
334 | uint32_t limit) | ||
335 | { | ||
336 | int result; | ||
337 | unsigned long flags; | ||
338 | |||
339 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
340 | result = tonga_set_smc_sram_address(adev, smc_address, limit); | ||
341 | if (result == 0) | ||
342 | WREG32(mmSMC_IND_DATA_0, value); | ||
343 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
344 | return result; | ||
345 | } | ||
346 | |||
347 | static int tonga_smu_stop_smc(struct amdgpu_device *adev) | ||
348 | { | ||
349 | uint32_t val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | ||
350 | val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); | ||
351 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); | ||
352 | |||
353 | val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
354 | val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1); | ||
355 | WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); | ||
356 | |||
357 | return 0; | ||
358 | } | ||
359 | #endif | ||
360 | |||
361 | static enum AMDGPU_UCODE_ID tonga_convert_fw_type(uint32_t fw_type) | ||
362 | { | ||
363 | switch (fw_type) { | ||
364 | case UCODE_ID_SDMA0: | ||
365 | return AMDGPU_UCODE_ID_SDMA0; | ||
366 | case UCODE_ID_SDMA1: | ||
367 | return AMDGPU_UCODE_ID_SDMA1; | ||
368 | case UCODE_ID_CP_CE: | ||
369 | return AMDGPU_UCODE_ID_CP_CE; | ||
370 | case UCODE_ID_CP_PFP: | ||
371 | return AMDGPU_UCODE_ID_CP_PFP; | ||
372 | case UCODE_ID_CP_ME: | ||
373 | return AMDGPU_UCODE_ID_CP_ME; | ||
374 | case UCODE_ID_CP_MEC: | ||
375 | case UCODE_ID_CP_MEC_JT1: | ||
376 | return AMDGPU_UCODE_ID_CP_MEC1; | ||
377 | case UCODE_ID_CP_MEC_JT2: | ||
378 | return AMDGPU_UCODE_ID_CP_MEC2; | ||
379 | case UCODE_ID_RLC_G: | ||
380 | return AMDGPU_UCODE_ID_RLC_G; | ||
381 | default: | ||
382 | DRM_ERROR("ucode type is out of range!\n"); | ||
383 | return AMDGPU_UCODE_ID_MAXIMUM; | ||
384 | } | ||
385 | } | ||
386 | |||
387 | static int tonga_smu_populate_single_firmware_entry(struct amdgpu_device *adev, | ||
388 | uint32_t fw_type, | ||
389 | struct SMU_Entry *entry) | ||
390 | { | ||
391 | enum AMDGPU_UCODE_ID id = tonga_convert_fw_type(fw_type); | ||
392 | struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id]; | ||
393 | const struct gfx_firmware_header_v1_0 *header = NULL; | ||
394 | uint64_t gpu_addr; | ||
395 | uint32_t data_size; | ||
396 | |||
397 | if (ucode->fw == NULL) | ||
398 | return -EINVAL; | ||
399 | |||
400 | gpu_addr = ucode->mc_addr; | ||
401 | header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data; | ||
402 | data_size = le32_to_cpu(header->header.ucode_size_bytes); | ||
403 | |||
404 | if ((fw_type == UCODE_ID_CP_MEC_JT1) || | ||
405 | (fw_type == UCODE_ID_CP_MEC_JT2)) { | ||
406 | gpu_addr += le32_to_cpu(header->jt_offset) << 2; | ||
407 | data_size = le32_to_cpu(header->jt_size) << 2; | ||
408 | } | ||
409 | |||
410 | entry->version = (uint16_t)le32_to_cpu(header->header.ucode_version); | ||
411 | entry->id = (uint16_t)fw_type; | ||
412 | entry->image_addr_high = upper_32_bits(gpu_addr); | ||
413 | entry->image_addr_low = lower_32_bits(gpu_addr); | ||
414 | entry->meta_data_addr_high = 0; | ||
415 | entry->meta_data_addr_low = 0; | ||
416 | entry->data_size_byte = data_size; | ||
417 | entry->num_register_entries = 0; | ||
418 | |||
419 | if (fw_type == UCODE_ID_RLC_G) | ||
420 | entry->flags = 1; | ||
421 | else | ||
422 | entry->flags = 0; | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | static int tonga_smu_request_load_fw(struct amdgpu_device *adev) | ||
428 | { | ||
429 | struct tonga_smu_private_data *private = (struct tonga_smu_private_data *)adev->smu.priv; | ||
430 | struct SMU_DRAMData_TOC *toc; | ||
431 | uint32_t fw_to_load; | ||
432 | |||
433 | WREG32_SMC(ixSOFT_REGISTERS_TABLE_28, 0); | ||
434 | |||
435 | tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_HI, private->smu_buffer_addr_high); | ||
436 | tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SMU_DRAM_ADDR_LO, private->smu_buffer_addr_low); | ||
437 | |||
438 | toc = (struct SMU_DRAMData_TOC *)private->header; | ||
439 | toc->num_entries = 0; | ||
440 | toc->structure_version = 1; | ||
441 | |||
442 | if (!adev->firmware.smu_load) | ||
443 | return 0; | ||
444 | |||
445 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_RLC_G, | ||
446 | &toc->entry[toc->num_entries++])) { | ||
447 | DRM_ERROR("Failed to get firmware entry for RLC\n"); | ||
448 | return -EINVAL; | ||
449 | } | ||
450 | |||
451 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_CE, | ||
452 | &toc->entry[toc->num_entries++])) { | ||
453 | DRM_ERROR("Failed to get firmware entry for CE\n"); | ||
454 | return -EINVAL; | ||
455 | } | ||
456 | |||
457 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_PFP, | ||
458 | &toc->entry[toc->num_entries++])) { | ||
459 | DRM_ERROR("Failed to get firmware entry for PFP\n"); | ||
460 | return -EINVAL; | ||
461 | } | ||
462 | |||
463 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_ME, | ||
464 | &toc->entry[toc->num_entries++])) { | ||
465 | DRM_ERROR("Failed to get firmware entry for ME\n"); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | |||
469 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC, | ||
470 | &toc->entry[toc->num_entries++])) { | ||
471 | DRM_ERROR("Failed to get firmware entry for MEC\n"); | ||
472 | return -EINVAL; | ||
473 | } | ||
474 | |||
475 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT1, | ||
476 | &toc->entry[toc->num_entries++])) { | ||
477 | DRM_ERROR("Failed to get firmware entry for MEC_JT1\n"); | ||
478 | return -EINVAL; | ||
479 | } | ||
480 | |||
481 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, | ||
482 | &toc->entry[toc->num_entries++])) { | ||
483 | DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); | ||
484 | return -EINVAL; | ||
485 | } | ||
486 | |||
487 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, | ||
488 | &toc->entry[toc->num_entries++])) { | ||
489 | DRM_ERROR("Failed to get firmware entry for SDMA0\n"); | ||
490 | return -EINVAL; | ||
491 | } | ||
492 | |||
493 | if (tonga_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA1, | ||
494 | &toc->entry[toc->num_entries++])) { | ||
495 | DRM_ERROR("Failed to get firmware entry for SDMA1\n"); | ||
496 | return -EINVAL; | ||
497 | } | ||
498 | |||
499 | tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_HI, private->header_addr_high); | ||
500 | tonga_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DRV_DRAM_ADDR_LO, private->header_addr_low); | ||
501 | |||
502 | fw_to_load = UCODE_ID_RLC_G_MASK | | ||
503 | UCODE_ID_SDMA0_MASK | | ||
504 | UCODE_ID_SDMA1_MASK | | ||
505 | UCODE_ID_CP_CE_MASK | | ||
506 | UCODE_ID_CP_ME_MASK | | ||
507 | UCODE_ID_CP_PFP_MASK | | ||
508 | UCODE_ID_CP_MEC_MASK; | ||
509 | |||
510 | if (tonga_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { | ||
511 | DRM_ERROR("Fail to request SMU load ucode\n"); | ||
512 | return -EINVAL; | ||
513 | } | ||
514 | |||
515 | return 0; | ||
516 | } | ||
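
The nine back-to-back populate calls above are candidates for a table-driven loop; a hedged refactoring sketch (same helper, but the array and loop are editorial, not in the patch, and assume an `unsigned i;` declared alongside fw_to_load):

	static const uint32_t tonga_fw_list[] = {
		UCODE_ID_RLC_G, UCODE_ID_CP_CE, UCODE_ID_CP_PFP, UCODE_ID_CP_ME,
		UCODE_ID_CP_MEC, UCODE_ID_CP_MEC_JT1, UCODE_ID_CP_MEC_JT2,
		UCODE_ID_SDMA0, UCODE_ID_SDMA1,
	};

	/* inside tonga_smu_request_load_fw(), replacing the if-ladder */
	for (i = 0; i < ARRAY_SIZE(tonga_fw_list); i++) {
		if (tonga_smu_populate_single_firmware_entry(adev,
				tonga_fw_list[i],
				&toc->entry[toc->num_entries++])) {
			DRM_ERROR("Failed to get firmware entry %u\n",
				  tonga_fw_list[i]);
			return -EINVAL;
		}
	}
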
517 | |||
518 | static uint32_t tonga_smu_get_mask_for_fw_type(uint32_t fw_type) | ||
519 | { | ||
520 | switch (fw_type) { | ||
521 | case AMDGPU_UCODE_ID_SDMA0: | ||
522 | return UCODE_ID_SDMA0_MASK; | ||
523 | case AMDGPU_UCODE_ID_SDMA1: | ||
524 | return UCODE_ID_SDMA1_MASK; | ||
525 | case AMDGPU_UCODE_ID_CP_CE: | ||
526 | return UCODE_ID_CP_CE_MASK; | ||
527 | case AMDGPU_UCODE_ID_CP_PFP: | ||
528 | return UCODE_ID_CP_PFP_MASK; | ||
529 | case AMDGPU_UCODE_ID_CP_ME: | ||
530 | return UCODE_ID_CP_ME_MASK; | ||
531 | case AMDGPU_UCODE_ID_CP_MEC1: | ||
532 | return UCODE_ID_CP_MEC_MASK; | ||
533 | case AMDGPU_UCODE_ID_CP_MEC2: | ||
534 | return UCODE_ID_CP_MEC_MASK; | ||
535 | case AMDGPU_UCODE_ID_RLC_G: | ||
536 | return UCODE_ID_RLC_G_MASK; | ||
537 | default: | ||
538 | DRM_ERROR("ucode type is out of range!\n"); | ||
539 | return 0; | ||
540 | } | ||
541 | } | ||
542 | |||
543 | static int tonga_smu_check_fw_load_finish(struct amdgpu_device *adev, | ||
544 | uint32_t fw_type) | ||
545 | { | ||
546 | uint32_t fw_mask = tonga_smu_get_mask_for_fw_type(fw_type); | ||
547 | int i; | ||
548 | |||
549 | for (i = 0; i < adev->usec_timeout; i++) { | ||
550 | if (fw_mask == (RREG32_SMC(ixSOFT_REGISTERS_TABLE_28) & fw_mask)) | ||
551 | break; | ||
552 | udelay(1); | ||
553 | } | ||
554 | |||
555 | if (i == adev->usec_timeout) { | ||
556 | DRM_ERROR("check firmware loading failed\n"); | ||
557 | return -EINVAL; | ||
558 | } | ||
559 | |||
560 | return 0; | ||
561 | } | ||
562 | |||
563 | static int tonga_smu_start_in_protection_mode(struct amdgpu_device *adev) | ||
564 | { | ||
565 | int result; | ||
566 | uint32_t val; | ||
567 | int i; | ||
568 | |||
569 | /* Assert reset */ | ||
570 | val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | ||
571 | val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); | ||
572 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); | ||
573 | |||
574 | result = tonga_smu_upload_firmware_image(adev); | ||
575 | if (result) | ||
576 | return result; | ||
577 | |||
578 | /* Clear status */ | ||
579 | WREG32_SMC(ixSMU_STATUS, 0); | ||
580 | |||
581 | /* Enable clock */ | ||
582 | val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
583 | val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); | ||
584 | WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); | ||
585 | |||
586 | /* De-assert reset */ | ||
587 | val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | ||
588 | val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); | ||
589 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); | ||
590 | |||
591 | /* Set SMU Auto Start */ | ||
592 | val = RREG32_SMC(ixSMU_INPUT_DATA); | ||
593 | val = REG_SET_FIELD(val, SMU_INPUT_DATA, AUTO_START, 1); | ||
594 | WREG32_SMC(ixSMU_INPUT_DATA, val); | ||
595 | |||
596 | /* Clear firmware interrupt enable flag */ | ||
597 | WREG32_SMC(ixFIRMWARE_FLAGS, 0); | ||
598 | |||
599 | for (i = 0; i < adev->usec_timeout; i++) { | ||
600 | val = RREG32_SMC(ixRCU_UC_EVENTS); | ||
601 | if (REG_GET_FIELD(val, RCU_UC_EVENTS, INTERRUPTS_ENABLED)) | ||
602 | break; | ||
603 | udelay(1); | ||
604 | } | ||
605 | |||
606 | if (i == adev->usec_timeout) { | ||
607 | DRM_ERROR("Interrupt is not enabled by firmware\n"); | ||
608 | return -EINVAL; | ||
609 | } | ||
610 | |||
611 | /* Call Test SMU message with 0x20000 offset | ||
612 | * to trigger SMU start | ||
613 | */ | ||
614 | tonga_send_msg_to_smc_offset(adev); | ||
615 | |||
616 | /* Wait for done bit to be set */ | ||
617 | for (i = 0; i < adev->usec_timeout; i++) { | ||
618 | val = RREG32_SMC(ixSMU_STATUS); | ||
619 | if (REG_GET_FIELD(val, SMU_STATUS, SMU_DONE)) | ||
620 | break; | ||
621 | udelay(1); | ||
622 | } | ||
623 | |||
624 | if (i == adev->usec_timeout) { | ||
625 | DRM_ERROR("Timeout for SMU start\n"); | ||
626 | return -EINVAL; | ||
627 | } | ||
628 | |||
629 | /* Check pass/failed indicator */ | ||
630 | val = RREG32_SMC(ixSMU_STATUS); | ||
631 | if (!REG_GET_FIELD(val, SMU_STATUS, SMU_PASS)) { | ||
632 | DRM_ERROR("SMU Firmware start failed\n"); | ||
633 | return -EINVAL; | ||
634 | } | ||
635 | |||
636 | /* Wait for firmware to initialize */ | ||
637 | for (i = 0; i < adev->usec_timeout; i++) { | ||
638 | val = RREG32_SMC(ixFIRMWARE_FLAGS); | ||
639 | if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) | ||
640 | break; | ||
641 | udelay(1); | ||
642 | } | ||
643 | |||
644 | if (i == adev->usec_timeout) { | ||
645 | DRM_ERROR("SMU firmware initialization failed\n"); | ||
646 | return -EINVAL; | ||
647 | } | ||
648 | |||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | static int tonga_smu_start_in_non_protection_mode(struct amdgpu_device *adev) | ||
653 | { | ||
654 | int i, result; | ||
655 | uint32_t val; | ||
656 | |||
657 | /* wait for smc boot up */ | ||
658 | for (i = 0; i < adev->usec_timeout; i++) { | ||
659 | val = RREG32_SMC(ixRCU_UC_EVENTS); | ||
660 | val = REG_GET_FIELD(val, RCU_UC_EVENTS, boot_seq_done); | ||
661 | if (val) | ||
662 | break; | ||
663 | udelay(1); | ||
664 | } | ||
665 | |||
666 | if (i == adev->usec_timeout) { | ||
667 | DRM_ERROR("SMC boot sequence is not completed\n"); | ||
668 | return -EINVAL; | ||
669 | } | ||
670 | |||
671 | /* Clear firmware interrupt enable flag */ | ||
672 | WREG32_SMC(ixFIRMWARE_FLAGS, 0); | ||
673 | |||
674 | /* Assert reset */ | ||
675 | val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | ||
676 | val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 1); | ||
677 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); | ||
678 | |||
679 | result = tonga_smu_upload_firmware_image(adev); | ||
680 | if (result) | ||
681 | return result; | ||
682 | |||
683 | /* Set the SMC instruction start point to 0x0 */ | ||
684 | tonga_program_jump_on_start(adev); | ||
685 | |||
686 | /* Enable clock */ | ||
687 | val = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0); | ||
688 | val = REG_SET_FIELD(val, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); | ||
689 | WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, val); | ||
690 | |||
691 | /* De-assert reset */ | ||
692 | val = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL); | ||
693 | val = REG_SET_FIELD(val, SMC_SYSCON_RESET_CNTL, rst_reg, 0); | ||
694 | WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, val); | ||
695 | |||
696 | /* Wait for firmware to initialize */ | ||
697 | for (i = 0; i < adev->usec_timeout; i++) { | ||
698 | val = RREG32_SMC(ixFIRMWARE_FLAGS); | ||
699 | if (REG_GET_FIELD(val, FIRMWARE_FLAGS, INTERRUPTS_ENABLED)) | ||
700 | break; | ||
701 | udelay(1); | ||
702 | } | ||
703 | |||
704 | if (i == adev->usec_timeout) { | ||
705 | DRM_ERROR("Timeout for SMC firmware initialization\n"); | ||
706 | return -EINVAL; | ||
707 | } | ||
708 | |||
709 | return 0; | ||
710 | } | ||
711 | |||
712 | int tonga_smu_start(struct amdgpu_device *adev) | ||
713 | { | ||
714 | int result; | ||
715 | uint32_t val; | ||
716 | |||
717 | if (!tonga_is_smc_ram_running(adev)) { | ||
718 | val = RREG32_SMC(ixSMU_FIRMWARE); | ||
719 | if (!REG_GET_FIELD(val, SMU_FIRMWARE, SMU_MODE)) { | ||
720 | result = tonga_smu_start_in_non_protection_mode(adev); | ||
721 | if (result) | ||
722 | return result; | ||
723 | } else { | ||
724 | result = tonga_smu_start_in_protection_mode(adev); | ||
725 | if (result) | ||
726 | return result; | ||
727 | } | ||
728 | } | ||
729 | |||
730 | return tonga_smu_request_load_fw(adev); | ||
731 | } | ||
732 | |||
733 | static const struct amdgpu_smumgr_funcs tonga_smumgr_funcs = { | ||
734 | .check_fw_load_finish = tonga_smu_check_fw_load_finish, | ||
735 | .request_smu_load_fw = NULL, | ||
736 | .request_smu_specific_fw = NULL, | ||
737 | }; | ||
738 | |||
739 | int tonga_smu_init(struct amdgpu_device *adev) | ||
740 | { | ||
741 | struct tonga_smu_private_data *private; | ||
742 | uint32_t image_size = ((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096; | ||
743 | uint32_t smu_internal_buffer_size = 200*4096; | ||
744 | struct amdgpu_bo **toc_buf = &adev->smu.toc_buf; | ||
745 | struct amdgpu_bo **smu_buf = &adev->smu.smu_buf; | ||
746 | uint64_t mc_addr; | ||
747 | void *toc_buf_ptr; | ||
748 | void *smu_buf_ptr; | ||
749 | int ret; | ||
750 | |||
751 | private = kzalloc(sizeof(struct tonga_smu_private_data), GFP_KERNEL); | ||
752 | if (!private) | ||
753 | return -ENOMEM; | ||
754 | |||
755 | /* allocate firmware buffers */ | ||
756 | if (adev->firmware.smu_load) | ||
757 | amdgpu_ucode_init_bo(adev); | ||
758 | |||
759 | adev->smu.priv = private; | ||
760 | adev->smu.fw_flags = 0; | ||
761 | |||
762 | /* Allocate FW image data structure and header buffer */ | ||
763 | ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE, | ||
764 | true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, toc_buf); | ||
765 | if (ret) { | ||
766 | DRM_ERROR("Failed to allocate memory for TOC buffer\n"); | ||
767 | return -ENOMEM; | ||
768 | } | ||
769 | |||
770 | /* Allocate buffer for SMU internal buffer */ | ||
771 | ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE, | ||
772 | true, AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, smu_buf); | ||
773 | if (ret) { | ||
774 | DRM_ERROR("Failed to allocate memory for SMU internal buffer\n"); | ||
775 | return -ENOMEM; | ||
776 | } | ||
777 | |||
778 | /* Retrieve GPU address for header buffer and internal buffer */ | ||
779 | ret = amdgpu_bo_reserve(adev->smu.toc_buf, false); | ||
780 | if (ret) { | ||
781 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
782 | DRM_ERROR("Failed to reserve the TOC buffer\n"); | ||
783 | return -EINVAL; | ||
784 | } | ||
785 | |||
786 | ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); | ||
787 | if (ret) { | ||
788 | amdgpu_bo_unreserve(adev->smu.toc_buf); | ||
789 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
790 | DRM_ERROR("Failed to pin the TOC buffer\n"); | ||
791 | return -EINVAL; | ||
792 | } | ||
793 | |||
794 | ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr); | ||
795 | if (ret) { | ||
796 | amdgpu_bo_unreserve(adev->smu.toc_buf); | ||
797 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
798 | DRM_ERROR("Failed to map the TOC buffer\n"); | ||
799 | return -EINVAL; | ||
800 | } | ||
801 | |||
802 | amdgpu_bo_unreserve(adev->smu.toc_buf); | ||
803 | private->header_addr_low = lower_32_bits(mc_addr); | ||
804 | private->header_addr_high = upper_32_bits(mc_addr); | ||
805 | private->header = toc_buf_ptr; | ||
806 | |||
807 | ret = amdgpu_bo_reserve(adev->smu.smu_buf, false); | ||
808 | if (ret) { | ||
809 | amdgpu_bo_unref(&adev->smu.smu_buf); | ||
810 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
811 | DRM_ERROR("Failed to reserve the SMU internal buffer\n"); | ||
812 | return -EINVAL; | ||
813 | } | ||
814 | |||
815 | ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_VRAM, &mc_addr); | ||
816 | if (ret) { | ||
817 | amdgpu_bo_unreserve(adev->smu.smu_buf); | ||
818 | amdgpu_bo_unref(&adev->smu.smu_buf); | ||
819 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
820 | DRM_ERROR("Failed to pin the SMU internal buffer\n"); | ||
821 | return -EINVAL; | ||
822 | } | ||
823 | |||
824 | ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr); | ||
825 | if (ret) { | ||
826 | amdgpu_bo_unreserve(adev->smu.smu_buf); | ||
827 | amdgpu_bo_unref(&adev->smu.smu_buf); | ||
828 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
829 | DRM_ERROR("Failed to map the SMU internal buffer\n"); | ||
830 | return -EINVAL; | ||
831 | } | ||
832 | |||
833 | amdgpu_bo_unreserve(adev->smu.smu_buf); | ||
834 | private->smu_buffer_addr_low = lower_32_bits(mc_addr); | ||
835 | private->smu_buffer_addr_high = upper_32_bits(mc_addr); | ||
836 | |||
837 | adev->smu.smumgr_funcs = &tonga_smumgr_funcs; | ||
838 | |||
839 | return 0; | ||
840 | } | ||
841 | |||
842 | int tonga_smu_fini(struct amdgpu_device *adev) | ||
843 | { | ||
844 | amdgpu_bo_unref(&adev->smu.toc_buf); | ||
845 | amdgpu_bo_unref(&adev->smu.smu_buf); | ||
846 | kfree(adev->smu.priv); | ||
847 | adev->smu.priv = NULL; | ||
848 | if (adev->firmware.fw_buf) | ||
849 | amdgpu_ucode_fini_bo(adev); | ||
850 | |||
851 | return 0; | ||
852 | } | ||
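
Taken together, the entry points of this file give the expected bring-up order for the VI SMU path; a hedged usage sketch (the wrapper name and the choice of RLC_G as the firmware to poll are illustrative — callers elsewhere in the series drive these individually):

	int tonga_smu_bringup(struct amdgpu_device *adev)
	{
		int r;

		r = tonga_smu_init(adev);	/* allocate + pin TOC and SMU buffers */
		if (r)
			return r;

		r = tonga_smu_start(adev);	/* boot the SMC, request the fw load */
		if (r)
			return r;

		/* per-block code then polls for its own firmware, e.g.: */
		return tonga_smu_check_fw_load_finish(adev, AMDGPU_UCODE_ID_RLC_G);
	}
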
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h b/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h new file mode 100644 index 000000000000..c031ff99fe3e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smumgr.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef TONGA_SMUMGR_H | ||
25 | #define TONGA_SMUMGR_H | ||
26 | |||
27 | #include "tonga_ppsmc.h" | ||
28 | |||
29 | int tonga_smu_init(struct amdgpu_device *adev); | ||
30 | int tonga_smu_fini(struct amdgpu_device *adev); | ||
31 | int tonga_smu_start(struct amdgpu_device *adev); | ||
32 | |||
33 | struct tonga_smu_private_data { | ||
35 | uint8_t *header; | ||
36 | uint32_t smu_buffer_addr_high; | ||
37 | uint32_t smu_buffer_addr_low; | ||
38 | uint32_t header_addr_high; | ||
39 | uint32_t header_addr_low; | ||
40 | }; | ||
41 | |||
42 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c new file mode 100644 index 000000000000..f3b3026d5932 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | |||
@@ -0,0 +1,830 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König <christian.koenig@amd.com> | ||
23 | */ | ||
24 | |||
25 | #include <linux/firmware.h> | ||
26 | #include <drm/drmP.h> | ||
27 | #include "amdgpu.h" | ||
28 | #include "amdgpu_uvd.h" | ||
29 | #include "vid.h" | ||
30 | #include "uvd/uvd_5_0_d.h" | ||
31 | #include "uvd/uvd_5_0_sh_mask.h" | ||
32 | #include "oss/oss_2_0_d.h" | ||
33 | #include "oss/oss_2_0_sh_mask.h" | ||
34 | |||
35 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); | ||
36 | static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev); | ||
37 | static int uvd_v5_0_start(struct amdgpu_device *adev); | ||
38 | static void uvd_v5_0_stop(struct amdgpu_device *adev); | ||
39 | |||
40 | /** | ||
41 | * uvd_v5_0_ring_get_rptr - get read pointer | ||
42 | * | ||
43 | * @ring: amdgpu_ring pointer | ||
44 | * | ||
45 | * Returns the current hardware read pointer | ||
46 | */ | ||
47 | static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring) | ||
48 | { | ||
49 | struct amdgpu_device *adev = ring->adev; | ||
50 | |||
51 | return RREG32(mmUVD_RBC_RB_RPTR); | ||
52 | } | ||
53 | |||
54 | /** | ||
55 | * uvd_v5_0_ring_get_wptr - get write pointer | ||
56 | * | ||
57 | * @ring: amdgpu_ring pointer | ||
58 | * | ||
59 | * Returns the current hardware write pointer | ||
60 | */ | ||
61 | static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring) | ||
62 | { | ||
63 | struct amdgpu_device *adev = ring->adev; | ||
64 | |||
65 | return RREG32(mmUVD_RBC_RB_WPTR); | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * uvd_v5_0_ring_set_wptr - set write pointer | ||
70 | * | ||
71 | * @ring: amdgpu_ring pointer | ||
72 | * | ||
73 | * Commits the write pointer to the hardware | ||
74 | */ | ||
75 | static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) | ||
76 | { | ||
77 | struct amdgpu_device *adev = ring->adev; | ||
78 | |||
79 | WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); | ||
80 | } | ||
81 | |||
82 | static int uvd_v5_0_early_init(struct amdgpu_device *adev) | ||
83 | { | ||
84 | uvd_v5_0_set_ring_funcs(adev); | ||
85 | uvd_v5_0_set_irq_funcs(adev); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static int uvd_v5_0_sw_init(struct amdgpu_device *adev) | ||
91 | { | ||
92 | struct amdgpu_ring *ring; | ||
93 | int r; | ||
94 | |||
95 | /* UVD TRAP */ | ||
96 | r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq); | ||
97 | if (r) | ||
98 | return r; | ||
99 | |||
100 | r = amdgpu_uvd_sw_init(adev); | ||
101 | if (r) | ||
102 | return r; | ||
103 | |||
104 | r = amdgpu_uvd_resume(adev); | ||
105 | if (r) | ||
106 | return r; | ||
107 | |||
108 | ring = &adev->uvd.ring; | ||
109 | sprintf(ring->name, "uvd"); | ||
110 | r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf, | ||
111 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
112 | |||
113 | return r; | ||
114 | } | ||
115 | |||
116 | static int uvd_v5_0_sw_fini(struct amdgpu_device *adev) | ||
117 | { | ||
118 | int r; | ||
119 | |||
120 | r = amdgpu_uvd_suspend(adev); | ||
121 | if (r) | ||
122 | return r; | ||
123 | |||
124 | r = amdgpu_uvd_sw_fini(adev); | ||
125 | if (r) | ||
126 | return r; | ||
127 | |||
128 | return r; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * uvd_v5_0_hw_init - start and test UVD block | ||
133 | * | ||
134 | * @adev: amdgpu_device pointer | ||
135 | * | ||
136 | * Initialize the hardware, boot up the VCPU and do some testing | ||
137 | */ | ||
138 | static int uvd_v5_0_hw_init(struct amdgpu_device *adev) | ||
139 | { | ||
140 | struct amdgpu_ring *ring = &adev->uvd.ring; | ||
141 | uint32_t tmp; | ||
142 | int r; | ||
143 | |||
144 | /* raise clocks while booting up the VCPU */ | ||
145 | amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); | ||
146 | |||
147 | r = uvd_v5_0_start(adev); | ||
148 | if (r) | ||
149 | goto done; | ||
150 | |||
151 | ring->ready = true; | ||
152 | r = amdgpu_ring_test_ring(ring); | ||
153 | if (r) { | ||
154 | ring->ready = false; | ||
155 | goto done; | ||
156 | } | ||
157 | |||
158 | r = amdgpu_ring_lock(ring, 10); | ||
159 | if (r) { | ||
160 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); | ||
161 | goto done; | ||
162 | } | ||
163 | |||
164 | tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); | ||
165 | amdgpu_ring_write(ring, tmp); | ||
166 | amdgpu_ring_write(ring, 0xFFFFF); | ||
167 | |||
168 | tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); | ||
169 | amdgpu_ring_write(ring, tmp); | ||
170 | amdgpu_ring_write(ring, 0xFFFFF); | ||
171 | |||
172 | tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); | ||
173 | amdgpu_ring_write(ring, tmp); | ||
174 | amdgpu_ring_write(ring, 0xFFFFF); | ||
175 | |||
176 | /* Clear timeout status bits */ | ||
177 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)); | ||
178 | amdgpu_ring_write(ring, 0x8); | ||
179 | |||
180 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); | ||
181 | amdgpu_ring_write(ring, 3); | ||
182 | |||
183 | amdgpu_ring_unlock_commit(ring); | ||
184 | |||
185 | done: | ||
186 | /* lower clocks again */ | ||
187 | amdgpu_asic_set_uvd_clocks(adev, 0, 0); | ||
188 | |||
189 | if (!r) | ||
190 | DRM_INFO("UVD initialized successfully.\n"); | ||
191 | |||
192 | return r; | ||
193 | } | ||
194 | |||
195 | /** | ||
196 | * uvd_v5_0_hw_fini - stop the hardware block | ||
197 | * | ||
198 | * @adev: amdgpu_device pointer | ||
199 | * | ||
200 | * Stop the UVD block, mark ring as not ready any more | ||
201 | */ | ||
202 | static int uvd_v5_0_hw_fini(struct amdgpu_device *adev) | ||
203 | { | ||
204 | struct amdgpu_ring *ring = &adev->uvd.ring; | ||
205 | |||
206 | uvd_v5_0_stop(adev); | ||
207 | ring->ready = false; | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int uvd_v5_0_suspend(struct amdgpu_device *adev) | ||
213 | { | ||
214 | int r; | ||
215 | |||
216 | r = uvd_v5_0_hw_fini(adev); | ||
217 | if (r) | ||
218 | return r; | ||
219 | |||
220 | r = amdgpu_uvd_suspend(adev); | ||
221 | if (r) | ||
222 | return r; | ||
223 | |||
224 | return r; | ||
225 | } | ||
226 | |||
227 | static int uvd_v5_0_resume(struct amdgpu_device *adev) | ||
228 | { | ||
229 | int r; | ||
230 | |||
231 | r = amdgpu_uvd_resume(adev); | ||
232 | if (r) | ||
233 | return r; | ||
234 | |||
235 | r = uvd_v5_0_hw_init(adev); | ||
236 | if (r) | ||
237 | return r; | ||
238 | |||
239 | return r; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * uvd_v5_0_mc_resume - memory controller programming | ||
244 | * | ||
245 | * @adev: amdgpu_device pointer | ||
246 | * | ||
247 | * Let the UVD memory controller know its offsets | ||
248 | */ | ||
249 | static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) | ||
250 | { | ||
251 | uint64_t offset; | ||
252 | uint32_t size; | ||
253 | |||
254 | /* program memory controller bits 0-27 */ | ||
255 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, | ||
256 | lower_32_bits(adev->uvd.gpu_addr)); | ||
257 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, | ||
258 | upper_32_bits(adev->uvd.gpu_addr)); | ||
259 | |||
260 | offset = AMDGPU_UVD_FIRMWARE_OFFSET; | ||
261 | size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); | ||
262 | WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3); | ||
263 | WREG32(mmUVD_VCPU_CACHE_SIZE0, size); | ||
264 | |||
265 | offset += size; | ||
266 | size = AMDGPU_UVD_STACK_SIZE; | ||
267 | WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); | ||
268 | WREG32(mmUVD_VCPU_CACHE_SIZE1, size); | ||
269 | |||
270 | offset += size; | ||
271 | size = AMDGPU_UVD_HEAP_SIZE; | ||
272 | WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); | ||
273 | WREG32(mmUVD_VCPU_CACHE_SIZE2, size); | ||
274 | } | ||
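
The three cache windows above are laid out back to back behind the firmware offset, and the hardware takes the offsets in 8-byte units (hence the `>> 3`). A host-side sketch of the arithmetic (all sizes here are placeholders, not the real AMDGPU_UVD_* constants):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t offset = 256;		/* stand-in for AMDGPU_UVD_FIRMWARE_OFFSET */
		uint32_t fw = 0x40000;		/* page-aligned firmware size (assumed) */
		uint32_t stack = 0x8000;	/* stand-in for AMDGPU_UVD_STACK_SIZE */
		uint32_t heap = 0x20000;	/* stand-in for AMDGPU_UVD_HEAP_SIZE */

		printf("cache0: off=%u size=0x%X\n", (unsigned)(offset >> 3), fw);
		offset += fw;
		printf("cache1: off=%u size=0x%X\n", (unsigned)(offset >> 3), stack);
		offset += stack;
		printf("cache2: off=%u size=0x%X\n", (unsigned)(offset >> 3), heap);
		return 0;
	}
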
275 | |||
276 | /** | ||
277 | * uvd_v5_0_start - start UVD block | ||
278 | * | ||
279 | * @adev: amdgpu_device pointer | ||
280 | * | ||
281 | * Setup and start the UVD block | ||
282 | */ | ||
283 | static int uvd_v5_0_start(struct amdgpu_device *adev) | ||
284 | { | ||
285 | struct amdgpu_ring *ring = &adev->uvd.ring; | ||
286 | uint32_t rb_bufsz, tmp; | ||
287 | uint32_t lmi_swap_cntl; | ||
288 | uint32_t mp_swap_cntl; | ||
289 | int i, j, r; | ||
290 | |||
291 | /* disable DPG */ | ||
292 | WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); | ||
293 | |||
294 | /* disable byte swapping */ | ||
295 | lmi_swap_cntl = 0; | ||
296 | mp_swap_cntl = 0; | ||
297 | |||
298 | uvd_v5_0_mc_resume(adev); | ||
299 | |||
300 | /* disable clock gating */ | ||
301 | WREG32(mmUVD_CGC_GATE, 0); | ||
302 | |||
303 | /* disable interrupt */ | ||
304 | WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); | ||
305 | |||
306 | /* stall UMC and register bus before resetting VCPU */ | ||
307 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
308 | mdelay(1); | ||
309 | |||
310 | /* put LMI, VCPU, RBC etc... into reset */ | ||
311 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | | ||
312 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | | ||
313 | UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | | ||
314 | UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | | ||
315 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); | ||
316 | mdelay(5); | ||
317 | |||
318 | /* take UVD block out of reset */ | ||
319 | WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); | ||
320 | mdelay(5); | ||
321 | |||
322 | /* initialize UVD memory controller */ | ||
323 | WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | | ||
324 | (1 << 21) | (1 << 9) | (1 << 20)); | ||
325 | |||
326 | #ifdef __BIG_ENDIAN | ||
327 | /* swap (8 in 32) RB and IB */ | ||
328 | lmi_swap_cntl = 0xa; | ||
329 | mp_swap_cntl = 0; | ||
330 | #endif | ||
331 | WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); | ||
332 | WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); | ||
333 | |||
334 | WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); | ||
335 | WREG32(mmUVD_MPC_SET_MUXA1, 0x0); | ||
336 | WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); | ||
337 | WREG32(mmUVD_MPC_SET_MUXB1, 0x0); | ||
338 | WREG32(mmUVD_MPC_SET_ALU, 0); | ||
339 | WREG32(mmUVD_MPC_SET_MUX, 0x88); | ||
340 | |||
341 | /* take all subblocks out of reset, except VCPU */ | ||
342 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
343 | mdelay(5); | ||
344 | |||
345 | /* enable VCPU clock */ | ||
346 | WREG32(mmUVD_VCPU_CNTL, 1 << 9); | ||
347 | |||
348 | /* enable UMC */ | ||
349 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | ||
350 | |||
351 | /* boot up the VCPU */ | ||
352 | WREG32(mmUVD_SOFT_RESET, 0); | ||
353 | mdelay(10); | ||
354 | |||
355 | for (i = 0; i < 10; ++i) { | ||
356 | uint32_t status; | ||
357 | for (j = 0; j < 100; ++j) { | ||
358 | status = RREG32(mmUVD_STATUS); | ||
359 | if (status & 2) | ||
360 | break; | ||
361 | mdelay(10); | ||
362 | } | ||
363 | r = 0; | ||
364 | if (status & 2) | ||
365 | break; | ||
366 | |||
367 | DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); | ||
368 | WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, | ||
369 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
370 | mdelay(10); | ||
371 | WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
372 | mdelay(10); | ||
373 | r = -1; | ||
374 | } | ||
375 | |||
376 | if (r) { | ||
377 | DRM_ERROR("UVD not responding, giving up!!!\n"); | ||
378 | return r; | ||
379 | } | ||
380 | /* enable master interrupt */ | ||
381 | WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); | ||
382 | |||
383 | /* clear the bit 4 of UVD_STATUS */ | ||
384 | WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); | ||
385 | |||
386 | rb_bufsz = order_base_2(ring->ring_size); | ||
387 | tmp = 0; | ||
388 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); | ||
389 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); | ||
390 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); | ||
391 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); | ||
392 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); | ||
393 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); | ||
394 | /* force RBC into idle state */ | ||
395 | WREG32(mmUVD_RBC_RB_CNTL, tmp); | ||
396 | |||
397 | /* set the write pointer delay */ | ||
398 | WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); | ||
399 | |||
400 | /* set the write-back address */ | ||
401 | WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); | ||
402 | |||
403 | /* program the RB_BASE for the ring buffer */ | ||
404 | WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, | ||
405 | lower_32_bits(ring->gpu_addr)); | ||
406 | WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, | ||
407 | upper_32_bits(ring->gpu_addr)); | ||
408 | |||
409 | /* Initialize the ring buffer's read and write pointers */ | ||
410 | WREG32(mmUVD_RBC_RB_RPTR, 0); | ||
411 | |||
412 | ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); | ||
413 | WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); | ||
414 | |||
415 | WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); | ||
416 | |||
417 | return 0; | ||
418 | } | ||
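
RB_BUFSZ above is the base-2 logarithm of the ring size, computed with order_base_2() (which rounds up for non-powers of two). A host-side illustration (log2_u32() is a simplified stand-in, exact for powers of two only):

	#include <stdio.h>

	static unsigned log2_u32(unsigned v)
	{
		unsigned r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		printf("rb_bufsz for a 4096-byte ring = %u\n", log2_u32(4096)); /* 12 */
		return 0;
	}
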
419 | |||
420 | /** | ||
421 | * uvd_v5_0_stop - stop UVD block | ||
422 | * | ||
423 | * @adev: amdgpu_device pointer | ||
424 | * | ||
425 | * stop the UVD block | ||
426 | */ | ||
427 | static void uvd_v5_0_stop(struct amdgpu_device *adev) | ||
428 | { | ||
429 | /* force RBC into idle state */ | ||
430 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); | ||
431 | |||
432 | /* Stall UMC and register bus before resetting VCPU */ | ||
433 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
434 | mdelay(1); | ||
435 | |||
436 | /* put VCPU into reset */ | ||
437 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
438 | mdelay(5); | ||
439 | |||
440 | /* disable VCPU clock */ | ||
441 | WREG32(mmUVD_VCPU_CNTL, 0x0); | ||
442 | |||
443 | /* Unstall UMC and register bus */ | ||
444 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | ||
445 | } | ||
446 | |||
447 | /** | ||
448 | * uvd_v5_0_ring_emit_fence - emit a fence & trap command | ||
449 | * | ||
450 | * @ring: amdgpu_ring pointer | ||
451 | * @fence: fence to emit | ||
452 | * | ||
453 | * Write a fence and a trap command to the ring. | ||
454 | */ | ||
455 | static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | ||
456 | bool write64bit) | ||
457 | { | ||
458 | WARN_ON(write64bit); | ||
459 | |||
460 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | ||
461 | amdgpu_ring_write(ring, seq); | ||
462 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | ||
463 | amdgpu_ring_write(ring, addr & 0xffffffff); | ||
464 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | ||
465 | amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); | ||
466 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); | ||
467 | amdgpu_ring_write(ring, 0); | ||
468 | |||
469 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | ||
470 | amdgpu_ring_write(ring, 0); | ||
471 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | ||
472 | amdgpu_ring_write(ring, 0); | ||
473 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); | ||
474 | amdgpu_ring_write(ring, 2); | ||
475 | } | ||
476 | |||
477 | /** | ||
478 | * uvd_v5_0_ring_emit_semaphore - emit semaphore command | ||
479 | * | ||
480 | * @ring: amdgpu_ring pointer | ||
481 | * @semaphore: semaphore to emit commands for | ||
482 | * @emit_wait: true if we should emit a wait command | ||
483 | * | ||
484 | * Emit a semaphore command (either wait or signal) to the UVD ring. | ||
485 | */ | ||
486 | static bool uvd_v5_0_ring_emit_semaphore(struct amdgpu_ring *ring, | ||
487 | struct amdgpu_semaphore *semaphore, | ||
488 | bool emit_wait) | ||
489 | { | ||
490 | uint64_t addr = semaphore->gpu_addr; | ||
491 | |||
492 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); | ||
493 | amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); | ||
494 | |||
495 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); | ||
496 | amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); | ||
497 | |||
498 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); | ||
499 | amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); | ||
500 | |||
501 | return true; | ||
502 | } | ||
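
The two writes above carry a single 8-byte-aligned GPU address: bits 3..22 go into the LOW register and bits 23..42 into the HIGH register, each as a 20-bit field. A self-contained round-trip check (the sample address is made up):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x12345678ULL;	/* must be 8-byte aligned */
		uint32_t lo = (addr >> 3) & 0x000FFFFF;
		uint32_t hi = (addr >> 23) & 0x000FFFFF;
		uint64_t back = ((uint64_t)hi << 23) | ((uint64_t)lo << 3);

		printf("lo=0x%05X hi=0x%05X round-trip=0x%08llX\n",
		       lo, hi, (unsigned long long)back);	/* back == addr */
		return 0;
	}
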
503 | |||
504 | /** | ||
505 | * uvd_v5_0_ring_test_ring - register write test | ||
506 | * | ||
507 | * @ring: amdgpu_ring pointer | ||
508 | * | ||
509 | * Test if we can successfully write to the context register | ||
510 | */ | ||
511 | static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring) | ||
512 | { | ||
513 | struct amdgpu_device *adev = ring->adev; | ||
514 | uint32_t tmp = 0; | ||
515 | unsigned i; | ||
516 | int r; | ||
517 | |||
518 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | ||
519 | r = amdgpu_ring_lock(ring, 3); | ||
520 | if (r) { | ||
521 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
522 | ring->idx, r); | ||
523 | return r; | ||
524 | } | ||
525 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | ||
526 | amdgpu_ring_write(ring, 0xDEADBEEF); | ||
527 | amdgpu_ring_unlock_commit(ring); | ||
528 | for (i = 0; i < adev->usec_timeout; i++) { | ||
529 | tmp = RREG32(mmUVD_CONTEXT_ID); | ||
530 | if (tmp == 0xDEADBEEF) | ||
531 | break; | ||
532 | DRM_UDELAY(1); | ||
533 | } | ||
534 | |||
535 | if (i < adev->usec_timeout) { | ||
536 | DRM_INFO("ring test on %d succeeded in %d usecs\n", | ||
537 | ring->idx, i); | ||
538 | } else { | ||
539 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
540 | ring->idx, tmp); | ||
541 | r = -EINVAL; | ||
542 | } | ||
543 | return r; | ||
544 | } | ||
545 | |||
546 | /** | ||
547 | * uvd_v5_0_ring_emit_ib - execute indirect buffer | ||
548 | * | ||
549 | * @ring: amdgpu_ring pointer | ||
550 | * @ib: indirect buffer to execute | ||
551 | * | ||
552 | * Write ring commands to execute the indirect buffer | ||
553 | */ | ||
554 | static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, | ||
555 | struct amdgpu_ib *ib) | ||
556 | { | ||
557 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); | ||
558 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | ||
559 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); | ||
560 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | ||
561 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); | ||
562 | amdgpu_ring_write(ring, ib->length_dw); | ||
563 | } | ||
564 | |||
565 | /** | ||
566 | * uvd_v5_0_ring_test_ib - test ib execution | ||
567 | * | ||
568 | * @ring: amdgpu_ring pointer | ||
569 | * | ||
570 | * Test if we can successfully execute an IB | ||
571 | */ | ||
572 | static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) | ||
573 | { | ||
574 | struct amdgpu_device *adev = ring->adev; | ||
575 | struct amdgpu_fence *fence = NULL; | ||
576 | int r; | ||
577 | |||
578 | r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); | ||
579 | if (r) { | ||
580 | DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); | ||
581 | return r; | ||
582 | } | ||
583 | |||
584 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); | ||
585 | if (r) { | ||
586 | DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); | ||
587 | goto error; | ||
588 | } | ||
589 | |||
590 | r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); | ||
591 | if (r) { | ||
592 | DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); | ||
593 | goto error; | ||
594 | } | ||
595 | |||
596 | r = amdgpu_fence_wait(fence, false); | ||
597 | if (r) { | ||
598 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | ||
599 | goto error; | ||
600 | } | ||
601 | DRM_INFO("ib test on ring %d succeeded\n", ring->idx); | ||
602 | error: | ||
603 | amdgpu_fence_unref(&fence); | ||
604 | amdgpu_asic_set_uvd_clocks(adev, 0, 0); | ||
605 | return r; | ||
606 | } | ||
607 | |||
608 | static bool uvd_v5_0_is_idle(struct amdgpu_device *adev) | ||
609 | { | ||
610 | return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); | ||
611 | } | ||
612 | |||
613 | static int uvd_v5_0_wait_for_idle(struct amdgpu_device *adev) | ||
614 | { | ||
615 | unsigned i; | ||
616 | |||
617 | for (i = 0; i < adev->usec_timeout; i++) { | ||
618 | if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) | ||
619 | return 0; | ||
620 | } | ||
621 | return -ETIMEDOUT; | ||
622 | } | ||
623 | |||
624 | static int uvd_v5_0_soft_reset(struct amdgpu_device *adev) | ||
625 | { | ||
626 | uvd_v5_0_stop(adev); | ||
627 | |||
628 | WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, | ||
629 | ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); | ||
630 | mdelay(5); | ||
631 | |||
632 | return uvd_v5_0_start(adev); | ||
633 | } | ||
634 | |||
635 | static void uvd_v5_0_print_status(struct amdgpu_device *adev) | ||
636 | { | ||
637 | dev_info(adev->dev, "UVD 5.0 registers\n"); | ||
638 | dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", | ||
639 | RREG32(mmUVD_SEMA_ADDR_LOW)); | ||
640 | dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n", | ||
641 | RREG32(mmUVD_SEMA_ADDR_HIGH)); | ||
642 | dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n", | ||
643 | RREG32(mmUVD_SEMA_CMD)); | ||
644 | dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n", | ||
645 | RREG32(mmUVD_GPCOM_VCPU_CMD)); | ||
646 | dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n", | ||
647 | RREG32(mmUVD_GPCOM_VCPU_DATA0)); | ||
648 | dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n", | ||
649 | RREG32(mmUVD_GPCOM_VCPU_DATA1)); | ||
650 | dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n", | ||
651 | RREG32(mmUVD_ENGINE_CNTL)); | ||
652 | dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", | ||
653 | RREG32(mmUVD_UDEC_ADDR_CONFIG)); | ||
654 | dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", | ||
655 | RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); | ||
656 | dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", | ||
657 | RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); | ||
658 | dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n", | ||
659 | RREG32(mmUVD_SEMA_CNTL)); | ||
660 | dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n", | ||
661 | RREG32(mmUVD_LMI_EXT40_ADDR)); | ||
662 | dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n", | ||
663 | RREG32(mmUVD_CTX_INDEX)); | ||
664 | dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n", | ||
665 | RREG32(mmUVD_CTX_DATA)); | ||
666 | dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n", | ||
667 | RREG32(mmUVD_CGC_GATE)); | ||
668 | dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n", | ||
669 | RREG32(mmUVD_CGC_CTRL)); | ||
670 | dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n", | ||
671 | RREG32(mmUVD_LMI_CTRL2)); | ||
672 | dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n", | ||
673 | RREG32(mmUVD_MASTINT_EN)); | ||
674 | dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n", | ||
675 | RREG32(mmUVD_LMI_ADDR_EXT)); | ||
676 | dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n", | ||
677 | RREG32(mmUVD_LMI_CTRL)); | ||
678 | dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n", | ||
679 | RREG32(mmUVD_LMI_SWAP_CNTL)); | ||
680 | dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n", | ||
681 | RREG32(mmUVD_MP_SWAP_CNTL)); | ||
682 | dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n", | ||
683 | RREG32(mmUVD_MPC_SET_MUXA0)); | ||
684 | dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n", | ||
685 | RREG32(mmUVD_MPC_SET_MUXA1)); | ||
686 | dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n", | ||
687 | RREG32(mmUVD_MPC_SET_MUXB0)); | ||
688 | dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n", | ||
689 | RREG32(mmUVD_MPC_SET_MUXB1)); | ||
690 | dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n", | ||
691 | RREG32(mmUVD_MPC_SET_MUX)); | ||
692 | dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n", | ||
693 | RREG32(mmUVD_MPC_SET_ALU)); | ||
694 | dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n", | ||
695 | RREG32(mmUVD_VCPU_CACHE_OFFSET0)); | ||
696 | dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n", | ||
697 | RREG32(mmUVD_VCPU_CACHE_SIZE0)); | ||
698 | dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n", | ||
699 | RREG32(mmUVD_VCPU_CACHE_OFFSET1)); | ||
700 | dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n", | ||
701 | RREG32(mmUVD_VCPU_CACHE_SIZE1)); | ||
702 | dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n", | ||
703 | RREG32(mmUVD_VCPU_CACHE_OFFSET2)); | ||
704 | dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n", | ||
705 | RREG32(mmUVD_VCPU_CACHE_SIZE2)); | ||
706 | dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n", | ||
707 | RREG32(mmUVD_VCPU_CNTL)); | ||
708 | dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n", | ||
709 | RREG32(mmUVD_SOFT_RESET)); | ||
710 | dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_LOW=0x%08X\n", | ||
711 | RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW)); | ||
712 | dev_info(adev->dev, " UVD_LMI_RBC_IB_64BIT_BAR_HIGH=0x%08X\n", | ||
713 | RREG32(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH)); | ||
714 | dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n", | ||
715 | RREG32(mmUVD_RBC_IB_SIZE)); | ||
716 | dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_LOW=0x%08X\n", | ||
717 | RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW)); | ||
718 | dev_info(adev->dev, " UVD_LMI_RBC_RB_64BIT_BAR_HIGH=0x%08X\n", | ||
719 | RREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH)); | ||
720 | dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n", | ||
721 | RREG32(mmUVD_RBC_RB_RPTR)); | ||
722 | dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n", | ||
723 | RREG32(mmUVD_RBC_RB_WPTR)); | ||
724 | dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n", | ||
725 | RREG32(mmUVD_RBC_RB_WPTR_CNTL)); | ||
726 | dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n", | ||
727 | RREG32(mmUVD_RBC_RB_CNTL)); | ||
728 | dev_info(adev->dev, " UVD_STATUS=0x%08X\n", | ||
729 | RREG32(mmUVD_STATUS)); | ||
730 | dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n", | ||
731 | RREG32(mmUVD_SEMA_TIMEOUT_STATUS)); | ||
732 | dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", | ||
733 | RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL)); | ||
734 | dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n", | ||
735 | RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL)); | ||
736 | dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", | ||
737 | RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); | ||
738 | dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", | ||
739 | RREG32(mmUVD_CONTEXT_ID)); | ||
740 | } | ||
741 | |||
742 | static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev, | ||
743 | struct amdgpu_irq_src *source, | ||
744 | unsigned type, | ||
745 | enum amdgpu_interrupt_state state) | ||
746 | { | ||
747 | /* TODO */ | ||
748 | return 0; | ||
749 | } | ||
750 | |||
751 | static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, | ||
752 | struct amdgpu_irq_src *source, | ||
753 | struct amdgpu_iv_entry *entry) | ||
754 | { | ||
755 | DRM_DEBUG("IH: UVD TRAP\n"); | ||
756 | amdgpu_fence_process(&adev->uvd.ring); | ||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | static int uvd_v5_0_set_clockgating_state(struct amdgpu_device *adev, | ||
761 | enum amdgpu_clockgating_state state) | ||
762 | { | ||
763 | /* TODO */ | ||
764 | |||
765 | return 0; | ||
766 | } | ||
767 | |||
768 | static int uvd_v5_0_set_powergating_state(struct amdgpu_device *adev, | ||
769 | enum amdgpu_powergating_state state) | ||
770 | { | ||
771 | /* This doesn't actually powergate the UVD block. | ||
772 | * That's done in the dpm code via the SMC. This | ||
773 | * just re-inits the block as necessary. The actual | ||
774 | * gating still happens in the dpm code. We should | ||
775 | * revisit this when there is a cleaner line between | ||
776 |  * the smc and the hw blocks. | ||
777 | */ | ||
778 | if (state == AMDGPU_PG_STATE_GATE) { | ||
779 | uvd_v5_0_stop(adev); | ||
780 | return 0; | ||
781 | } else { | ||
782 | return uvd_v5_0_start(adev); | ||
783 | } | ||
784 | } | ||
785 | |||
786 | const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs = { | ||
787 | .early_init = uvd_v5_0_early_init, | ||
788 | .late_init = NULL, | ||
789 | .sw_init = uvd_v5_0_sw_init, | ||
790 | .sw_fini = uvd_v5_0_sw_fini, | ||
791 | .hw_init = uvd_v5_0_hw_init, | ||
792 | .hw_fini = uvd_v5_0_hw_fini, | ||
793 | .suspend = uvd_v5_0_suspend, | ||
794 | .resume = uvd_v5_0_resume, | ||
795 | .is_idle = uvd_v5_0_is_idle, | ||
796 | .wait_for_idle = uvd_v5_0_wait_for_idle, | ||
797 | .soft_reset = uvd_v5_0_soft_reset, | ||
798 | .print_status = uvd_v5_0_print_status, | ||
799 | .set_clockgating_state = uvd_v5_0_set_clockgating_state, | ||
800 | .set_powergating_state = uvd_v5_0_set_powergating_state, | ||
801 | }; | ||
802 | |||
803 | static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { | ||
804 | .get_rptr = uvd_v5_0_ring_get_rptr, | ||
805 | .get_wptr = uvd_v5_0_ring_get_wptr, | ||
806 | .set_wptr = uvd_v5_0_ring_set_wptr, | ||
807 | .parse_cs = amdgpu_uvd_ring_parse_cs, | ||
808 | .emit_ib = uvd_v5_0_ring_emit_ib, | ||
809 | .emit_fence = uvd_v5_0_ring_emit_fence, | ||
810 | .emit_semaphore = uvd_v5_0_ring_emit_semaphore, | ||
811 | .test_ring = uvd_v5_0_ring_test_ring, | ||
812 | .test_ib = uvd_v5_0_ring_test_ib, | ||
813 | .is_lockup = amdgpu_ring_test_lockup, | ||
814 | }; | ||
815 | |||
816 | static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) | ||
817 | { | ||
818 | adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; | ||
819 | } | ||
820 | |||
821 | static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { | ||
822 | .set = uvd_v5_0_set_interrupt_state, | ||
823 | .process = uvd_v5_0_process_interrupt, | ||
824 | }; | ||
825 | |||
826 | static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) | ||
827 | { | ||
828 | adev->uvd.irq.num_types = 1; | ||
829 | adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; | ||
830 | } | ||
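
[Editor's note] The two static tables above are how this file plugs into the
driver core: everything is exported through uvd_v5_0_ip_funcs, and the ring
and IRQ callbacks are wired in at early_init time. As a minimal, hedged
sketch of the dispatch loop that consumes such tables (field names follow
the amdgpu_ip_funcs layout used throughout this patch; the function itself
is illustrative, not the driver's actual code):

	/* Walk every registered IP block and run its optional early_init. */
	static int example_early_init(struct amdgpu_device *adev)
	{
		int i, r;

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (!adev->ip_blocks[i].funcs->early_init)
				continue;	/* hooks are optional, cf. .late_init = NULL */
			r = adev->ip_blocks[i].funcs->early_init(adev);
			if (r)
				return r;	/* first failure aborts bring-up */
		}
		return 0;
	}
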
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h new file mode 100644 index 000000000000..7d7a15296383 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __UVD_V5_0_H__ | ||
25 | #define __UVD_V5_0_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs uvd_v5_0_ip_funcs; | ||
28 | |||
29 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c new file mode 100644 index 000000000000..f59942d5c50e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -0,0 +1,810 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Christian König <christian.koenig@amd.com> | ||
23 | */ | ||
24 | |||
25 | #include <linux/firmware.h> | ||
26 | #include <drm/drmP.h> | ||
27 | #include "amdgpu.h" | ||
28 | #include "amdgpu_uvd.h" | ||
29 | #include "vid.h" | ||
30 | #include "uvd/uvd_6_0_d.h" | ||
31 | #include "uvd/uvd_6_0_sh_mask.h" | ||
32 | #include "oss/oss_2_0_d.h" | ||
33 | #include "oss/oss_2_0_sh_mask.h" | ||
34 | |||
35 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); | ||
36 | static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev); | ||
37 | static int uvd_v6_0_start(struct amdgpu_device *adev); | ||
38 | static void uvd_v6_0_stop(struct amdgpu_device *adev); | ||
39 | |||
40 | /** | ||
41 | * uvd_v6_0_ring_get_rptr - get read pointer | ||
42 | * | ||
43 | * @ring: amdgpu_ring pointer | ||
44 | * | ||
45 | * Returns the current hardware read pointer | ||
46 | */ | ||
47 | static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring) | ||
48 | { | ||
49 | struct amdgpu_device *adev = ring->adev; | ||
50 | |||
51 | return RREG32(mmUVD_RBC_RB_RPTR); | ||
52 | } | ||
53 | |||
54 | /** | ||
55 | * uvd_v6_0_ring_get_wptr - get write pointer | ||
56 | * | ||
57 | * @ring: amdgpu_ring pointer | ||
58 | * | ||
59 | * Returns the current hardware write pointer | ||
60 | */ | ||
61 | static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring) | ||
62 | { | ||
63 | struct amdgpu_device *adev = ring->adev; | ||
64 | |||
65 | return RREG32(mmUVD_RBC_RB_WPTR); | ||
66 | } | ||
67 | |||
68 | /** | ||
69 | * uvd_v6_0_ring_set_wptr - set write pointer | ||
70 | * | ||
71 | * @ring: amdgpu_ring pointer | ||
72 | * | ||
73 | * Commits the write pointer to the hardware | ||
74 | */ | ||
75 | static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring) | ||
76 | { | ||
77 | struct amdgpu_device *adev = ring->adev; | ||
78 | |||
79 | WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); | ||
80 | } | ||
81 | |||
82 | static int uvd_v6_0_early_init(struct amdgpu_device *adev) | ||
83 | { | ||
84 | uvd_v6_0_set_ring_funcs(adev); | ||
85 | uvd_v6_0_set_irq_funcs(adev); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static int uvd_v6_0_sw_init(struct amdgpu_device *adev) | ||
91 | { | ||
92 | struct amdgpu_ring *ring; | ||
93 | int r; | ||
94 | |||
95 | /* UVD TRAP */ | ||
96 | r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq); | ||
97 | if (r) | ||
98 | return r; | ||
99 | |||
100 | r = amdgpu_uvd_sw_init(adev); | ||
101 | if (r) | ||
102 | return r; | ||
103 | |||
104 | r = amdgpu_uvd_resume(adev); | ||
105 | if (r) | ||
106 | return r; | ||
107 | |||
108 | ring = &adev->uvd.ring; | ||
109 | sprintf(ring->name, "uvd"); | ||
110 | r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf, | ||
111 | &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD); | ||
112 | |||
113 | return r; | ||
114 | } | ||
115 | |||
116 | static int uvd_v6_0_sw_fini(struct amdgpu_device *adev) | ||
117 | { | ||
118 | int r; | ||
119 | |||
120 | r = amdgpu_uvd_suspend(adev); | ||
121 | if (r) | ||
122 | return r; | ||
123 | |||
124 | r = amdgpu_uvd_sw_fini(adev); | ||
125 | if (r) | ||
126 | return r; | ||
127 | |||
128 | return r; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * uvd_v6_0_hw_init - start and test UVD block | ||
133 | * | ||
134 | * @adev: amdgpu_device pointer | ||
135 | * | ||
136 | * Initialize the hardware, boot up the VCPU and do some testing | ||
137 | */ | ||
138 | static int uvd_v6_0_hw_init(struct amdgpu_device *adev) | ||
139 | { | ||
140 | struct amdgpu_ring *ring = &adev->uvd.ring; | ||
141 | uint32_t tmp; | ||
142 | int r; | ||
143 | |||
144 | r = uvd_v6_0_start(adev); | ||
145 | if (r) | ||
146 | goto done; | ||
147 | |||
148 | ring->ready = true; | ||
149 | r = amdgpu_ring_test_ring(ring); | ||
150 | if (r) { | ||
151 | ring->ready = false; | ||
152 | goto done; | ||
153 | } | ||
154 | |||
155 | r = amdgpu_ring_lock(ring, 10); | ||
156 | if (r) { | ||
157 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); | ||
158 | goto done; | ||
159 | } | ||
160 | |||
161 | tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0); | ||
162 | amdgpu_ring_write(ring, tmp); | ||
163 | amdgpu_ring_write(ring, 0xFFFFF); | ||
164 | |||
165 | tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0); | ||
166 | amdgpu_ring_write(ring, tmp); | ||
167 | amdgpu_ring_write(ring, 0xFFFFF); | ||
168 | |||
169 | tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0); | ||
170 | amdgpu_ring_write(ring, tmp); | ||
171 | amdgpu_ring_write(ring, 0xFFFFF); | ||
172 | |||
173 | /* Clear timeout status bits */ | ||
174 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0)); | ||
175 | amdgpu_ring_write(ring, 0x8); | ||
176 | |||
177 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0)); | ||
178 | amdgpu_ring_write(ring, 3); | ||
179 | |||
180 | amdgpu_ring_unlock_commit(ring); | ||
181 | |||
182 | done: | ||
183 | if (!r) | ||
184 | DRM_INFO("UVD initialized successfully.\n"); | ||
185 | |||
186 | return r; | ||
187 | } | ||
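
[Editor's note] Each amdgpu_ring_write() pair above is a type-0 packet
header followed by one payload dword. A hedged sketch of the header layout
(the real macro lives in vid.h; the EX_* names are illustrative, but the
usual encoding puts the packet type in bits 31:30, the extra dword count in
bits 29:16, and the register offset in bits 15:0):

	#define EX_PACKET_TYPE0		0
	#define EX_PACKET0(reg, n)	((EX_PACKET_TYPE0 << 30) | \
					 (((n) & 0x3FFF) << 16)  | \
					 ((reg) & 0xFFFF))

	/* e.g. the fault-timeout write above lands as two dwords on the ring:
	 *   EX_PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0), then 0xFFFFF */
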
188 | |||
189 | /** | ||
190 | * uvd_v6_0_hw_fini - stop the hardware block | ||
191 | * | ||
192 | * @adev: amdgpu_device pointer | ||
193 | * | ||
194 | * Stop the UVD block, mark ring as not ready any more | ||
195 | */ | ||
196 | static int uvd_v6_0_hw_fini(struct amdgpu_device *adev) | ||
197 | { | ||
198 | struct amdgpu_ring *ring = &adev->uvd.ring; | ||
199 | |||
200 | uvd_v6_0_stop(adev); | ||
201 | ring->ready = false; | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static int uvd_v6_0_suspend(struct amdgpu_device *adev) | ||
207 | { | ||
208 | int r; | ||
209 | |||
210 | r = uvd_v6_0_hw_fini(adev); | ||
211 | if (r) | ||
212 | return r; | ||
213 | |||
214 | r = amdgpu_uvd_suspend(adev); | ||
215 | if (r) | ||
216 | return r; | ||
217 | |||
218 | return r; | ||
219 | } | ||
220 | |||
221 | static int uvd_v6_0_resume(struct amdgpu_device *adev) | ||
222 | { | ||
223 | int r; | ||
224 | |||
225 | r = amdgpu_uvd_resume(adev); | ||
226 | if (r) | ||
227 | return r; | ||
228 | |||
229 | r = uvd_v6_0_hw_init(adev); | ||
230 | if (r) | ||
231 | return r; | ||
232 | |||
233 | return r; | ||
234 | } | ||
235 | |||
236 | /** | ||
237 | * uvd_v6_0_mc_resume - memory controller programming | ||
238 | * | ||
239 | * @adev: amdgpu_device pointer | ||
240 | * | ||
241 |  * Let the UVD memory controller know its offsets | ||
242 | */ | ||
243 | static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) | ||
244 | { | ||
245 | uint64_t offset; | ||
246 | uint32_t size; | ||
247 | |||
248 | /* program memory controller bits 0-27 */ | ||
249 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, | ||
250 | lower_32_bits(adev->uvd.gpu_addr)); | ||
251 | WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, | ||
252 | upper_32_bits(adev->uvd.gpu_addr)); | ||
253 | |||
254 | offset = AMDGPU_UVD_FIRMWARE_OFFSET; | ||
255 | size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); | ||
256 | WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3); | ||
257 | WREG32(mmUVD_VCPU_CACHE_SIZE0, size); | ||
258 | |||
259 | offset += size; | ||
260 | size = AMDGPU_UVD_STACK_SIZE; | ||
261 | WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3); | ||
262 | WREG32(mmUVD_VCPU_CACHE_SIZE1, size); | ||
263 | |||
264 | offset += size; | ||
265 | size = AMDGPU_UVD_HEAP_SIZE; | ||
266 | WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3); | ||
267 | WREG32(mmUVD_VCPU_CACHE_SIZE2, size); | ||
268 | } | ||
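
[Editor's note] The three OFFSETn/SIZEn pairs describe back-to-back regions
inside the window whose base was just written to the 64-bit BAR; offsets are
programmed in 8-byte units, hence the ">> 3". A hedged worked example
(constants are the driver-defined sizes; the helper is illustrative):

	/* Layout: firmware image, then VCPU stack, then VCPU heap. */
	static void example_uvd_cache_layout(struct amdgpu_device *adev)
	{
		uint64_t off = AMDGPU_UVD_FIRMWARE_OFFSET;
		uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);

		/* region 0 at [off, off + size): firmware */
		off += size;
		/* region 1 at [off, off + AMDGPU_UVD_STACK_SIZE): stack */
		off += AMDGPU_UVD_STACK_SIZE;
		/* region 2 at [off, off + AMDGPU_UVD_HEAP_SIZE): heap;
		 * each start is written as (off >> 3) to its OFFSETn register */
	}
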
269 | |||
270 | /** | ||
271 | * uvd_v6_0_start - start UVD block | ||
272 | * | ||
273 | * @adev: amdgpu_device pointer | ||
274 | * | ||
275 | * Setup and start the UVD block | ||
276 | */ | ||
277 | static int uvd_v6_0_start(struct amdgpu_device *adev) | ||
278 | { | ||
279 | struct amdgpu_ring *ring = &adev->uvd.ring; | ||
280 | uint32_t rb_bufsz, tmp; | ||
281 | uint32_t lmi_swap_cntl; | ||
282 | uint32_t mp_swap_cntl; | ||
283 | int i, j, r; | ||
284 | |||
285 | /* disable DPG */ | ||
286 | WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); | ||
287 | |||
288 | /* disable byte swapping */ | ||
289 | lmi_swap_cntl = 0; | ||
290 | mp_swap_cntl = 0; | ||
291 | |||
292 | uvd_v6_0_mc_resume(adev); | ||
293 | |||
294 | /* disable clock gating */ | ||
295 | WREG32(mmUVD_CGC_GATE, 0); | ||
296 | |||
297 | /* disable interrupt */ | ||
298 | WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); | ||
299 | |||
300 | /* stall UMC and register bus before resetting VCPU */ | ||
301 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
302 | mdelay(1); | ||
303 | |||
304 | /* put LMI, VCPU, RBC etc... into reset */ | ||
305 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | | ||
306 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | | ||
307 | UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | | ||
308 | UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | | ||
309 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); | ||
310 | mdelay(5); | ||
311 | |||
312 | /* take UVD block out of reset */ | ||
313 | WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); | ||
314 | mdelay(5); | ||
315 | |||
316 | /* initialize UVD memory controller */ | ||
317 | WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | | ||
318 | (1 << 21) | (1 << 9) | (1 << 20)); | ||
319 | |||
320 | #ifdef __BIG_ENDIAN | ||
321 | /* swap (8 in 32) RB and IB */ | ||
322 | lmi_swap_cntl = 0xa; | ||
323 | mp_swap_cntl = 0; | ||
324 | #endif | ||
325 | WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); | ||
326 | WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); | ||
327 | |||
328 | WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); | ||
329 | WREG32(mmUVD_MPC_SET_MUXA1, 0x0); | ||
330 | WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); | ||
331 | WREG32(mmUVD_MPC_SET_MUXB1, 0x0); | ||
332 | WREG32(mmUVD_MPC_SET_ALU, 0); | ||
333 | WREG32(mmUVD_MPC_SET_MUX, 0x88); | ||
334 | |||
335 | /* take all subblocks out of reset, except VCPU */ | ||
336 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
337 | mdelay(5); | ||
338 | |||
339 | /* enable VCPU clock */ | ||
340 | WREG32(mmUVD_VCPU_CNTL, 1 << 9); | ||
341 | |||
342 | /* enable UMC */ | ||
343 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | ||
344 | |||
345 | /* boot up the VCPU */ | ||
346 | WREG32(mmUVD_SOFT_RESET, 0); | ||
347 | mdelay(10); | ||
348 | |||
349 | for (i = 0; i < 10; ++i) { | ||
350 | uint32_t status; | ||
351 | |||
352 | for (j = 0; j < 100; ++j) { | ||
353 | status = RREG32(mmUVD_STATUS); | ||
354 | if (status & 2) | ||
355 | break; | ||
356 | mdelay(10); | ||
357 | } | ||
358 | r = 0; | ||
359 | if (status & 2) | ||
360 | break; | ||
361 | |||
362 | DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); | ||
363 | WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, | ||
364 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
365 | mdelay(10); | ||
366 | WREG32_P(mmUVD_SOFT_RESET, 0, | ||
367 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
368 | mdelay(10); | ||
369 | r = -1; | ||
370 | } | ||
371 | |||
372 | if (r) { | ||
373 | DRM_ERROR("UVD not responding, giving up!!!\n"); | ||
374 | return r; | ||
375 | } | ||
376 | /* enable master interrupt */ | ||
377 | WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); | ||
378 | |||
379 | /* clear the 0x4 bit of UVD_STATUS */ | ||
380 | WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); | ||
381 | |||
382 | rb_bufsz = order_base_2(ring->ring_size); | ||
383 | tmp = 0; | ||
384 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); | ||
385 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); | ||
386 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); | ||
387 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); | ||
388 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); | ||
389 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); | ||
390 | /* force RBC into idle state */ | ||
391 | WREG32(mmUVD_RBC_RB_CNTL, tmp); | ||
392 | |||
393 | /* set the write pointer delay */ | ||
394 | WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); | ||
395 | |||
396 | /* set the wb address */ | ||
397 | WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2)); | ||
398 | |||
399 | /* program the RB_BASE for the ring buffer */ | ||
400 | WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, | ||
401 | lower_32_bits(ring->gpu_addr)); | ||
402 | WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, | ||
403 | upper_32_bits(ring->gpu_addr)); | ||
404 | |||
405 | /* Initialize the ring buffer's read and write pointers */ | ||
406 | WREG32(mmUVD_RBC_RB_RPTR, 0); | ||
407 | |||
408 | ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); | ||
409 | WREG32(mmUVD_RBC_RB_WPTR, ring->wptr); | ||
410 | |||
411 | WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); | ||
412 | |||
413 | return 0; | ||
414 | } | ||
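
[Editor's note] The RB_CNTL value above is assembled with REG_SET_FIELD,
which takes its MASK/SHIFT pairs from the generated _sh_mask headers. A
hedged sketch of the operation it performs (illustrative helper, not the
kernel macro itself):

	/* Clear the field via its mask, then OR in the new value at its shift. */
	static uint32_t ex_set_field(uint32_t val, uint32_t mask,
				     uint32_t shift, uint32_t field_val)
	{
		return (val & ~mask) | ((field_val << shift) & mask);
	}
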
415 | |||
416 | /** | ||
417 | * uvd_v6_0_stop - stop UVD block | ||
418 | * | ||
419 | * @adev: amdgpu_device pointer | ||
420 | * | ||
421 | * stop the UVD block | ||
422 | */ | ||
423 | static void uvd_v6_0_stop(struct amdgpu_device *adev) | ||
424 | { | ||
425 | /* force RBC into idle state */ | ||
426 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); | ||
427 | |||
428 | /* Stall UMC and register bus before resetting VCPU */ | ||
429 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
430 | mdelay(1); | ||
431 | |||
432 | /* put VCPU into reset */ | ||
433 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | ||
434 | mdelay(5); | ||
435 | |||
436 | /* disable VCPU clock */ | ||
437 | WREG32(mmUVD_VCPU_CNTL, 0x0); | ||
438 | |||
439 | /* Unstall UMC and register bus */ | ||
440 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); | ||
441 | } | ||
442 | |||
443 | /** | ||
444 |  * uvd_v6_0_ring_emit_fence - emit a fence & trap command | ||
445 | * | ||
446 | * @ring: amdgpu_ring pointer | ||
447 | * @fence: fence to emit | ||
448 | * | ||
449 | * Write a fence and a trap command to the ring. | ||
450 | */ | ||
451 | static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, | ||
452 | bool write64bit) | ||
453 | { | ||
454 | WARN_ON(write64bit); | ||
455 | |||
456 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | ||
457 | amdgpu_ring_write(ring, seq); | ||
458 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | ||
459 | amdgpu_ring_write(ring, addr & 0xffffffff); | ||
460 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | ||
461 | amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); | ||
462 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); | ||
463 | amdgpu_ring_write(ring, 0); | ||
464 | |||
465 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); | ||
466 | amdgpu_ring_write(ring, 0); | ||
467 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); | ||
468 | amdgpu_ring_write(ring, 0); | ||
469 | amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); | ||
470 | amdgpu_ring_write(ring, 2); | ||
471 | } | ||
472 | |||
473 | /** | ||
474 | * uvd_v6_0_ring_emit_semaphore - emit semaphore command | ||
475 | * | ||
476 | * @ring: amdgpu_ring pointer | ||
477 | * @semaphore: semaphore to emit commands for | ||
478 | * @emit_wait: true if we should emit a wait command | ||
479 | * | ||
480 | * Emit a semaphore command (either wait or signal) to the UVD ring. | ||
481 | */ | ||
482 | static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring, | ||
483 | struct amdgpu_semaphore *semaphore, | ||
484 | bool emit_wait) | ||
485 | { | ||
486 | uint64_t addr = semaphore->gpu_addr; | ||
487 | |||
488 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0)); | ||
489 | amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF); | ||
490 | |||
491 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0)); | ||
492 | amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF); | ||
493 | |||
494 | amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0)); | ||
495 | amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0)); | ||
496 | |||
497 | return true; | ||
498 | } | ||
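
[Editor's note] The two 20-bit writes above carry one 8-byte-aligned
semaphore address split across ADDR_LOW and ADDR_HIGH. A worked example
(the address value is illustrative):

	uint64_t addr = 0x123456789F8ULL;		/* 8-byte aligned */
	uint32_t low  = (addr >> 3)  & 0x000FFFFF;	/* addr bits  3..22 */
	uint32_t high = (addr >> 23) & 0x000FFFFF;	/* addr bits 23..42 */
	/* hardware reassembles (high << 23) | (low << 3) */
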
499 | |||
500 | /** | ||
501 | * uvd_v6_0_ring_test_ring - register write test | ||
502 | * | ||
503 | * @ring: amdgpu_ring pointer | ||
504 | * | ||
505 | * Test if we can successfully write to the context register | ||
506 | */ | ||
507 | static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) | ||
508 | { | ||
509 | struct amdgpu_device *adev = ring->adev; | ||
510 | uint32_t tmp = 0; | ||
511 | unsigned i; | ||
512 | int r; | ||
513 | |||
514 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); | ||
515 | r = amdgpu_ring_lock(ring, 3); | ||
516 | if (r) { | ||
517 | DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", | ||
518 | ring->idx, r); | ||
519 | return r; | ||
520 | } | ||
521 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); | ||
522 | amdgpu_ring_write(ring, 0xDEADBEEF); | ||
523 | amdgpu_ring_unlock_commit(ring); | ||
524 | for (i = 0; i < adev->usec_timeout; i++) { | ||
525 | tmp = RREG32(mmUVD_CONTEXT_ID); | ||
526 | if (tmp == 0xDEADBEEF) | ||
527 | break; | ||
528 | DRM_UDELAY(1); | ||
529 | } | ||
530 | |||
531 | if (i < adev->usec_timeout) { | ||
532 | DRM_INFO("ring test on %d succeeded in %d usecs\n", | ||
533 | ring->idx, i); | ||
534 | } else { | ||
535 | DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", | ||
536 | ring->idx, tmp); | ||
537 | r = -EINVAL; | ||
538 | } | ||
539 | return r; | ||
540 | } | ||
541 | |||
542 | /** | ||
543 | * uvd_v6_0_ring_emit_ib - execute indirect buffer | ||
544 | * | ||
545 | * @ring: amdgpu_ring pointer | ||
546 | * @ib: indirect buffer to execute | ||
547 | * | ||
548 | * Write ring commands to execute the indirect buffer | ||
549 | */ | ||
550 | static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, | ||
551 | struct amdgpu_ib *ib) | ||
552 | { | ||
553 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); | ||
554 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); | ||
555 | amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); | ||
556 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); | ||
557 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); | ||
558 | amdgpu_ring_write(ring, ib->length_dw); | ||
559 | } | ||
560 | |||
561 | /** | ||
562 | * uvd_v6_0_ring_test_ib - test ib execution | ||
563 | * | ||
564 | * @ring: amdgpu_ring pointer | ||
565 | * | ||
566 | * Test if we can successfully execute an IB | ||
567 | */ | ||
568 | static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) | ||
569 | { | ||
570 | struct amdgpu_fence *fence = NULL; | ||
571 | int r; | ||
572 | |||
573 | r = amdgpu_uvd_get_create_msg(ring, 1, NULL); | ||
574 | if (r) { | ||
575 | DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); | ||
576 | goto error; | ||
577 | } | ||
578 | |||
579 | r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence); | ||
580 | if (r) { | ||
581 | DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); | ||
582 | goto error; | ||
583 | } | ||
584 | |||
585 | r = amdgpu_fence_wait(fence, false); | ||
586 | if (r) { | ||
587 | DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); | ||
588 | goto error; | ||
589 | } | ||
590 | DRM_INFO("ib test on ring %d succeeded\n", ring->idx); | ||
591 | error: | ||
592 | amdgpu_fence_unref(&fence); | ||
593 | return r; | ||
594 | } | ||
595 | |||
596 | static bool uvd_v6_0_is_idle(struct amdgpu_device *adev) | ||
597 | { | ||
598 | return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); | ||
599 | } | ||
600 | |||
601 | static int uvd_v6_0_wait_for_idle(struct amdgpu_device *adev) | ||
602 | { | ||
603 | unsigned i; | ||
604 | |||
605 | for (i = 0; i < adev->usec_timeout; i++) { | ||
606 | if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) | ||
607 | return 0; | ||
608 | } | ||
609 | return -ETIMEDOUT; | ||
610 | } | ||
611 | |||
612 | static int uvd_v6_0_soft_reset(struct amdgpu_device *adev) | ||
613 | { | ||
614 | uvd_v6_0_stop(adev); | ||
615 | |||
616 | WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, | ||
617 | ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); | ||
618 | mdelay(5); | ||
619 | |||
620 | return uvd_v6_0_start(adev); | ||
621 | } | ||
622 | |||
623 | static void uvd_v6_0_print_status(struct amdgpu_device *adev) | ||
624 | { | ||
625 | dev_info(adev->dev, "UVD 6.0 registers\n"); | ||
626 | dev_info(adev->dev, " UVD_SEMA_ADDR_LOW=0x%08X\n", | ||
627 | RREG32(mmUVD_SEMA_ADDR_LOW)); | ||
628 | dev_info(adev->dev, " UVD_SEMA_ADDR_HIGH=0x%08X\n", | ||
629 | RREG32(mmUVD_SEMA_ADDR_HIGH)); | ||
630 | dev_info(adev->dev, " UVD_SEMA_CMD=0x%08X\n", | ||
631 | RREG32(mmUVD_SEMA_CMD)); | ||
632 | dev_info(adev->dev, " UVD_GPCOM_VCPU_CMD=0x%08X\n", | ||
633 | RREG32(mmUVD_GPCOM_VCPU_CMD)); | ||
634 | dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA0=0x%08X\n", | ||
635 | RREG32(mmUVD_GPCOM_VCPU_DATA0)); | ||
636 | dev_info(adev->dev, " UVD_GPCOM_VCPU_DATA1=0x%08X\n", | ||
637 | RREG32(mmUVD_GPCOM_VCPU_DATA1)); | ||
638 | dev_info(adev->dev, " UVD_ENGINE_CNTL=0x%08X\n", | ||
639 | RREG32(mmUVD_ENGINE_CNTL)); | ||
640 | dev_info(adev->dev, " UVD_UDEC_ADDR_CONFIG=0x%08X\n", | ||
641 | RREG32(mmUVD_UDEC_ADDR_CONFIG)); | ||
642 | dev_info(adev->dev, " UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n", | ||
643 | RREG32(mmUVD_UDEC_DB_ADDR_CONFIG)); | ||
644 | dev_info(adev->dev, " UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n", | ||
645 | RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG)); | ||
646 | dev_info(adev->dev, " UVD_SEMA_CNTL=0x%08X\n", | ||
647 | RREG32(mmUVD_SEMA_CNTL)); | ||
648 | dev_info(adev->dev, " UVD_LMI_EXT40_ADDR=0x%08X\n", | ||
649 | RREG32(mmUVD_LMI_EXT40_ADDR)); | ||
650 | dev_info(adev->dev, " UVD_CTX_INDEX=0x%08X\n", | ||
651 | RREG32(mmUVD_CTX_INDEX)); | ||
652 | dev_info(adev->dev, " UVD_CTX_DATA=0x%08X\n", | ||
653 | RREG32(mmUVD_CTX_DATA)); | ||
654 | dev_info(adev->dev, " UVD_CGC_GATE=0x%08X\n", | ||
655 | RREG32(mmUVD_CGC_GATE)); | ||
656 | dev_info(adev->dev, " UVD_CGC_CTRL=0x%08X\n", | ||
657 | RREG32(mmUVD_CGC_CTRL)); | ||
658 | dev_info(adev->dev, " UVD_LMI_CTRL2=0x%08X\n", | ||
659 | RREG32(mmUVD_LMI_CTRL2)); | ||
660 | dev_info(adev->dev, " UVD_MASTINT_EN=0x%08X\n", | ||
661 | RREG32(mmUVD_MASTINT_EN)); | ||
662 | dev_info(adev->dev, " UVD_LMI_ADDR_EXT=0x%08X\n", | ||
663 | RREG32(mmUVD_LMI_ADDR_EXT)); | ||
664 | dev_info(adev->dev, " UVD_LMI_CTRL=0x%08X\n", | ||
665 | RREG32(mmUVD_LMI_CTRL)); | ||
666 | dev_info(adev->dev, " UVD_LMI_SWAP_CNTL=0x%08X\n", | ||
667 | RREG32(mmUVD_LMI_SWAP_CNTL)); | ||
668 | dev_info(adev->dev, " UVD_MP_SWAP_CNTL=0x%08X\n", | ||
669 | RREG32(mmUVD_MP_SWAP_CNTL)); | ||
670 | dev_info(adev->dev, " UVD_MPC_SET_MUXA0=0x%08X\n", | ||
671 | RREG32(mmUVD_MPC_SET_MUXA0)); | ||
672 | dev_info(adev->dev, " UVD_MPC_SET_MUXA1=0x%08X\n", | ||
673 | RREG32(mmUVD_MPC_SET_MUXA1)); | ||
674 | dev_info(adev->dev, " UVD_MPC_SET_MUXB0=0x%08X\n", | ||
675 | RREG32(mmUVD_MPC_SET_MUXB0)); | ||
676 | dev_info(adev->dev, " UVD_MPC_SET_MUXB1=0x%08X\n", | ||
677 | RREG32(mmUVD_MPC_SET_MUXB1)); | ||
678 | dev_info(adev->dev, " UVD_MPC_SET_MUX=0x%08X\n", | ||
679 | RREG32(mmUVD_MPC_SET_MUX)); | ||
680 | dev_info(adev->dev, " UVD_MPC_SET_ALU=0x%08X\n", | ||
681 | RREG32(mmUVD_MPC_SET_ALU)); | ||
682 | dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET0=0x%08X\n", | ||
683 | RREG32(mmUVD_VCPU_CACHE_OFFSET0)); | ||
684 | dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE0=0x%08X\n", | ||
685 | RREG32(mmUVD_VCPU_CACHE_SIZE0)); | ||
686 | dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET1=0x%08X\n", | ||
687 | RREG32(mmUVD_VCPU_CACHE_OFFSET1)); | ||
688 | dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE1=0x%08X\n", | ||
689 | RREG32(mmUVD_VCPU_CACHE_SIZE1)); | ||
690 | dev_info(adev->dev, " UVD_VCPU_CACHE_OFFSET2=0x%08X\n", | ||
691 | RREG32(mmUVD_VCPU_CACHE_OFFSET2)); | ||
692 | dev_info(adev->dev, " UVD_VCPU_CACHE_SIZE2=0x%08X\n", | ||
693 | RREG32(mmUVD_VCPU_CACHE_SIZE2)); | ||
694 | dev_info(adev->dev, " UVD_VCPU_CNTL=0x%08X\n", | ||
695 | RREG32(mmUVD_VCPU_CNTL)); | ||
696 | dev_info(adev->dev, " UVD_SOFT_RESET=0x%08X\n", | ||
697 | RREG32(mmUVD_SOFT_RESET)); | ||
698 | dev_info(adev->dev, " UVD_RBC_IB_SIZE=0x%08X\n", | ||
699 | RREG32(mmUVD_RBC_IB_SIZE)); | ||
700 | dev_info(adev->dev, " UVD_RBC_RB_RPTR=0x%08X\n", | ||
701 | RREG32(mmUVD_RBC_RB_RPTR)); | ||
702 | dev_info(adev->dev, " UVD_RBC_RB_WPTR=0x%08X\n", | ||
703 | RREG32(mmUVD_RBC_RB_WPTR)); | ||
704 | dev_info(adev->dev, " UVD_RBC_RB_WPTR_CNTL=0x%08X\n", | ||
705 | RREG32(mmUVD_RBC_RB_WPTR_CNTL)); | ||
706 | dev_info(adev->dev, " UVD_RBC_RB_CNTL=0x%08X\n", | ||
707 | RREG32(mmUVD_RBC_RB_CNTL)); | ||
708 | dev_info(adev->dev, " UVD_STATUS=0x%08X\n", | ||
709 | RREG32(mmUVD_STATUS)); | ||
710 | dev_info(adev->dev, " UVD_SEMA_TIMEOUT_STATUS=0x%08X\n", | ||
711 | RREG32(mmUVD_SEMA_TIMEOUT_STATUS)); | ||
712 | dev_info(adev->dev, " UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", | ||
713 | RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL)); | ||
714 | dev_info(adev->dev, " UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n", | ||
715 | RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL)); | ||
716 | dev_info(adev->dev, " UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n", | ||
717 | RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL)); | ||
718 | dev_info(adev->dev, " UVD_CONTEXT_ID=0x%08X\n", | ||
719 | RREG32(mmUVD_CONTEXT_ID)); | ||
720 | } | ||
721 | |||
722 | static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev, | ||
723 | struct amdgpu_irq_src *source, | ||
724 | unsigned type, | ||
725 | enum amdgpu_interrupt_state state) | ||
726 | { | ||
727 | /* TODO */ | ||
728 | return 0; | ||
729 | } | ||
730 | |||
731 | static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, | ||
732 | struct amdgpu_irq_src *source, | ||
733 | struct amdgpu_iv_entry *entry) | ||
734 | { | ||
735 | DRM_DEBUG("IH: UVD TRAP\n"); | ||
736 | amdgpu_fence_process(&adev->uvd.ring); | ||
737 | return 0; | ||
738 | } | ||
739 | |||
740 | static int uvd_v6_0_set_clockgating_state(struct amdgpu_device *adev, | ||
741 | enum amdgpu_clockgating_state state) | ||
742 | { | ||
743 | /* TODO */ | ||
744 | |||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | static int uvd_v6_0_set_powergating_state(struct amdgpu_device *adev, | ||
749 | enum amdgpu_powergating_state state) | ||
750 | { | ||
751 | /* This doesn't actually powergate the UVD block. | ||
752 | * That's done in the dpm code via the SMC. This | ||
753 | * just re-inits the block as necessary. The actual | ||
754 | * gating still happens in the dpm code. We should | ||
755 | * revisit this when there is a cleaner line between | ||
756 |  * the smc and the hw blocks. | ||
757 | */ | ||
758 | if (state == AMDGPU_PG_STATE_GATE) { | ||
759 | uvd_v6_0_stop(adev); | ||
760 | return 0; | ||
761 | } else { | ||
762 | return uvd_v6_0_start(adev); | ||
763 | } | ||
764 | } | ||
765 | |||
766 | const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs = { | ||
767 | .early_init = uvd_v6_0_early_init, | ||
768 | .late_init = NULL, | ||
769 | .sw_init = uvd_v6_0_sw_init, | ||
770 | .sw_fini = uvd_v6_0_sw_fini, | ||
771 | .hw_init = uvd_v6_0_hw_init, | ||
772 | .hw_fini = uvd_v6_0_hw_fini, | ||
773 | .suspend = uvd_v6_0_suspend, | ||
774 | .resume = uvd_v6_0_resume, | ||
775 | .is_idle = uvd_v6_0_is_idle, | ||
776 | .wait_for_idle = uvd_v6_0_wait_for_idle, | ||
777 | .soft_reset = uvd_v6_0_soft_reset, | ||
778 | .print_status = uvd_v6_0_print_status, | ||
779 | .set_clockgating_state = uvd_v6_0_set_clockgating_state, | ||
780 | .set_powergating_state = uvd_v6_0_set_powergating_state, | ||
781 | }; | ||
782 | |||
783 | static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { | ||
784 | .get_rptr = uvd_v6_0_ring_get_rptr, | ||
785 | .get_wptr = uvd_v6_0_ring_get_wptr, | ||
786 | .set_wptr = uvd_v6_0_ring_set_wptr, | ||
787 | .parse_cs = amdgpu_uvd_ring_parse_cs, | ||
788 | .emit_ib = uvd_v6_0_ring_emit_ib, | ||
789 | .emit_fence = uvd_v6_0_ring_emit_fence, | ||
790 | .emit_semaphore = uvd_v6_0_ring_emit_semaphore, | ||
791 | .test_ring = uvd_v6_0_ring_test_ring, | ||
792 | .test_ib = uvd_v6_0_ring_test_ib, | ||
793 | .is_lockup = amdgpu_ring_test_lockup, | ||
794 | }; | ||
795 | |||
796 | static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) | ||
797 | { | ||
798 | adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs; | ||
799 | } | ||
800 | |||
801 | static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { | ||
802 | .set = uvd_v6_0_set_interrupt_state, | ||
803 | .process = uvd_v6_0_process_interrupt, | ||
804 | }; | ||
805 | |||
806 | static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) | ||
807 | { | ||
808 | adev->uvd.irq.num_types = 1; | ||
809 | adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; | ||
810 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h new file mode 100644 index 000000000000..bc21afc8abac --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __UVD_V6_0_H__ | ||
25 | #define __UVD_V6_0_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs uvd_v6_0_ip_funcs; | ||
28 | |||
29 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c new file mode 100644 index 000000000000..384c45e74053 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -0,0 +1,521 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | * The above copyright notice and this permission notice (including the | ||
22 | * next paragraph) shall be included in all copies or substantial portions | ||
23 | * of the Software. | ||
24 | * | ||
25 | * Authors: Christian König <christian.koenig@amd.com> | ||
26 | */ | ||
27 | |||
28 | #include <linux/firmware.h> | ||
29 | #include <drm/drmP.h> | ||
30 | #include "amdgpu.h" | ||
31 | #include "amdgpu_vce.h" | ||
32 | #include "vid.h" | ||
33 | #include "vce/vce_3_0_d.h" | ||
34 | #include "vce/vce_3_0_sh_mask.h" | ||
35 | #include "oss/oss_2_0_d.h" | ||
36 | #include "oss/oss_2_0_sh_mask.h" | ||
37 | |||
38 | static void vce_v3_0_mc_resume(struct amdgpu_device *adev); | ||
39 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); | ||
40 | static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); | ||
41 | |||
42 | /** | ||
43 | * vce_v3_0_ring_get_rptr - get read pointer | ||
44 | * | ||
45 | * @ring: amdgpu_ring pointer | ||
46 | * | ||
47 | * Returns the current hardware read pointer | ||
48 | */ | ||
49 | static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) | ||
50 | { | ||
51 | struct amdgpu_device *adev = ring->adev; | ||
52 | |||
53 | if (ring == &adev->vce.ring[0]) | ||
54 | return RREG32(mmVCE_RB_RPTR); | ||
55 | else | ||
56 | return RREG32(mmVCE_RB_RPTR2); | ||
57 | } | ||
58 | |||
59 | /** | ||
60 | * vce_v3_0_ring_get_wptr - get write pointer | ||
61 | * | ||
62 | * @ring: amdgpu_ring pointer | ||
63 | * | ||
64 | * Returns the current hardware write pointer | ||
65 | */ | ||
66 | static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) | ||
67 | { | ||
68 | struct amdgpu_device *adev = ring->adev; | ||
69 | |||
70 | if (ring == &adev->vce.ring[0]) | ||
71 | return RREG32(mmVCE_RB_WPTR); | ||
72 | else | ||
73 | return RREG32(mmVCE_RB_WPTR2); | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * vce_v3_0_ring_set_wptr - set write pointer | ||
78 | * | ||
79 | * @ring: amdgpu_ring pointer | ||
80 | * | ||
81 | * Commits the write pointer to the hardware | ||
82 | */ | ||
83 | static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) | ||
84 | { | ||
85 | struct amdgpu_device *adev = ring->adev; | ||
86 | |||
87 | if (ring == &adev->vce.ring[0]) | ||
88 | WREG32(mmVCE_RB_WPTR, ring->wptr); | ||
89 | else | ||
90 | WREG32(mmVCE_RB_WPTR2, ring->wptr); | ||
91 | } | ||
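
[Editor's note] VCE 3.0 runs two rings against one register file, so the
accessors above select RPTR/RPTR2 (and WPTR/WPTR2) by comparing the ring
pointer against adev->vce.ring[0]. An equivalent index-based formulation,
as a hedged sketch only (the driver uses the pointer comparison shown):

	static uint32_t ex_vce_get_rptr(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;
		unsigned idx = ring - adev->vce.ring;	/* 0 or 1 */

		return RREG32(idx ? mmVCE_RB_RPTR2 : mmVCE_RB_RPTR);
	}
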
92 | |||
93 | /** | ||
94 | * vce_v3_0_start - start VCE block | ||
95 | * | ||
96 | * @adev: amdgpu_device pointer | ||
97 | * | ||
98 | * Setup and start the VCE block | ||
99 | */ | ||
100 | static int vce_v3_0_start(struct amdgpu_device *adev) | ||
101 | { | ||
102 | struct amdgpu_ring *ring; | ||
103 | int i, j, r; | ||
104 | |||
105 | vce_v3_0_mc_resume(adev); | ||
106 | |||
107 | /* set BUSY flag */ | ||
108 | WREG32_P(mmVCE_STATUS, 1, ~1); | ||
109 | |||
110 | ring = &adev->vce.ring[0]; | ||
111 | WREG32(mmVCE_RB_RPTR, ring->wptr); | ||
112 | WREG32(mmVCE_RB_WPTR, ring->wptr); | ||
113 | WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); | ||
114 | WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | ||
115 | WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); | ||
116 | |||
117 | ring = &adev->vce.ring[1]; | ||
118 | WREG32(mmVCE_RB_RPTR2, ring->wptr); | ||
119 | WREG32(mmVCE_RB_WPTR2, ring->wptr); | ||
120 | WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); | ||
121 | WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); | ||
122 | WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); | ||
123 | |||
124 | WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, ~VCE_VCPU_CNTL__CLK_EN_MASK); | ||
125 | |||
126 | WREG32_P(mmVCE_SOFT_RESET, | ||
127 | VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, | ||
128 | ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); | ||
129 | |||
130 | mdelay(100); | ||
131 | |||
132 | WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); | ||
133 | |||
134 | for (i = 0; i < 10; ++i) { | ||
135 | uint32_t status; | ||
136 | for (j = 0; j < 100; ++j) { | ||
137 | status = RREG32(mmVCE_STATUS); | ||
138 | if (status & 2) | ||
139 | break; | ||
140 | mdelay(10); | ||
141 | } | ||
142 | r = 0; | ||
143 | if (status & 2) | ||
144 | break; | ||
145 | |||
146 | DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n"); | ||
147 | WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK, | ||
148 | ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); | ||
149 | mdelay(10); | ||
150 | WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); | ||
151 | mdelay(10); | ||
152 | r = -1; | ||
153 | } | ||
154 | |||
155 | /* clear BUSY flag */ | ||
156 | WREG32_P(mmVCE_STATUS, 0, ~1); | ||
157 | |||
158 | if (r) { | ||
159 | DRM_ERROR("VCE not responding, giving up!!!\n"); | ||
160 | return r; | ||
161 | } | ||
162 | |||
163 | return 0; | ||
164 | } | ||
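
[Editor's note] The boot loop above is a bounded retry: up to 10 ECPU soft
resets, each followed by up to 100 polls at 10 ms, so the worst case is
roughly 10 * (1000 ms + 20 ms of reset settling) ~= 10 s before the -1
bail-out. A hedged sketch of the bare pattern (the 0x2 bit in VCE_STATUS is
taken to be the "firmware up" indication this loop waits for):

	static int ex_wait_vce_up(struct amdgpu_device *adev)
	{
		int i, j;

		for (i = 0; i < 10; i++) {
			for (j = 0; j < 100; j++) {
				if (RREG32(mmVCE_STATUS) & 2)
					return 0;
				mdelay(10);
			}
			/* soft-reset the ECPU here, then poll again */
		}
		return -1;
	}
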
165 | |||
166 | static int vce_v3_0_early_init(struct amdgpu_device *adev) | ||
167 | { | ||
168 | vce_v3_0_set_ring_funcs(adev); | ||
169 | vce_v3_0_set_irq_funcs(adev); | ||
170 | |||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int vce_v3_0_sw_init(struct amdgpu_device *adev) | ||
175 | { | ||
176 | struct amdgpu_ring *ring; | ||
177 | int r; | ||
178 | |||
179 | /* VCE */ | ||
180 | r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq); | ||
181 | if (r) | ||
182 | return r; | ||
183 | |||
184 | r = amdgpu_vce_sw_init(adev); | ||
185 | if (r) | ||
186 | return r; | ||
187 | |||
188 | r = amdgpu_vce_resume(adev); | ||
189 | if (r) | ||
190 | return r; | ||
191 | |||
192 | ring = &adev->vce.ring[0]; | ||
193 | sprintf(ring->name, "vce0"); | ||
194 | r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf, | ||
195 | &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); | ||
196 | if (r) | ||
197 | return r; | ||
198 | |||
199 | ring = &adev->vce.ring[1]; | ||
200 | sprintf(ring->name, "vce1"); | ||
201 | r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf, | ||
202 | &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE); | ||
203 | if (r) | ||
204 | return r; | ||
205 | |||
206 | return r; | ||
207 | } | ||
208 | |||
209 | static int vce_v3_0_sw_fini(struct amdgpu_device *adev) | ||
210 | { | ||
211 | int r; | ||
212 | |||
213 | r = amdgpu_vce_suspend(adev); | ||
214 | if (r) | ||
215 | return r; | ||
216 | |||
217 | r = amdgpu_vce_sw_fini(adev); | ||
218 | if (r) | ||
219 | return r; | ||
220 | |||
221 | return r; | ||
222 | } | ||
223 | |||
224 | static int vce_v3_0_hw_init(struct amdgpu_device *adev) | ||
225 | { | ||
226 | struct amdgpu_ring *ring; | ||
227 | int r; | ||
228 | |||
229 | r = vce_v3_0_start(adev); | ||
230 | if (r) | ||
231 | return r; | ||
232 | |||
233 | ring = &adev->vce.ring[0]; | ||
234 | ring->ready = true; | ||
235 | r = amdgpu_ring_test_ring(ring); | ||
236 | if (r) { | ||
237 | ring->ready = false; | ||
238 | return r; | ||
239 | } | ||
240 | |||
241 | ring = &adev->vce.ring[1]; | ||
242 | ring->ready = true; | ||
243 | r = amdgpu_ring_test_ring(ring); | ||
244 | if (r) { | ||
245 | ring->ready = false; | ||
246 | return r; | ||
247 | } | ||
248 | |||
249 | DRM_INFO("VCE initialized successfully.\n"); | ||
250 | |||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | static int vce_v3_0_hw_fini(struct amdgpu_device *adev) | ||
255 | { | ||
256 | /* TODO */ | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static int vce_v3_0_suspend(struct amdgpu_device *adev) | ||
261 | { | ||
262 | int r; | ||
263 | |||
264 | r = vce_v3_0_hw_fini(adev); | ||
265 | if (r) | ||
266 | return r; | ||
267 | |||
268 | r = amdgpu_vce_suspend(adev); | ||
269 | if (r) | ||
270 | return r; | ||
271 | |||
272 | return r; | ||
273 | } | ||
274 | |||
275 | static int vce_v3_0_resume(struct amdgpu_device *adev) | ||
276 | { | ||
277 | int r; | ||
278 | |||
279 | r = amdgpu_vce_resume(adev); | ||
280 | if (r) | ||
281 | return r; | ||
282 | |||
283 | r = vce_v3_0_hw_init(adev); | ||
284 | if (r) | ||
285 | return r; | ||
286 | |||
287 | return r; | ||
288 | } | ||
289 | |||
290 | static void vce_v3_0_mc_resume(struct amdgpu_device *adev) | ||
291 | { | ||
292 | uint32_t offset, size; | ||
293 | |||
294 | WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16)); | ||
295 | WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000); | ||
296 | WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F); | ||
297 | WREG32(mmVCE_CLOCK_GATING_B, 0xf7); | ||
298 | |||
299 | WREG32(mmVCE_LMI_CTRL, 0x00398000); | ||
300 | WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1); | ||
301 | WREG32(mmVCE_LMI_SWAP_CNTL, 0); | ||
302 | WREG32(mmVCE_LMI_SWAP_CNTL1, 0); | ||
303 | WREG32(mmVCE_LMI_VM_CTRL, 0); | ||
304 | |||
305 | WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8)); | ||
306 | offset = AMDGPU_VCE_FIRMWARE_OFFSET; | ||
307 | size = AMDGPU_GPU_PAGE_ALIGN(adev->vce.fw->size); | ||
308 | WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff); | ||
309 | WREG32(mmVCE_VCPU_CACHE_SIZE0, size); | ||
310 | |||
311 | offset += size; | ||
312 | size = AMDGPU_VCE_STACK_SIZE; | ||
313 | WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff); | ||
314 | WREG32(mmVCE_VCPU_CACHE_SIZE1, size); | ||
315 | |||
316 | offset += size; | ||
317 | size = AMDGPU_VCE_HEAP_SIZE; | ||
318 | WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff); | ||
319 | WREG32(mmVCE_VCPU_CACHE_SIZE2, size); | ||
320 | |||
321 | WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100); | ||
322 | |||
323 | WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, | ||
324 | ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); | ||
325 | } | ||
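
[Editor's note] Unlike UVD's LOW/HIGH 64-bit BAR pair, VCE takes a single
40-bit base address in 256-byte units, hence the ">> 8"; the OFFSETn values
are then byte offsets within that window, masked to 31 bits
("& 0x7fffffff") as above. A worked example (the address is illustrative):

	uint64_t gpu_addr = 0x123456700ULL;		/* 256-byte aligned */
	uint32_t bar = (uint32_t)(gpu_addr >> 8);	/* value written to the BAR */
	/* firmware then sits at AMDGPU_VCE_FIRMWARE_OFFSET inside the window */
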
326 | |||
327 | static bool vce_v3_0_is_idle(struct amdgpu_device *adev) | ||
328 | { | ||
329 | return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK); | ||
330 | } | ||
331 | |||
332 | static int vce_v3_0_wait_for_idle(struct amdgpu_device *adev) | ||
333 | { | ||
334 | unsigned i; | ||
335 | |||
336 | for (i = 0; i < adev->usec_timeout; i++) { | ||
337 | if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK)) | ||
338 | return 0; | ||
339 | } | ||
340 | return -ETIMEDOUT; | ||
341 | } | ||
342 | |||
343 | static int vce_v3_0_soft_reset(struct amdgpu_device *adev) | ||
344 | { | ||
345 | WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK, | ||
346 | ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK); | ||
347 | mdelay(5); | ||
348 | |||
349 | return vce_v3_0_start(adev); | ||
350 | } | ||
351 | |||
352 | static void vce_v3_0_print_status(struct amdgpu_device *adev) | ||
353 | { | ||
354 | dev_info(adev->dev, "VCE 3.0 registers\n"); | ||
355 | dev_info(adev->dev, " VCE_STATUS=0x%08X\n", | ||
356 | RREG32(mmVCE_STATUS)); | ||
357 | dev_info(adev->dev, " VCE_VCPU_CNTL=0x%08X\n", | ||
358 | RREG32(mmVCE_VCPU_CNTL)); | ||
359 | dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n", | ||
360 | RREG32(mmVCE_VCPU_CACHE_OFFSET0)); | ||
361 | dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE0=0x%08X\n", | ||
362 | RREG32(mmVCE_VCPU_CACHE_SIZE0)); | ||
363 | dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n", | ||
364 | RREG32(mmVCE_VCPU_CACHE_OFFSET1)); | ||
365 | dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE1=0x%08X\n", | ||
366 | RREG32(mmVCE_VCPU_CACHE_SIZE1)); | ||
367 | dev_info(adev->dev, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n", | ||
368 | RREG32(mmVCE_VCPU_CACHE_OFFSET2)); | ||
369 | dev_info(adev->dev, " VCE_VCPU_CACHE_SIZE2=0x%08X\n", | ||
370 | RREG32(mmVCE_VCPU_CACHE_SIZE2)); | ||
371 | dev_info(adev->dev, " VCE_SOFT_RESET=0x%08X\n", | ||
372 | RREG32(mmVCE_SOFT_RESET)); | ||
373 | dev_info(adev->dev, " VCE_RB_BASE_LO2=0x%08X\n", | ||
374 | RREG32(mmVCE_RB_BASE_LO2)); | ||
375 | dev_info(adev->dev, " VCE_RB_BASE_HI2=0x%08X\n", | ||
376 | RREG32(mmVCE_RB_BASE_HI2)); | ||
377 | dev_info(adev->dev, " VCE_RB_SIZE2=0x%08X\n", | ||
378 | RREG32(mmVCE_RB_SIZE2)); | ||
379 | dev_info(adev->dev, " VCE_RB_RPTR2=0x%08X\n", | ||
380 | RREG32(mmVCE_RB_RPTR2)); | ||
381 | dev_info(adev->dev, " VCE_RB_WPTR2=0x%08X\n", | ||
382 | RREG32(mmVCE_RB_WPTR2)); | ||
383 | dev_info(adev->dev, " VCE_RB_BASE_LO=0x%08X\n", | ||
384 | RREG32(mmVCE_RB_BASE_LO)); | ||
385 | dev_info(adev->dev, " VCE_RB_BASE_HI=0x%08X\n", | ||
386 | RREG32(mmVCE_RB_BASE_HI)); | ||
387 | dev_info(adev->dev, " VCE_RB_SIZE=0x%08X\n", | ||
388 | RREG32(mmVCE_RB_SIZE)); | ||
389 | dev_info(adev->dev, " VCE_RB_RPTR=0x%08X\n", | ||
390 | RREG32(mmVCE_RB_RPTR)); | ||
391 | dev_info(adev->dev, " VCE_RB_WPTR=0x%08X\n", | ||
392 | RREG32(mmVCE_RB_WPTR)); | ||
393 | dev_info(adev->dev, " VCE_CLOCK_GATING_A=0x%08X\n", | ||
394 | RREG32(mmVCE_CLOCK_GATING_A)); | ||
395 | dev_info(adev->dev, " VCE_CLOCK_GATING_B=0x%08X\n", | ||
396 | RREG32(mmVCE_CLOCK_GATING_B)); | ||
397 | dev_info(adev->dev, " VCE_UENC_CLOCK_GATING=0x%08X\n", | ||
398 | RREG32(mmVCE_UENC_CLOCK_GATING)); | ||
399 | dev_info(adev->dev, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n", | ||
400 | RREG32(mmVCE_UENC_REG_CLOCK_GATING)); | ||
401 | dev_info(adev->dev, " VCE_SYS_INT_EN=0x%08X\n", | ||
402 | RREG32(mmVCE_SYS_INT_EN)); | ||
403 | dev_info(adev->dev, " VCE_LMI_CTRL2=0x%08X\n", | ||
404 | RREG32(mmVCE_LMI_CTRL2)); | ||
405 | dev_info(adev->dev, " VCE_LMI_CTRL=0x%08X\n", | ||
406 | RREG32(mmVCE_LMI_CTRL)); | ||
407 | dev_info(adev->dev, " VCE_LMI_VM_CTRL=0x%08X\n", | ||
408 | RREG32(mmVCE_LMI_VM_CTRL)); | ||
409 | dev_info(adev->dev, " VCE_LMI_SWAP_CNTL=0x%08X\n", | ||
410 | RREG32(mmVCE_LMI_SWAP_CNTL)); | ||
411 | dev_info(adev->dev, " VCE_LMI_SWAP_CNTL1=0x%08X\n", | ||
412 | RREG32(mmVCE_LMI_SWAP_CNTL1)); | ||
413 | dev_info(adev->dev, " VCE_LMI_CACHE_CTRL=0x%08X\n", | ||
414 | RREG32(mmVCE_LMI_CACHE_CTRL)); | ||
415 | } | ||
416 | |||
417 | static int vce_v3_0_set_interrupt_state(struct amdgpu_device *adev, | ||
418 | struct amdgpu_irq_src *source, | ||
419 | unsigned type, | ||
420 | enum amdgpu_interrupt_state state) | ||
421 | { | ||
422 | uint32_t val = 0; | ||
423 | |||
424 | if (state == AMDGPU_IRQ_STATE_ENABLE) | ||
425 | val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; | ||
426 | |||
427 | WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, | ||
432 | struct amdgpu_irq_src *source, | ||
433 | struct amdgpu_iv_entry *entry) | ||
434 | { | ||
435 | DRM_DEBUG("IH: VCE\n"); | ||
436 | switch (entry->src_data) { | ||
437 | case 0: | ||
438 | amdgpu_fence_process(&adev->vce.ring[0]); | ||
439 | break; | ||
440 | case 1: | ||
441 | amdgpu_fence_process(&adev->vce.ring[1]); | ||
442 | break; | ||
443 | default: | ||
444 | DRM_ERROR("Unhandled interrupt: %d %d\n", | ||
445 | entry->src_id, entry->src_data); | ||
446 | break; | ||
447 | } | ||
448 | |||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | static int vce_v3_0_set_clockgating_state(struct amdgpu_device *adev, | ||
453 | enum amdgpu_clockgating_state state) | ||
454 | { | ||
455 | /* TODO */ | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | static int vce_v3_0_set_powergating_state(struct amdgpu_device *adev, | ||
460 | enum amdgpu_powergating_state state) | ||
461 | { | ||
462 | /* This doesn't actually powergate the VCE block. | ||
463 | * That's done in the dpm code via the SMC. This | ||
464 | * just re-inits the block as necessary. The actual | ||
465 | * gating still happens in the dpm code. We should | ||
466 | * revisit this when there is a cleaner line between | ||
467 |  * the smc and the hw blocks. | ||
468 | */ | ||
469 | if (state == AMDGPU_PG_STATE_GATE) | ||
470 | /* XXX do we need a vce_v3_0_stop()? */ | ||
471 | return 0; | ||
472 | else | ||
473 | return vce_v3_0_start(adev); | ||
474 | } | ||
475 | |||
476 | const struct amdgpu_ip_funcs vce_v3_0_ip_funcs = { | ||
477 | .early_init = vce_v3_0_early_init, | ||
478 | .late_init = NULL, | ||
479 | .sw_init = vce_v3_0_sw_init, | ||
480 | .sw_fini = vce_v3_0_sw_fini, | ||
481 | .hw_init = vce_v3_0_hw_init, | ||
482 | .hw_fini = vce_v3_0_hw_fini, | ||
483 | .suspend = vce_v3_0_suspend, | ||
484 | .resume = vce_v3_0_resume, | ||
485 | .is_idle = vce_v3_0_is_idle, | ||
486 | .wait_for_idle = vce_v3_0_wait_for_idle, | ||
487 | .soft_reset = vce_v3_0_soft_reset, | ||
488 | .print_status = vce_v3_0_print_status, | ||
489 | .set_clockgating_state = vce_v3_0_set_clockgating_state, | ||
490 | .set_powergating_state = vce_v3_0_set_powergating_state, | ||
491 | }; | ||
492 | |||
493 | static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { | ||
494 | .get_rptr = vce_v3_0_ring_get_rptr, | ||
495 | .get_wptr = vce_v3_0_ring_get_wptr, | ||
496 | .set_wptr = vce_v3_0_ring_set_wptr, | ||
497 | .parse_cs = amdgpu_vce_ring_parse_cs, | ||
498 | .emit_ib = amdgpu_vce_ring_emit_ib, | ||
499 | .emit_fence = amdgpu_vce_ring_emit_fence, | ||
500 | .emit_semaphore = amdgpu_vce_ring_emit_semaphore, | ||
501 | .test_ring = amdgpu_vce_ring_test_ring, | ||
502 | .test_ib = amdgpu_vce_ring_test_ib, | ||
503 | .is_lockup = amdgpu_ring_test_lockup, | ||
504 | }; | ||
505 | |||
506 | static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) | ||
507 | { | ||
508 | adev->vce.ring[0].funcs = &vce_v3_0_ring_funcs; | ||
509 | adev->vce.ring[1].funcs = &vce_v3_0_ring_funcs; | ||
510 | } | ||
511 | |||
512 | static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs = { | ||
513 | .set = vce_v3_0_set_interrupt_state, | ||
514 | .process = vce_v3_0_process_interrupt, | ||
515 | }; | ||
516 | |||
517 | static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev) | ||
518 | { | ||
519 | adev->vce.irq.num_types = 1; | ||
520 | adev->vce.irq.funcs = &vce_v3_0_irq_funcs; | ||
521 | } | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h new file mode 100644 index 000000000000..f3c2ba92a1f1 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __VCE_V3_0_H__ | ||
25 | #define __VCE_V3_0_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs vce_v3_0_ip_funcs; | ||
28 | |||
29 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c new file mode 100644 index 000000000000..20a159803983 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
@@ -0,0 +1,1373 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include <linux/firmware.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/module.h> | ||
26 | #include "drmP.h" | ||
27 | #include "amdgpu.h" | ||
28 | #include "amdgpu_atombios.h" | ||
29 | #include "amdgpu_ih.h" | ||
30 | #include "amdgpu_uvd.h" | ||
31 | #include "amdgpu_vce.h" | ||
32 | #include "amdgpu_ucode.h" | ||
33 | #include "atom.h" | ||
34 | |||
35 | #include "gmc/gmc_8_1_d.h" | ||
36 | #include "gmc/gmc_8_1_sh_mask.h" | ||
37 | |||
38 | #include "oss/oss_3_0_d.h" | ||
39 | #include "oss/oss_3_0_sh_mask.h" | ||
40 | |||
41 | #include "bif/bif_5_0_d.h" | ||
42 | #include "bif/bif_5_0_sh_mask.h" | ||
43 | |||
44 | #include "gca/gfx_8_0_d.h" | ||
45 | #include "gca/gfx_8_0_sh_mask.h" | ||
46 | |||
47 | #include "smu/smu_7_1_1_d.h" | ||
48 | #include "smu/smu_7_1_1_sh_mask.h" | ||
49 | |||
50 | #include "uvd/uvd_5_0_d.h" | ||
51 | #include "uvd/uvd_5_0_sh_mask.h" | ||
52 | |||
53 | #include "vce/vce_3_0_d.h" | ||
54 | #include "vce/vce_3_0_sh_mask.h" | ||
55 | |||
56 | #include "dce/dce_10_0_d.h" | ||
57 | #include "dce/dce_10_0_sh_mask.h" | ||
58 | |||
59 | #include "vid.h" | ||
60 | #include "vi.h" | ||
61 | #include "vi_dpm.h" | ||
62 | #include "gmc_v8_0.h" | ||
63 | #include "gfx_v8_0.h" | ||
64 | #include "sdma_v2_4.h" | ||
65 | #include "sdma_v3_0.h" | ||
66 | #include "dce_v10_0.h" | ||
67 | #include "dce_v11_0.h" | ||
68 | #include "iceland_ih.h" | ||
69 | #include "tonga_ih.h" | ||
70 | #include "cz_ih.h" | ||
71 | #include "uvd_v5_0.h" | ||
72 | #include "uvd_v6_0.h" | ||
73 | #include "vce_v3_0.h" | ||
74 | |||
75 | /* | ||
76 | * Indirect registers accessor | ||
77 | */ | ||
78 | static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg) | ||
79 | { | ||
80 | unsigned long flags; | ||
81 | u32 r; | ||
82 | |||
83 | spin_lock_irqsave(&adev->pcie_idx_lock, flags); | ||
84 | WREG32(mmPCIE_INDEX, reg); | ||
85 | (void)RREG32(mmPCIE_INDEX); | ||
86 | r = RREG32(mmPCIE_DATA); | ||
87 | spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); | ||
88 | return r; | ||
89 | } | ||
90 | |||
91 | static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | ||
92 | { | ||
93 | unsigned long flags; | ||
94 | |||
95 | spin_lock_irqsave(&adev->pcie_idx_lock, flags); | ||
96 | WREG32(mmPCIE_INDEX, reg); | ||
97 | (void)RREG32(mmPCIE_INDEX); | ||
98 | WREG32(mmPCIE_DATA, v); | ||
99 | (void)RREG32(mmPCIE_DATA); | ||
100 | spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); | ||
101 | } | ||
102 | |||
103 | static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg) | ||
104 | { | ||
105 | unsigned long flags; | ||
106 | u32 r; | ||
107 | |||
108 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
109 | WREG32(mmSMC_IND_INDEX_0, (reg)); | ||
110 | r = RREG32(mmSMC_IND_DATA_0); | ||
111 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
112 | return r; | ||
113 | } | ||
114 | |||
115 | static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | ||
116 | { | ||
117 | unsigned long flags; | ||
118 | |||
119 | spin_lock_irqsave(&adev->smc_idx_lock, flags); | ||
120 | WREG32(mmSMC_IND_INDEX_0, (reg)); | ||
121 | WREG32(mmSMC_IND_DATA_0, (v)); | ||
122 | spin_unlock_irqrestore(&adev->smc_idx_lock, flags); | ||
123 | } | ||
124 | |||
125 | static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) | ||
126 | { | ||
127 | unsigned long flags; | ||
128 | u32 r; | ||
129 | |||
130 | spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); | ||
131 | WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff)); | ||
132 | r = RREG32(mmUVD_CTX_DATA); | ||
133 | spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); | ||
134 | return r; | ||
135 | } | ||
136 | |||
137 | static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | ||
138 | { | ||
139 | unsigned long flags; | ||
140 | |||
141 | spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); | ||
142 | WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff)); | ||
143 | WREG32(mmUVD_CTX_DATA, (v)); | ||
144 | spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); | ||
145 | } | ||
146 | |||
147 | static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | u32 r; | ||
151 | |||
152 | spin_lock_irqsave(&adev->didt_idx_lock, flags); | ||
153 | WREG32(mmDIDT_IND_INDEX, (reg)); | ||
154 | r = RREG32(mmDIDT_IND_DATA); | ||
155 | spin_unlock_irqrestore(&adev->didt_idx_lock, flags); | ||
156 | return r; | ||
157 | } | ||
158 | |||
159 | static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) | ||
160 | { | ||
161 | unsigned long flags; | ||
162 | |||
163 | spin_lock_irqsave(&adev->didt_idx_lock, flags); | ||
164 | WREG32(mmDIDT_IND_INDEX, (reg)); | ||
165 | WREG32(mmDIDT_IND_DATA, (v)); | ||
166 | spin_unlock_irqrestore(&adev->didt_idx_lock, flags); | ||
167 | } | ||
168 | |||
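All six accessor pairs above share the same index/data idiom: take the per-aperture spinlock, write the register offset into the INDEX register, then move the value through the paired DATA register. The extra (void)RREG32(mmPCIE_INDEX) in the PCIE pair is a posting read that flushes the index write before the data access. A minimal sketch of the idiom, with idx_reg/data_reg/lock as hypothetical placeholders rather than names from this patch:

	static u32 indirect_rreg(struct amdgpu_device *adev, u32 idx_reg,
				 u32 data_reg, spinlock_t *lock, u32 reg)
	{
		unsigned long flags;
		u32 r;

		/* the index write and the data access must not interleave */
		spin_lock_irqsave(lock, flags);
		WREG32(idx_reg, reg);		/* select the indirect register */
		r = RREG32(data_reg);		/* fetch it through the data port */
		spin_unlock_irqrestore(lock, flags);
		return r;
	}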
169 | static const u32 tonga_mgcg_cgcg_init[] = | ||
170 | { | ||
171 | mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, | ||
172 | mmPCIE_INDEX, 0xffffffff, 0x0140001c, | ||
173 | mmPCIE_DATA, 0x000f0000, 0x00000000, | ||
174 | mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C, | ||
175 | mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, | ||
176 | mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, | ||
177 | mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, | ||
178 | mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, | ||
179 | mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, | ||
180 | }; | ||
181 | |||
182 | static const u32 iceland_mgcg_cgcg_init[] = | ||
183 | { | ||
184 | mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2, | ||
185 | mmPCIE_DATA, 0x000f0000, 0x00000000, | ||
186 | mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0, | ||
187 | mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100, | ||
188 | mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, | ||
189 | }; | ||
190 | |||
191 | static const u32 cz_mgcg_cgcg_init[] = | ||
192 | { | ||
193 | mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, | ||
194 | mmPCIE_INDEX, 0xffffffff, 0x0140001c, | ||
195 | mmPCIE_DATA, 0x000f0000, 0x00000000, | ||
196 | mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, | ||
197 | mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, | ||
198 | mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100, | ||
199 | mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104, | ||
200 | }; | ||
201 | |||
202 | static void vi_init_golden_registers(struct amdgpu_device *adev) | ||
203 | { | ||
204 | /* Some of the registers might be dependent on GRBM_GFX_INDEX */ | ||
205 | mutex_lock(&adev->grbm_idx_mutex); | ||
206 | |||
207 | switch (adev->asic_type) { | ||
208 | case CHIP_TOPAZ: | ||
209 | amdgpu_program_register_sequence(adev, | ||
210 | iceland_mgcg_cgcg_init, | ||
211 | (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); | ||
212 | break; | ||
213 | case CHIP_TONGA: | ||
214 | amdgpu_program_register_sequence(adev, | ||
215 | tonga_mgcg_cgcg_init, | ||
216 | (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init)); | ||
217 | break; | ||
218 | case CHIP_CARRIZO: | ||
219 | amdgpu_program_register_sequence(adev, | ||
220 | cz_mgcg_cgcg_init, | ||
221 | (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init)); | ||
222 | break; | ||
223 | default: | ||
224 | break; | ||
225 | } | ||
226 | mutex_unlock(&adev->grbm_idx_mutex); | ||
227 | } | ||
228 | |||
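Each golden-register table above is a flat array consumed three entries at a time as {register offset, and_mask, or_value} triplets. The helper is expected to do a read-modify-write per triplet, roughly as below (a simplified sketch of amdgpu_program_register_sequence(), not a verbatim copy):

	static void program_register_sequence(struct amdgpu_device *adev,
					      const u32 *registers, u32 array_size)
	{
		u32 tmp, reg, and_mask, or_mask;
		int i;

		for (i = 0; i < array_size; i += 3) {
			reg      = registers[i + 0];
			and_mask = registers[i + 1];
			or_mask  = registers[i + 2];

			if (and_mask == 0xffffffff) {
				tmp = or_mask;		/* full overwrite */
			} else {
				tmp = RREG32(reg);
				tmp &= ~and_mask;	/* clear the covered bits */
				tmp |= or_mask;		/* apply the golden value */
			}
			WREG32(reg, tmp);
		}
	}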
229 | /** | ||
230 | * vi_get_xclk - get the xclk | ||
231 | * | ||
232 | * @adev: amdgpu_device pointer | ||
233 | * | ||
234 | * Returns the reference clock used by the gfx engine | ||
235 | * (VI). | ||
236 | */ | ||
237 | static u32 vi_get_xclk(struct amdgpu_device *adev) | ||
238 | { | ||
239 | u32 reference_clock = adev->clock.spll.reference_freq; | ||
240 | u32 tmp; | ||
241 | |||
242 | if (adev->flags & AMDGPU_IS_APU) | ||
243 | return reference_clock; | ||
244 | |||
245 | tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2); | ||
246 | if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK)) | ||
247 | return 1000; | ||
248 | |||
249 | tmp = RREG32_SMC(ixCG_CLKPIN_CNTL); | ||
250 | if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE)) | ||
251 | return reference_clock / 4; | ||
252 | |||
253 | return reference_clock; | ||
254 | } | ||
255 | |||
256 | /** | ||
257 | * vi_srbm_select - select specific register instances | ||
258 | * | ||
259 | * @adev: amdgpu_device pointer | ||
260 | * @me: selected ME (micro engine) | ||
261 | * @pipe: pipe | ||
262 | * @queue: queue | ||
263 | * @vmid: VMID | ||
264 | * | ||
265 | * Switches the currently active register instances. Some | ||
266 | * registers are instanced per VMID, others are instanced per | ||
267 | * me/pipe/queue combination. | ||
268 | */ | ||
269 | void vi_srbm_select(struct amdgpu_device *adev, | ||
270 | u32 me, u32 pipe, u32 queue, u32 vmid) | ||
271 | { | ||
272 | u32 srbm_gfx_cntl = 0; | ||
273 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe); | ||
274 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me); | ||
275 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid); | ||
276 | srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue); | ||
277 | WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl); | ||
278 | } | ||
279 | |||
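Because SRBM_GFX_CNTL is global state, callers serialize the select/program/deselect sequence; the GFX and GMC code elsewhere in this series does so under adev->srbm_mutex. A typical usage pattern (illustrative):

	mutex_lock(&adev->srbm_mutex);
	vi_srbm_select(adev, me, pipe, queue, vmid);
	/* ... program the per-instance registers here ... */
	vi_srbm_select(adev, 0, 0, 0, 0);	/* back to the default instance */
	mutex_unlock(&adev->srbm_mutex);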
280 | static void vi_vga_set_state(struct amdgpu_device *adev, bool state) | ||
281 | { | ||
282 | /* todo */ | ||
283 | } | ||
284 | |||
285 | static bool vi_read_disabled_bios(struct amdgpu_device *adev) | ||
286 | { | ||
287 | u32 bus_cntl; | ||
288 | u32 d1vga_control = 0; | ||
289 | u32 d2vga_control = 0; | ||
290 | u32 vga_render_control = 0; | ||
291 | u32 rom_cntl; | ||
292 | bool r; | ||
293 | |||
294 | bus_cntl = RREG32(mmBUS_CNTL); | ||
295 | if (adev->mode_info.num_crtc) { | ||
296 | d1vga_control = RREG32(mmD1VGA_CONTROL); | ||
297 | d2vga_control = RREG32(mmD2VGA_CONTROL); | ||
298 | vga_render_control = RREG32(mmVGA_RENDER_CONTROL); | ||
299 | } | ||
300 | rom_cntl = RREG32_SMC(ixROM_CNTL); | ||
301 | |||
302 | /* enable the rom */ | ||
303 | WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK)); | ||
304 | if (adev->mode_info.num_crtc) { | ||
305 | /* Disable VGA mode */ | ||
306 | WREG32(mmD1VGA_CONTROL, | ||
307 | (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK | | ||
308 | D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK))); | ||
309 | WREG32(mmD2VGA_CONTROL, | ||
310 | (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK | | ||
311 | D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK))); | ||
312 | WREG32(mmVGA_RENDER_CONTROL, | ||
313 | (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK)); | ||
314 | } | ||
315 | WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK); | ||
316 | |||
317 | r = amdgpu_read_bios(adev); | ||
318 | |||
319 | /* restore regs */ | ||
320 | WREG32(mmBUS_CNTL, bus_cntl); | ||
321 | if (adev->mode_info.num_crtc) { | ||
322 | WREG32(mmD1VGA_CONTROL, d1vga_control); | ||
323 | WREG32(mmD2VGA_CONTROL, d2vga_control); | ||
324 | WREG32(mmVGA_RENDER_CONTROL, vga_render_control); | ||
325 | } | ||
326 | WREG32_SMC(ixROM_CNTL, rom_cntl); | ||
327 | return r; | ||
328 | } | ||
329 | static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { | ||
330 | {mmGB_MACROTILE_MODE7, true}, | ||
331 | }; | ||
332 | |||
333 | static struct amdgpu_allowed_register_entry cz_allowed_read_registers[] = { | ||
334 | {mmGB_TILE_MODE7, true}, | ||
335 | {mmGB_TILE_MODE12, true}, | ||
336 | {mmGB_TILE_MODE17, true}, | ||
337 | {mmGB_TILE_MODE23, true}, | ||
338 | {mmGB_MACROTILE_MODE7, true}, | ||
339 | }; | ||
340 | |||
341 | static struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = { | ||
342 | {mmGRBM_STATUS, false}, | ||
343 | {mmGB_ADDR_CONFIG, false}, | ||
344 | {mmMC_ARB_RAMCFG, false}, | ||
345 | {mmGB_TILE_MODE0, false}, | ||
346 | {mmGB_TILE_MODE1, false}, | ||
347 | {mmGB_TILE_MODE2, false}, | ||
348 | {mmGB_TILE_MODE3, false}, | ||
349 | {mmGB_TILE_MODE4, false}, | ||
350 | {mmGB_TILE_MODE5, false}, | ||
351 | {mmGB_TILE_MODE6, false}, | ||
352 | {mmGB_TILE_MODE7, false}, | ||
353 | {mmGB_TILE_MODE8, false}, | ||
354 | {mmGB_TILE_MODE9, false}, | ||
355 | {mmGB_TILE_MODE10, false}, | ||
356 | {mmGB_TILE_MODE11, false}, | ||
357 | {mmGB_TILE_MODE12, false}, | ||
358 | {mmGB_TILE_MODE13, false}, | ||
359 | {mmGB_TILE_MODE14, false}, | ||
360 | {mmGB_TILE_MODE15, false}, | ||
361 | {mmGB_TILE_MODE16, false}, | ||
362 | {mmGB_TILE_MODE17, false}, | ||
363 | {mmGB_TILE_MODE18, false}, | ||
364 | {mmGB_TILE_MODE19, false}, | ||
365 | {mmGB_TILE_MODE20, false}, | ||
366 | {mmGB_TILE_MODE21, false}, | ||
367 | {mmGB_TILE_MODE22, false}, | ||
368 | {mmGB_TILE_MODE23, false}, | ||
369 | {mmGB_TILE_MODE24, false}, | ||
370 | {mmGB_TILE_MODE25, false}, | ||
371 | {mmGB_TILE_MODE26, false}, | ||
372 | {mmGB_TILE_MODE27, false}, | ||
373 | {mmGB_TILE_MODE28, false}, | ||
374 | {mmGB_TILE_MODE29, false}, | ||
375 | {mmGB_TILE_MODE30, false}, | ||
376 | {mmGB_TILE_MODE31, false}, | ||
377 | {mmGB_MACROTILE_MODE0, false}, | ||
378 | {mmGB_MACROTILE_MODE1, false}, | ||
379 | {mmGB_MACROTILE_MODE2, false}, | ||
380 | {mmGB_MACROTILE_MODE3, false}, | ||
381 | {mmGB_MACROTILE_MODE4, false}, | ||
382 | {mmGB_MACROTILE_MODE5, false}, | ||
383 | {mmGB_MACROTILE_MODE6, false}, | ||
384 | {mmGB_MACROTILE_MODE7, false}, | ||
385 | {mmGB_MACROTILE_MODE8, false}, | ||
386 | {mmGB_MACROTILE_MODE9, false}, | ||
387 | {mmGB_MACROTILE_MODE10, false}, | ||
388 | {mmGB_MACROTILE_MODE11, false}, | ||
389 | {mmGB_MACROTILE_MODE12, false}, | ||
390 | {mmGB_MACROTILE_MODE13, false}, | ||
391 | {mmGB_MACROTILE_MODE14, false}, | ||
392 | {mmGB_MACROTILE_MODE15, false}, | ||
393 | {mmCC_RB_BACKEND_DISABLE, false, true}, | ||
394 | {mmGC_USER_RB_BACKEND_DISABLE, false, true}, | ||
395 | {mmGB_BACKEND_MAP, false, false}, | ||
396 | {mmPA_SC_RASTER_CONFIG, false, true}, | ||
397 | {mmPA_SC_RASTER_CONFIG_1, false, true}, | ||
398 | }; | ||
399 | |||
400 | static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num, | ||
401 | u32 sh_num, u32 reg_offset) | ||
402 | { | ||
403 | uint32_t val; | ||
404 | |||
405 | mutex_lock(&adev->grbm_idx_mutex); | ||
406 | if (se_num != 0xffffffff || sh_num != 0xffffffff) | ||
407 | gfx_v8_0_select_se_sh(adev, se_num, sh_num); | ||
408 | |||
409 | val = RREG32(reg_offset); | ||
410 | |||
411 | if (se_num != 0xffffffff || sh_num != 0xffffffff) | ||
412 | gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); | ||
413 | mutex_unlock(&adev->grbm_idx_mutex); | ||
414 | return val; | ||
415 | } | ||
416 | |||
417 | static int vi_read_register(struct amdgpu_device *adev, u32 se_num, | ||
418 | u32 sh_num, u32 reg_offset, u32 *value) | ||
419 | { | ||
420 | struct amdgpu_allowed_register_entry *asic_register_table = NULL; | ||
421 | struct amdgpu_allowed_register_entry *asic_register_entry; | ||
422 | uint32_t size, i; | ||
423 | |||
424 | *value = 0; | ||
425 | switch (adev->asic_type) { | ||
426 | case CHIP_TOPAZ: | ||
427 | asic_register_table = tonga_allowed_read_registers; | ||
428 | size = ARRAY_SIZE(tonga_allowed_read_registers); | ||
429 | break; | ||
430 | case CHIP_TONGA: | ||
431 | case CHIP_CARRIZO: | ||
432 | asic_register_table = cz_allowed_read_registers; | ||
433 | size = ARRAY_SIZE(cz_allowed_read_registers); | ||
434 | break; | ||
435 | default: | ||
436 | return -EINVAL; | ||
437 | } | ||
438 | |||
439 | if (asic_register_table) { | ||
440 | for (i = 0; i < size; i++) { | ||
441 | asic_register_entry = asic_register_table + i; | ||
442 | if (reg_offset != asic_register_entry->reg_offset) | ||
443 | continue; | ||
444 | if (!asic_register_entry->untouched) | ||
445 | *value = asic_register_entry->grbm_indexed ? | ||
446 | vi_read_indexed_register(adev, se_num, | ||
447 | sh_num, reg_offset) : | ||
448 | RREG32(reg_offset); | ||
449 | return 0; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) { | ||
454 | if (reg_offset != vi_allowed_read_registers[i].reg_offset) | ||
455 | continue; | ||
456 | |||
457 | if (!vi_allowed_read_registers[i].untouched) | ||
458 | *value = vi_allowed_read_registers[i].grbm_indexed ? | ||
459 | vi_read_indexed_register(adev, se_num, | ||
460 | sh_num, reg_offset) : | ||
461 | RREG32(reg_offset); | ||
462 | return 0; | ||
463 | } | ||
464 | return -EINVAL; | ||
465 | } | ||
466 | |||
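The positional initializers in the tables above map onto struct amdgpu_allowed_register_entry: two-field entries leave grbm_indexed false, while the three-field entries at the tail of vi_allowed_read_registers set it so the read is routed through vi_read_indexed_register() under a specific SE/SH selection. The field layout implied by the initializers and by the code above (reconstructed; exact types are an assumption):

	struct amdgpu_allowed_register_entry {
		uint32_t reg_offset;	/* which register userspace may read */
		bool untouched;		/* report 0 instead of touching the hw */
		bool grbm_indexed;	/* read under a GRBM_GFX_INDEX selection */
	};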
467 | static void vi_print_gpu_status_regs(struct amdgpu_device *adev) | ||
468 | { | ||
469 | dev_info(adev->dev, " GRBM_STATUS=0x%08X\n", | ||
470 | RREG32(mmGRBM_STATUS)); | ||
471 | dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n", | ||
472 | RREG32(mmGRBM_STATUS2)); | ||
473 | dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n", | ||
474 | RREG32(mmGRBM_STATUS_SE0)); | ||
475 | dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n", | ||
476 | RREG32(mmGRBM_STATUS_SE1)); | ||
477 | dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n", | ||
478 | RREG32(mmGRBM_STATUS_SE2)); | ||
479 | dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n", | ||
480 | RREG32(mmGRBM_STATUS_SE3)); | ||
481 | dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", | ||
482 | RREG32(mmSRBM_STATUS)); | ||
483 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | ||
484 | RREG32(mmSRBM_STATUS2)); | ||
485 | dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n", | ||
486 | RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET)); | ||
487 | dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n", | ||
488 | RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET)); | ||
489 | dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT)); | ||
490 | dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n", | ||
491 | RREG32(mmCP_STALLED_STAT1)); | ||
492 | dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n", | ||
493 | RREG32(mmCP_STALLED_STAT2)); | ||
494 | dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n", | ||
495 | RREG32(mmCP_STALLED_STAT3)); | ||
496 | dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n", | ||
497 | RREG32(mmCP_CPF_BUSY_STAT)); | ||
498 | dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n", | ||
499 | RREG32(mmCP_CPF_STALLED_STAT1)); | ||
500 | dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(mmCP_CPF_STATUS)); | ||
501 | dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(mmCP_CPC_BUSY_STAT)); | ||
502 | dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n", | ||
503 | RREG32(mmCP_CPC_STALLED_STAT1)); | ||
504 | dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(mmCP_CPC_STATUS)); | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * vi_gpu_check_soft_reset - check which blocks are busy | ||
509 | * | ||
510 | * @adev: amdgpu_device pointer | ||
511 | * | ||
512 | * Check which blocks are busy and return the relevant reset | ||
513 | * mask to be used by vi_gpu_soft_reset(). | ||
514 | * Returns a mask of the blocks to be reset. | ||
515 | */ | ||
516 | u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev) | ||
517 | { | ||
518 | u32 reset_mask = 0; | ||
519 | u32 tmp; | ||
520 | |||
521 | /* GRBM_STATUS */ | ||
522 | tmp = RREG32(mmGRBM_STATUS); | ||
523 | if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | | ||
524 | GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | | ||
525 | GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | | ||
526 | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | | ||
527 | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | | ||
528 | GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) | ||
529 | reset_mask |= AMDGPU_RESET_GFX; | ||
530 | |||
531 | if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) | ||
532 | reset_mask |= AMDGPU_RESET_CP; | ||
533 | |||
534 | /* GRBM_STATUS2 */ | ||
535 | tmp = RREG32(mmGRBM_STATUS2); | ||
536 | if (tmp & GRBM_STATUS2__RLC_BUSY_MASK) | ||
537 | reset_mask |= AMDGPU_RESET_RLC; | ||
538 | |||
539 | if (tmp & (GRBM_STATUS2__CPF_BUSY_MASK | | ||
540 | GRBM_STATUS2__CPC_BUSY_MASK | | ||
541 | GRBM_STATUS2__CPG_BUSY_MASK)) | ||
542 | reset_mask |= AMDGPU_RESET_CP; | ||
543 | |||
544 | /* SRBM_STATUS2 */ | ||
545 | tmp = RREG32(mmSRBM_STATUS2); | ||
546 | if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) | ||
547 | reset_mask |= AMDGPU_RESET_DMA; | ||
548 | |||
549 | if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) | ||
550 | reset_mask |= AMDGPU_RESET_DMA1; | ||
551 | |||
552 | /* SRBM_STATUS */ | ||
553 | tmp = RREG32(mmSRBM_STATUS); | ||
554 | |||
555 | if (tmp & SRBM_STATUS__IH_BUSY_MASK) | ||
556 | reset_mask |= AMDGPU_RESET_IH; | ||
557 | |||
558 | if (tmp & SRBM_STATUS__SEM_BUSY_MASK) | ||
559 | reset_mask |= AMDGPU_RESET_SEM; | ||
560 | |||
561 | if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK) | ||
562 | reset_mask |= AMDGPU_RESET_GRBM; | ||
563 | |||
564 | if (adev->asic_type != CHIP_TOPAZ) { | ||
565 | if (tmp & (SRBM_STATUS__UVD_RQ_PENDING_MASK | | ||
566 | SRBM_STATUS__UVD_BUSY_MASK)) | ||
567 | reset_mask |= AMDGPU_RESET_UVD; | ||
568 | } | ||
569 | |||
570 | if (tmp & SRBM_STATUS__VMC_BUSY_MASK) | ||
571 | reset_mask |= AMDGPU_RESET_VMC; | ||
572 | |||
573 | if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | ||
574 | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) | ||
575 | reset_mask |= AMDGPU_RESET_MC; | ||
576 | |||
577 | /* SDMA0_STATUS_REG */ | ||
578 | tmp = RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET); | ||
579 | if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) | ||
580 | reset_mask |= AMDGPU_RESET_DMA; | ||
581 | |||
582 | /* SDMA1_STATUS_REG */ | ||
583 | tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET); | ||
584 | if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK)) | ||
585 | reset_mask |= AMDGPU_RESET_DMA1; | ||
586 | #if 0 | ||
587 | /* VCE_STATUS */ | ||
588 | if (adev->asic_type != CHIP_TOPAZ) { | ||
589 | tmp = RREG32(mmVCE_STATUS); | ||
590 | if (tmp & VCE_STATUS__VCPU_REPORT_RB0_BUSY_MASK) | ||
591 | reset_mask |= AMDGPU_RESET_VCE; | ||
592 | if (tmp & VCE_STATUS__VCPU_REPORT_RB1_BUSY_MASK) | ||
593 | reset_mask |= AMDGPU_RESET_VCE1; | ||
594 | |||
595 | } | ||
596 | |||
597 | if (adev->asic_type != CHIP_TOPAZ) { | ||
598 | if (amdgpu_display_is_display_hung(adev)) | ||
599 | reset_mask |= AMDGPU_RESET_DISPLAY; | ||
600 | } | ||
601 | #endif | ||
602 | |||
603 | /* Skip MC reset as it's most likely not hung, just busy */ | ||
604 | if (reset_mask & AMDGPU_RESET_MC) { | ||
605 | DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask); | ||
606 | reset_mask &= ~AMDGPU_RESET_MC; | ||
607 | } | ||
608 | |||
609 | return reset_mask; | ||
610 | } | ||
611 | |||
612 | /** | ||
613 | * vi_gpu_soft_reset - soft reset GPU | ||
614 | * | ||
615 | * @adev: amdgpu_device pointer | ||
616 | * @reset_mask: mask of which blocks to reset | ||
617 | * | ||
618 | * Soft reset the blocks specified in @reset_mask. | ||
619 | */ | ||
620 | static void vi_gpu_soft_reset(struct amdgpu_device *adev, u32 reset_mask) | ||
621 | { | ||
622 | struct amdgpu_mode_mc_save save; | ||
623 | u32 grbm_soft_reset = 0, srbm_soft_reset = 0; | ||
624 | u32 tmp; | ||
625 | |||
626 | if (reset_mask == 0) | ||
627 | return; | ||
628 | |||
629 | dev_info(adev->dev, "GPU soft reset: 0x%08X\n", reset_mask); | ||
630 | |||
631 | vi_print_gpu_status_regs(adev); | ||
632 | dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | ||
633 | RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); | ||
634 | dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | ||
635 | RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); | ||
636 | |||
637 | /* disable CG/PG */ | ||
638 | |||
639 | /* stop the rlc */ | ||
640 | //XXX | ||
641 | //gfx_v8_0_rlc_stop(adev); | ||
642 | |||
643 | /* Disable GFX parsing/prefetching */ | ||
644 | tmp = RREG32(mmCP_ME_CNTL); | ||
645 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); | ||
646 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); | ||
647 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); | ||
648 | WREG32(mmCP_ME_CNTL, tmp); | ||
649 | |||
650 | /* Disable MEC parsing/prefetching */ | ||
651 | tmp = RREG32(mmCP_MEC_CNTL); | ||
652 | tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); | ||
653 | tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); | ||
654 | WREG32(mmCP_MEC_CNTL, tmp); | ||
655 | |||
656 | if (reset_mask & AMDGPU_RESET_DMA) { | ||
657 | /* sdma0 */ | ||
658 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); | ||
659 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); | ||
660 | WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); | ||
661 | } | ||
662 | if (reset_mask & AMDGPU_RESET_DMA1) { | ||
663 | /* sdma1 */ | ||
664 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); | ||
665 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); | ||
666 | WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); | ||
667 | } | ||
668 | |||
669 | gmc_v8_0_mc_stop(adev, &save); | ||
670 | if (amdgpu_asic_wait_for_mc_idle(adev)) { | ||
671 | dev_warn(adev->dev, "Wait for MC idle timed out!\n"); | ||
672 | } | ||
673 | |||
674 | if (reset_mask & (AMDGPU_RESET_GFX | AMDGPU_RESET_COMPUTE | AMDGPU_RESET_CP)) { | ||
675 | grbm_soft_reset = | ||
676 | REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); | ||
677 | grbm_soft_reset = | ||
678 | REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); | ||
679 | } | ||
680 | |||
681 | if (reset_mask & AMDGPU_RESET_CP) { | ||
682 | grbm_soft_reset = | ||
683 | REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP, 1); | ||
684 | srbm_soft_reset = | ||
685 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); | ||
686 | } | ||
687 | |||
688 | if (reset_mask & AMDGPU_RESET_DMA) | ||
689 | srbm_soft_reset = | ||
690 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA, 1); | ||
691 | |||
692 | if (reset_mask & AMDGPU_RESET_DMA1) | ||
693 | srbm_soft_reset = | ||
694 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1, 1); | ||
695 | |||
696 | if (reset_mask & AMDGPU_RESET_DISPLAY) | ||
697 | srbm_soft_reset = | ||
698 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_DC, 1); | ||
699 | |||
700 | if (reset_mask & AMDGPU_RESET_RLC) | ||
701 | grbm_soft_reset = | ||
702 | REG_SET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1); | ||
703 | |||
704 | if (reset_mask & AMDGPU_RESET_SEM) | ||
705 | srbm_soft_reset = | ||
706 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SEM, 1); | ||
707 | |||
708 | if (reset_mask & AMDGPU_RESET_IH) | ||
709 | srbm_soft_reset = | ||
710 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_IH, 1); | ||
711 | |||
712 | if (reset_mask & AMDGPU_RESET_GRBM) | ||
713 | srbm_soft_reset = | ||
714 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_GRBM, 1); | ||
715 | |||
716 | if (reset_mask & AMDGPU_RESET_VMC) | ||
717 | srbm_soft_reset = | ||
718 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); | ||
719 | |||
720 | if (reset_mask & AMDGPU_RESET_UVD) | ||
721 | srbm_soft_reset = | ||
722 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); | ||
723 | |||
724 | if (reset_mask & AMDGPU_RESET_VCE) | ||
725 | srbm_soft_reset = | ||
726 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1); | ||
727 | |||
728 | if (reset_mask & AMDGPU_RESET_VCE) | ||
729 | srbm_soft_reset = | ||
730 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1); | ||
731 | |||
732 | if (!(adev->flags & AMDGPU_IS_APU)) { | ||
733 | if (reset_mask & AMDGPU_RESET_MC) | ||
734 | srbm_soft_reset = | ||
735 | REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_MC, 1); | ||
736 | } | ||
737 | |||
738 | if (grbm_soft_reset) { | ||
739 | tmp = RREG32(mmGRBM_SOFT_RESET); | ||
740 | tmp |= grbm_soft_reset; | ||
741 | dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp); | ||
742 | WREG32(mmGRBM_SOFT_RESET, tmp); | ||
743 | tmp = RREG32(mmGRBM_SOFT_RESET); | ||
744 | |||
745 | udelay(50); | ||
746 | |||
747 | tmp &= ~grbm_soft_reset; | ||
748 | WREG32(mmGRBM_SOFT_RESET, tmp); | ||
749 | tmp = RREG32(mmGRBM_SOFT_RESET); | ||
750 | } | ||
751 | |||
752 | if (srbm_soft_reset) { | ||
753 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
754 | tmp |= srbm_soft_reset; | ||
755 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
756 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
757 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
758 | |||
759 | udelay(50); | ||
760 | |||
761 | tmp &= ~srbm_soft_reset; | ||
762 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
763 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
764 | } | ||
765 | |||
766 | /* Wait a little for things to settle down */ | ||
767 | udelay(50); | ||
768 | |||
769 | gmc_v8_0_mc_resume(adev, &save); | ||
770 | udelay(50); | ||
771 | |||
772 | vi_print_gpu_status_regs(adev); | ||
773 | } | ||
774 | |||
775 | static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) | ||
776 | { | ||
777 | struct amdgpu_mode_mc_save save; | ||
778 | u32 tmp, i; | ||
779 | |||
780 | dev_info(adev->dev, "GPU pci config reset\n"); | ||
781 | |||
782 | /* disable dpm? */ | ||
783 | |||
784 | /* disable cg/pg */ | ||
785 | |||
786 | /* Disable GFX parsing/prefetching */ | ||
787 | tmp = RREG32(mmCP_ME_CNTL); | ||
788 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1); | ||
789 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); | ||
790 | tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); | ||
791 | WREG32(mmCP_ME_CNTL, tmp); | ||
792 | |||
793 | /* Disable MEC parsing/prefetching */ | ||
794 | tmp = RREG32(mmCP_MEC_CNTL); | ||
795 | tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1); | ||
796 | tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1); | ||
797 | WREG32(mmCP_MEC_CNTL, tmp); | ||
798 | |||
807 | /* sdma0 */ | ||
808 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET); | ||
809 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); | ||
810 | WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp); | ||
811 | |||
812 | /* sdma1 */ | ||
813 | tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET); | ||
814 | tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 1); | ||
815 | WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp); | ||
816 | |||
817 | /* XXX other engines? */ | ||
818 | |||
819 | /* halt the rlc, disable cp internal ints */ | ||
820 | //XXX | ||
821 | //gfx_v8_0_rlc_stop(adev); | ||
822 | |||
823 | udelay(50); | ||
824 | |||
825 | /* disable mem access */ | ||
826 | gmc_v8_0_mc_stop(adev, &save); | ||
827 | if (amdgpu_asic_wait_for_mc_idle(adev)) { | ||
828 | dev_warn(adev->dev, "Wait for MC idle timed out!\n"); | ||
829 | } | ||
830 | |||
831 | /* disable BM */ | ||
832 | pci_clear_master(adev->pdev); | ||
833 | /* reset */ | ||
834 | amdgpu_pci_config_reset(adev); | ||
835 | |||
836 | udelay(100); | ||
837 | |||
838 | /* wait for asic to come out of reset */ | ||
839 | for (i = 0; i < adev->usec_timeout; i++) { | ||
840 | if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) | ||
841 | break; | ||
842 | udelay(1); | ||
843 | } | ||
844 | |||
845 | } | ||
846 | |||
847 | static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) | ||
848 | { | ||
849 | u32 tmp = RREG32(mmBIOS_SCRATCH_3); | ||
850 | |||
851 | if (hung) | ||
852 | tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
853 | else | ||
854 | tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG; | ||
855 | |||
856 | WREG32(mmBIOS_SCRATCH_3, tmp); | ||
857 | } | ||
858 | |||
859 | /** | ||
860 | * vi_asic_reset - soft reset GPU | ||
861 | * | ||
862 | * @adev: amdgpu_device pointer | ||
863 | * | ||
864 | * Look up which blocks are hung and attempt | ||
865 | * to reset them. | ||
866 | * Returns 0 for success. | ||
867 | */ | ||
868 | static int vi_asic_reset(struct amdgpu_device *adev) | ||
869 | { | ||
870 | u32 reset_mask; | ||
871 | |||
872 | reset_mask = vi_gpu_check_soft_reset(adev); | ||
873 | |||
874 | if (reset_mask) | ||
875 | vi_set_bios_scratch_engine_hung(adev, true); | ||
876 | |||
877 | /* try soft reset */ | ||
878 | vi_gpu_soft_reset(adev, reset_mask); | ||
879 | |||
880 | reset_mask = vi_gpu_check_soft_reset(adev); | ||
881 | |||
882 | /* try pci config reset */ | ||
883 | if (reset_mask && amdgpu_hard_reset) | ||
884 | vi_gpu_pci_config_reset(adev); | ||
885 | |||
886 | reset_mask = vi_gpu_check_soft_reset(adev); | ||
887 | |||
888 | if (!reset_mask) | ||
889 | vi_set_bios_scratch_engine_hung(adev, false); | ||
890 | |||
891 | return 0; | ||
892 | } | ||
893 | |||
894 | static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, | ||
895 | u32 cntl_reg, u32 status_reg) | ||
896 | { | ||
897 | int r, i; | ||
898 | struct atom_clock_dividers dividers; | ||
899 | uint32_t tmp; | ||
900 | |||
901 | r = amdgpu_atombios_get_clock_dividers(adev, | ||
902 | COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, | ||
903 | clock, false, ÷rs); | ||
904 | if (r) | ||
905 | return r; | ||
906 | |||
907 | tmp = RREG32_SMC(cntl_reg); | ||
908 | tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | | ||
909 | CG_DCLK_CNTL__DCLK_DIVIDER_MASK); | ||
910 | tmp |= dividers.post_divider; | ||
911 | WREG32_SMC(cntl_reg, tmp); | ||
912 | |||
913 | for (i = 0; i < 100; i++) { | ||
914 | if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK) | ||
915 | break; | ||
916 | mdelay(10); | ||
917 | } | ||
918 | if (i == 100) | ||
919 | return -ETIMEDOUT; | ||
920 | |||
921 | return 0; | ||
922 | } | ||
923 | |||
924 | static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) | ||
925 | { | ||
926 | int r; | ||
927 | |||
928 | r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); | ||
929 | if (r) | ||
930 | return r; | ||
931 | |||
932 | r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); | ||
933 | |||
934 | return r; | ||
935 | } | ||
936 | |||
937 | static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) | ||
938 | { | ||
939 | /* todo */ | ||
940 | |||
941 | return 0; | ||
942 | } | ||
943 | |||
944 | static void vi_pcie_gen3_enable(struct amdgpu_device *adev) | ||
945 | { | ||
946 | u32 mask; | ||
947 | int ret; | ||
948 | |||
949 | if (amdgpu_pcie_gen2 == 0) | ||
950 | return; | ||
951 | |||
952 | if (adev->flags & AMDGPU_IS_APU) | ||
953 | return; | ||
954 | |||
955 | ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); | ||
956 | if (ret != 0) | ||
957 | return; | ||
958 | |||
959 | if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80))) | ||
960 | return; | ||
961 | |||
962 | /* todo */ | ||
963 | } | ||
964 | |||
965 | static void vi_program_aspm(struct amdgpu_device *adev) | ||
966 | { | ||
967 | |||
968 | if (amdgpu_aspm == 0) | ||
969 | return; | ||
970 | |||
971 | /* todo */ | ||
972 | } | ||
973 | |||
974 | static void vi_enable_doorbell_aperture(struct amdgpu_device *adev, | ||
975 | bool enable) | ||
976 | { | ||
977 | u32 tmp; | ||
978 | |||
979 | /* not necessary on CZ */ | ||
980 | if (adev->flags & AMDGPU_IS_APU) | ||
981 | return; | ||
982 | |||
983 | tmp = RREG32(mmBIF_DOORBELL_APER_EN); | ||
984 | if (enable) | ||
985 | tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1); | ||
986 | else | ||
987 | tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0); | ||
988 | |||
989 | WREG32(mmBIF_DOORBELL_APER_EN, tmp); | ||
990 | } | ||
991 | |||
992 | /* topaz has no DCE, UVD, VCE */ | ||
993 | static const struct amdgpu_ip_block_version topaz_ip_blocks[] = | ||
994 | { | ||
995 | /* ORDER MATTERS! */ | ||
996 | { | ||
997 | .type = AMDGPU_IP_BLOCK_TYPE_COMMON, | ||
998 | .major = 2, | ||
999 | .minor = 0, | ||
1000 | .rev = 0, | ||
1001 | .funcs = &vi_common_ip_funcs, | ||
1002 | }, | ||
1003 | { | ||
1004 | .type = AMDGPU_IP_BLOCK_TYPE_GMC, | ||
1005 | .major = 8, | ||
1006 | .minor = 0, | ||
1007 | .rev = 0, | ||
1008 | .funcs = &gmc_v8_0_ip_funcs, | ||
1009 | }, | ||
1010 | { | ||
1011 | .type = AMDGPU_IP_BLOCK_TYPE_IH, | ||
1012 | .major = 2, | ||
1013 | .minor = 4, | ||
1014 | .rev = 0, | ||
1015 | .funcs = &iceland_ih_ip_funcs, | ||
1016 | }, | ||
1017 | { | ||
1018 | .type = AMDGPU_IP_BLOCK_TYPE_SMC, | ||
1019 | .major = 7, | ||
1020 | .minor = 1, | ||
1021 | .rev = 0, | ||
1022 | .funcs = &iceland_dpm_ip_funcs, | ||
1023 | }, | ||
1024 | { | ||
1025 | .type = AMDGPU_IP_BLOCK_TYPE_GFX, | ||
1026 | .major = 8, | ||
1027 | .minor = 0, | ||
1028 | .rev = 0, | ||
1029 | .funcs = &gfx_v8_0_ip_funcs, | ||
1030 | }, | ||
1031 | { | ||
1032 | .type = AMDGPU_IP_BLOCK_TYPE_SDMA, | ||
1033 | .major = 2, | ||
1034 | .minor = 4, | ||
1035 | .rev = 0, | ||
1036 | .funcs = &sdma_v2_4_ip_funcs, | ||
1037 | }, | ||
1038 | }; | ||
1039 | |||
1040 | static const struct amdgpu_ip_block_version tonga_ip_blocks[] = | ||
1041 | { | ||
1042 | /* ORDER MATTERS! */ | ||
1043 | { | ||
1044 | .type = AMDGPU_IP_BLOCK_TYPE_COMMON, | ||
1045 | .major = 2, | ||
1046 | .minor = 0, | ||
1047 | .rev = 0, | ||
1048 | .funcs = &vi_common_ip_funcs, | ||
1049 | }, | ||
1050 | { | ||
1051 | .type = AMDGPU_IP_BLOCK_TYPE_GMC, | ||
1052 | .major = 8, | ||
1053 | .minor = 0, | ||
1054 | .rev = 0, | ||
1055 | .funcs = &gmc_v8_0_ip_funcs, | ||
1056 | }, | ||
1057 | { | ||
1058 | .type = AMDGPU_IP_BLOCK_TYPE_IH, | ||
1059 | .major = 3, | ||
1060 | .minor = 0, | ||
1061 | .rev = 0, | ||
1062 | .funcs = &tonga_ih_ip_funcs, | ||
1063 | }, | ||
1064 | { | ||
1065 | .type = AMDGPU_IP_BLOCK_TYPE_SMC, | ||
1066 | .major = 7, | ||
1067 | .minor = 1, | ||
1068 | .rev = 0, | ||
1069 | .funcs = &tonga_dpm_ip_funcs, | ||
1070 | }, | ||
1071 | { | ||
1072 | .type = AMDGPU_IP_BLOCK_TYPE_DCE, | ||
1073 | .major = 10, | ||
1074 | .minor = 0, | ||
1075 | .rev = 0, | ||
1076 | .funcs = &dce_v10_0_ip_funcs, | ||
1077 | }, | ||
1078 | { | ||
1079 | .type = AMDGPU_IP_BLOCK_TYPE_GFX, | ||
1080 | .major = 8, | ||
1081 | .minor = 0, | ||
1082 | .rev = 0, | ||
1083 | .funcs = &gfx_v8_0_ip_funcs, | ||
1084 | }, | ||
1085 | { | ||
1086 | .type = AMDGPU_IP_BLOCK_TYPE_SDMA, | ||
1087 | .major = 3, | ||
1088 | .minor = 0, | ||
1089 | .rev = 0, | ||
1090 | .funcs = &sdma_v3_0_ip_funcs, | ||
1091 | }, | ||
1092 | { | ||
1093 | .type = AMDGPU_IP_BLOCK_TYPE_UVD, | ||
1094 | .major = 5, | ||
1095 | .minor = 0, | ||
1096 | .rev = 0, | ||
1097 | .funcs = &uvd_v5_0_ip_funcs, | ||
1098 | }, | ||
1099 | { | ||
1100 | .type = AMDGPU_IP_BLOCK_TYPE_VCE, | ||
1101 | .major = 3, | ||
1102 | .minor = 0, | ||
1103 | .rev = 0, | ||
1104 | .funcs = &vce_v3_0_ip_funcs, | ||
1105 | }, | ||
1106 | }; | ||
1107 | |||
1108 | static const struct amdgpu_ip_block_version cz_ip_blocks[] = | ||
1109 | { | ||
1110 | /* ORDER MATTERS! */ | ||
1111 | { | ||
1112 | .type = AMDGPU_IP_BLOCK_TYPE_COMMON, | ||
1113 | .major = 2, | ||
1114 | .minor = 0, | ||
1115 | .rev = 0, | ||
1116 | .funcs = &vi_common_ip_funcs, | ||
1117 | }, | ||
1118 | { | ||
1119 | .type = AMDGPU_IP_BLOCK_TYPE_GMC, | ||
1120 | .major = 8, | ||
1121 | .minor = 0, | ||
1122 | .rev = 0, | ||
1123 | .funcs = &gmc_v8_0_ip_funcs, | ||
1124 | }, | ||
1125 | { | ||
1126 | .type = AMDGPU_IP_BLOCK_TYPE_IH, | ||
1127 | .major = 3, | ||
1128 | .minor = 0, | ||
1129 | .rev = 0, | ||
1130 | .funcs = &cz_ih_ip_funcs, | ||
1131 | }, | ||
1132 | { | ||
1133 | .type = AMDGPU_IP_BLOCK_TYPE_SMC, | ||
1134 | .major = 8, | ||
1135 | .minor = 0, | ||
1136 | .rev = 0, | ||
1137 | .funcs = &cz_dpm_ip_funcs, | ||
1138 | }, | ||
1139 | { | ||
1140 | .type = AMDGPU_IP_BLOCK_TYPE_DCE, | ||
1141 | .major = 11, | ||
1142 | .minor = 0, | ||
1143 | .rev = 0, | ||
1144 | .funcs = &dce_v11_0_ip_funcs, | ||
1145 | }, | ||
1146 | { | ||
1147 | .type = AMDGPU_IP_BLOCK_TYPE_GFX, | ||
1148 | .major = 8, | ||
1149 | .minor = 0, | ||
1150 | .rev = 0, | ||
1151 | .funcs = &gfx_v8_0_ip_funcs, | ||
1152 | }, | ||
1153 | { | ||
1154 | .type = AMDGPU_IP_BLOCK_TYPE_SDMA, | ||
1155 | .major = 3, | ||
1156 | .minor = 0, | ||
1157 | .rev = 0, | ||
1158 | .funcs = &sdma_v3_0_ip_funcs, | ||
1159 | }, | ||
1160 | { | ||
1161 | .type = AMDGPU_IP_BLOCK_TYPE_UVD, | ||
1162 | .major = 6, | ||
1163 | .minor = 0, | ||
1164 | .rev = 0, | ||
1165 | .funcs = &uvd_v6_0_ip_funcs, | ||
1166 | }, | ||
1167 | { | ||
1168 | .type = AMDGPU_IP_BLOCK_TYPE_VCE, | ||
1169 | .major = 3, | ||
1170 | .minor = 0, | ||
1171 | .rev = 0, | ||
1172 | .funcs = &vce_v3_0_ip_funcs, | ||
1173 | }, | ||
1174 | }; | ||
1175 | |||
1176 | int vi_set_ip_blocks(struct amdgpu_device *adev) | ||
1177 | { | ||
1178 | switch (adev->asic_type) { | ||
1179 | case CHIP_TOPAZ: | ||
1180 | adev->ip_blocks = topaz_ip_blocks; | ||
1181 | adev->num_ip_blocks = ARRAY_SIZE(topaz_ip_blocks); | ||
1182 | break; | ||
1183 | case CHIP_TONGA: | ||
1184 | adev->ip_blocks = tonga_ip_blocks; | ||
1185 | adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks); | ||
1186 | break; | ||
1187 | case CHIP_CARRIZO: | ||
1188 | adev->ip_blocks = cz_ip_blocks; | ||
1189 | adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks); | ||
1190 | break; | ||
1191 | default: | ||
1192 | /* FIXME: not supported yet */ | ||
1193 | return -EINVAL; | ||
1194 | } | ||
1195 | |||
1196 | adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); | ||
1197 | if (adev->ip_block_enabled == NULL) | ||
1198 | return -ENOMEM; | ||
1199 | |||
1200 | return 0; | ||
1201 | } | ||
1202 | |||
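The /* ORDER MATTERS! */ annotations exist because the device core walks adev->ip_blocks front to back for init (and back to front for teardown), so e.g. GMC must come up before the engines that depend on address translation. A simplified sketch of the early-init walk in amdgpu_device.c (illustrative; the real loop also handles -ENOENT and the later per-stage callbacks):

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			adev->ip_block_enabled[i] = false;	/* masked off */
		} else if (adev->ip_blocks[i].funcs->early_init) {
			r = adev->ip_blocks[i].funcs->early_init(adev);
			if (r)
				return r;
			adev->ip_block_enabled[i] = true;
		}
	}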
1203 | static uint32_t vi_get_rev_id(struct amdgpu_device *adev) | ||
1204 | { | ||
1205 | if (adev->asic_type == CHIP_TOPAZ) | ||
1206 | return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK) | ||
1207 | >> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT; | ||
1208 | else | ||
1209 | return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK) | ||
1210 | >> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT; | ||
1211 | } | ||
1212 | |||
1213 | static const struct amdgpu_asic_funcs vi_asic_funcs = | ||
1214 | { | ||
1215 | .read_disabled_bios = &vi_read_disabled_bios, | ||
1216 | .read_register = &vi_read_register, | ||
1217 | .reset = &vi_asic_reset, | ||
1218 | .set_vga_state = &vi_vga_set_state, | ||
1219 | .get_xclk = &vi_get_xclk, | ||
1220 | .set_uvd_clocks = &vi_set_uvd_clocks, | ||
1221 | .set_vce_clocks = &vi_set_vce_clocks, | ||
1222 | .get_cu_info = &gfx_v8_0_get_cu_info, | ||
1223 | /* these should be moved to their own ip modules */ | ||
1224 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, | ||
1225 | .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, | ||
1226 | }; | ||
1227 | |||
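These hooks are normally reached through thin dispatch macros rather than called directly; for instance, the amdgpu_asic_wait_for_mc_idle() used in vi_gpu_soft_reset() above expands along the lines of:

	#define amdgpu_asic_wait_for_mc_idle(adev) \
		(adev)->asic_funcs->wait_for_mc_idle((adev))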
1228 | static int vi_common_early_init(struct amdgpu_device *adev) | ||
1229 | { | ||
1230 | bool smc_enabled = false; | ||
1231 | |||
1232 | adev->smc_rreg = &vi_smc_rreg; | ||
1233 | adev->smc_wreg = &vi_smc_wreg; | ||
1234 | adev->pcie_rreg = &vi_pcie_rreg; | ||
1235 | adev->pcie_wreg = &vi_pcie_wreg; | ||
1236 | adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg; | ||
1237 | adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg; | ||
1238 | adev->didt_rreg = &vi_didt_rreg; | ||
1239 | adev->didt_wreg = &vi_didt_wreg; | ||
1240 | |||
1241 | adev->asic_funcs = &vi_asic_funcs; | ||
1242 | |||
1243 | if (amdgpu_get_ip_block(adev, AMDGPU_IP_BLOCK_TYPE_SMC) && | ||
1244 | (amdgpu_ip_block_mask & (1 << AMDGPU_IP_BLOCK_TYPE_SMC))) | ||
1245 | smc_enabled = true; | ||
1246 | |||
1247 | adev->rev_id = vi_get_rev_id(adev); | ||
1248 | adev->external_rev_id = 0xFF; | ||
1249 | switch (adev->asic_type) { | ||
1250 | case CHIP_TOPAZ: | ||
1251 | adev->has_uvd = false; | ||
1252 | adev->cg_flags = 0; | ||
1253 | adev->pg_flags = 0; | ||
1254 | adev->external_rev_id = 0x1; | ||
1255 | if (amdgpu_smc_load_fw && smc_enabled) | ||
1256 | adev->firmware.smu_load = true; | ||
1257 | break; | ||
1258 | case CHIP_TONGA: | ||
1259 | adev->has_uvd = true; | ||
1260 | adev->cg_flags = 0; | ||
1261 | adev->pg_flags = 0; | ||
1262 | adev->external_rev_id = adev->rev_id + 0x14; | ||
1263 | if (amdgpu_smc_load_fw && smc_enabled) | ||
1264 | adev->firmware.smu_load = true; | ||
1265 | break; | ||
1266 | case CHIP_CARRIZO: | ||
1267 | adev->has_uvd = true; | ||
1268 | adev->cg_flags = 0; | ||
1269 | adev->pg_flags = 0; | ||
1270 | adev->external_rev_id = adev->rev_id + 0x1; | ||
1271 | if (amdgpu_smc_load_fw && smc_enabled) | ||
1272 | adev->firmware.smu_load = true; | ||
1273 | break; | ||
1274 | default: | ||
1275 | /* FIXME: not supported yet */ | ||
1276 | return -EINVAL; | ||
1277 | } | ||
1278 | |||
1279 | return 0; | ||
1280 | } | ||
1281 | |||
1282 | static int vi_common_sw_init(struct amdgpu_device *adev) | ||
1283 | { | ||
1284 | return 0; | ||
1285 | } | ||
1286 | |||
1287 | static int vi_common_sw_fini(struct amdgpu_device *adev) | ||
1288 | { | ||
1289 | return 0; | ||
1290 | } | ||
1291 | |||
1292 | static int vi_common_hw_init(struct amdgpu_device *adev) | ||
1293 | { | ||
1294 | /* move the golden regs per IP block */ | ||
1295 | vi_init_golden_registers(adev); | ||
1296 | /* enable pcie gen2/3 link */ | ||
1297 | vi_pcie_gen3_enable(adev); | ||
1298 | /* enable aspm */ | ||
1299 | vi_program_aspm(adev); | ||
1300 | /* enable the doorbell aperture */ | ||
1301 | vi_enable_doorbell_aperture(adev, true); | ||
1302 | |||
1303 | return 0; | ||
1304 | } | ||
1305 | |||
1306 | static int vi_common_hw_fini(struct amdgpu_device *adev) | ||
1307 | { | ||
1308 | /* disable the doorbell aperture */ | ||
1309 | vi_enable_doorbell_aperture(adev, false); | ||
1310 | |||
1311 | return 0; | ||
1312 | } | ||
1313 | |||
1314 | static int vi_common_suspend(struct amdgpu_device *adev) | ||
1315 | { | ||
1316 | return vi_common_hw_fini(adev); | ||
1317 | } | ||
1318 | |||
1319 | static int vi_common_resume(struct amdgpu_device *adev) | ||
1320 | { | ||
1321 | return vi_common_hw_init(adev); | ||
1322 | } | ||
1323 | |||
1324 | static bool vi_common_is_idle(struct amdgpu_device *adev) | ||
1325 | { | ||
1326 | return true; | ||
1327 | } | ||
1328 | |||
1329 | static int vi_common_wait_for_idle(struct amdgpu_device *adev) | ||
1330 | { | ||
1331 | return 0; | ||
1332 | } | ||
1333 | |||
1334 | static void vi_common_print_status(struct amdgpu_device *adev) | ||
1335 | { | ||
1336 | |||
1337 | } | ||
1338 | |||
1339 | static int vi_common_soft_reset(struct amdgpu_device *adev) | ||
1340 | { | ||
1341 | /* XXX hard reset?? */ | ||
1342 | return 0; | ||
1343 | } | ||
1344 | |||
1345 | static int vi_common_set_clockgating_state(struct amdgpu_device *adev, | ||
1346 | enum amdgpu_clockgating_state state) | ||
1347 | { | ||
1348 | return 0; | ||
1349 | } | ||
1350 | |||
1351 | static int vi_common_set_powergating_state(struct amdgpu_device *adev, | ||
1352 | enum amdgpu_powergating_state state) | ||
1353 | { | ||
1354 | return 0; | ||
1355 | } | ||
1356 | |||
1357 | const struct amdgpu_ip_funcs vi_common_ip_funcs = { | ||
1358 | .early_init = vi_common_early_init, | ||
1359 | .late_init = NULL, | ||
1360 | .sw_init = vi_common_sw_init, | ||
1361 | .sw_fini = vi_common_sw_fini, | ||
1362 | .hw_init = vi_common_hw_init, | ||
1363 | .hw_fini = vi_common_hw_fini, | ||
1364 | .suspend = vi_common_suspend, | ||
1365 | .resume = vi_common_resume, | ||
1366 | .is_idle = vi_common_is_idle, | ||
1367 | .wait_for_idle = vi_common_wait_for_idle, | ||
1368 | .soft_reset = vi_common_soft_reset, | ||
1369 | .print_status = vi_common_print_status, | ||
1370 | .set_clockgating_state = vi_common_set_clockgating_state, | ||
1371 | .set_powergating_state = vi_common_set_powergating_state, | ||
1372 | }; | ||
1373 | |||
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h new file mode 100644 index 000000000000..d16a5f7e4edd --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vi.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __VI_H__ | ||
25 | #define __VI_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs vi_common_ip_funcs; | ||
28 | |||
29 | void vi_srbm_select(struct amdgpu_device *adev, | ||
30 | u32 me, u32 pipe, u32 queue, u32 vmid); | ||
31 | int vi_set_ip_blocks(struct amdgpu_device *adev); | ||
32 | |||
33 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vi_dpm.h b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h new file mode 100644 index 000000000000..11cb1f7eeba5 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vi_dpm.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #ifndef __VI_DPM_H__ | ||
25 | #define __VI_DPM_H__ | ||
26 | |||
27 | extern const struct amdgpu_ip_funcs cz_dpm_ip_funcs; | ||
28 | int cz_smu_init(struct amdgpu_device *adev); | ||
29 | int cz_smu_start(struct amdgpu_device *adev); | ||
30 | int cz_smu_fini(struct amdgpu_device *adev); | ||
31 | |||
32 | extern const struct amdgpu_ip_funcs tonga_dpm_ip_funcs; | ||
33 | |||
34 | extern const struct amdgpu_ip_funcs iceland_dpm_ip_funcs; | ||
35 | |||
36 | #endif | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h new file mode 100644 index 000000000000..385267c31d11 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vid.h | |||
@@ -0,0 +1,363 @@ | |||
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #ifndef VID_H | ||
24 | #define VID_H | ||
25 | |||
26 | #define SDMA0_REGISTER_OFFSET 0x0 /* not a register */ | ||
27 | #define SDMA1_REGISTER_OFFSET 0x200 /* not a register */ | ||
28 | #define SDMA_MAX_INSTANCE 2 | ||
29 | |||
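These instance offsets are added to the instance-0 register name, which is the pattern vi.c uses above (e.g. mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET). A hypothetical per-instance read:

	/* "instance" is a placeholder variable, not from this patch */
	u32 offset = (instance == 0) ? SDMA0_REGISTER_OFFSET
				     : SDMA1_REGISTER_OFFSET;
	u32 status = RREG32(mmSDMA0_STATUS_REG + offset);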
30 | /* crtc instance offsets */ | ||
31 | #define CRTC0_REGISTER_OFFSET (0x1b9c - 0x1b9c) | ||
32 | #define CRTC1_REGISTER_OFFSET (0x1d9c - 0x1b9c) | ||
33 | #define CRTC2_REGISTER_OFFSET (0x1f9c - 0x1b9c) | ||
34 | #define CRTC3_REGISTER_OFFSET (0x419c - 0x1b9c) | ||
35 | #define CRTC4_REGISTER_OFFSET (0x439c - 0x1b9c) | ||
36 | #define CRTC5_REGISTER_OFFSET (0x459c - 0x1b9c) | ||
37 | #define CRTC6_REGISTER_OFFSET (0x479c - 0x1b9c) | ||
38 | |||
39 | /* dig instance offsets */ | ||
40 | #define DIG0_REGISTER_OFFSET (0x4a00 - 0x4a00) | ||
41 | #define DIG1_REGISTER_OFFSET (0x4b00 - 0x4a00) | ||
42 | #define DIG2_REGISTER_OFFSET (0x4c00 - 0x4a00) | ||
43 | #define DIG3_REGISTER_OFFSET (0x4d00 - 0x4a00) | ||
44 | #define DIG4_REGISTER_OFFSET (0x4e00 - 0x4a00) | ||
45 | #define DIG5_REGISTER_OFFSET (0x4f00 - 0x4a00) | ||
46 | #define DIG6_REGISTER_OFFSET (0x5400 - 0x4a00) | ||
47 | #define DIG7_REGISTER_OFFSET (0x5600 - 0x4a00) | ||
48 | #define DIG8_REGISTER_OFFSET (0x5700 - 0x4a00) | ||
49 | |||
50 | /* audio endpt instance offsets */ | ||
51 | #define AUD0_REGISTER_OFFSET (0x17a8 - 0x17a8) | ||
52 | #define AUD1_REGISTER_OFFSET (0x17ac - 0x17a8) | ||
53 | #define AUD2_REGISTER_OFFSET (0x17b0 - 0x17a8) | ||
54 | #define AUD3_REGISTER_OFFSET (0x17b4 - 0x17a8) | ||
55 | #define AUD4_REGISTER_OFFSET (0x17b8 - 0x17a8) | ||
56 | #define AUD5_REGISTER_OFFSET (0x17bc - 0x17a8) | ||
57 | #define AUD6_REGISTER_OFFSET (0x17c4 - 0x17a8) | ||
58 | |||
59 | /* hpd instance offsets */ | ||
60 | #define HPD0_REGISTER_OFFSET (0x1898 - 0x1898) | ||
61 | #define HPD1_REGISTER_OFFSET (0x18a0 - 0x1898) | ||
62 | #define HPD2_REGISTER_OFFSET (0x18a8 - 0x1898) | ||
63 | #define HPD3_REGISTER_OFFSET (0x18b0 - 0x1898) | ||
64 | #define HPD4_REGISTER_OFFSET (0x18b8 - 0x1898) | ||
65 | #define HPD5_REGISTER_OFFSET (0x18c0 - 0x1898) | ||
66 | |||
67 | #define AMDGPU_NUM_OF_VMIDS 8 | ||
68 | |||
69 | #define RB_BITMAP_WIDTH_PER_SH 2 | ||
70 | |||
71 | #define MC_SEQ_MISC0__GDDR5__SHIFT 0x1c | ||
72 | #define MC_SEQ_MISC0__GDDR5_MASK 0xf0000000 | ||
73 | #define MC_SEQ_MISC0__GDDR5_VALUE 5 | ||
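These three values decode the VRAM type from MC_SEQ_MISC0: mask out field [31:28], shift it down, and compare against the GDDR5 magic. A sketch of the probe; reading the register itself (RREG32(mmMC_SEQ_MISC0)) is assumed from the rest of the driver:

	#include <stdint.h>
	#include "vi.h"

	/* misc0 = value read from MC_SEQ_MISC0 */
	static int vram_is_gddr5(uint32_t misc0)
	{
		return ((misc0 & MC_SEQ_MISC0__GDDR5_MASK) >>
			MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE;
	}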
74 | |||
75 | /* | ||
76 | * PM4 | ||
77 | */ | ||
78 | #define PACKET_TYPE0 0 | ||
79 | #define PACKET_TYPE1 1 | ||
80 | #define PACKET_TYPE2 2 | ||
81 | #define PACKET_TYPE3 3 | ||
82 | |||
83 | #define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3) | ||
84 | #define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF) | ||
85 | #define CP_PACKET0_GET_REG(h) ((h) & 0xFFFF) | ||
86 | #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) | ||
87 | #define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \ | ||
88 | ((reg) & 0xFFFF) | \ | ||
89 | (((n) & 0x3FFF) << 16)) | ||
90 | #define CP_PACKET2 0x80000000 | ||
91 | #define PACKET2_PAD_SHIFT 0 | ||
92 | #define PACKET2_PAD_MASK (0x3fffffff << 0) | ||
93 | |||
94 | #define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) | ||
95 | |||
96 | #define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ | ||
97 | (((op) & 0xFF) << 8) | \ | ||
98 | (((n) & 0x3FFF) << 16)) | ||
99 | |||
100 | #define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | (1 << 1)) | ||
101 | |||
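A PM4 header packs the packet type into bits [31:30], a dword count into [29:16], and either a register index (type 0) or an opcode (type 3) in the low bits; the count is two less than the total packet size in dwords, i.e. one less than the number of body dwords that follow the header. A host-side sketch of how the macros above compose and decompose, assuming only this header and the C standard library:

	#include <assert.h>
	#include <stdint.h>
	#include "vi.h"

	static void pm4_header_roundtrip(void)
	{
		/* type 0: write (n + 1) consecutive registers from `reg` */
		uint32_t t0 = PACKET0(0x1234, 1);
		assert(CP_PACKET_GET_TYPE(t0) == PACKET_TYPE0);
		assert(CP_PACKET0_GET_REG(t0) == 0x1234);
		assert(CP_PACKET_GET_COUNT(t0) == 1);

		/* type 3: opcode plus (n + 1) body dwords */
		uint32_t t3 = PACKET3(0x10, 0);	/* 0x10 = NOP, defined below */
		assert(CP_PACKET_GET_TYPE(t3) == PACKET_TYPE3);
		assert(CP_PACKET3_GET_OPCODE(t3) == 0x10);
	}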
102 | /* Packet 3 types */ | ||
103 | #define PACKET3_NOP 0x10 | ||
104 | #define PACKET3_SET_BASE 0x11 | ||
105 | #define PACKET3_BASE_INDEX(x) ((x) << 0) | ||
106 | #define CE_PARTITION_BASE 3 | ||
107 | #define PACKET3_CLEAR_STATE 0x12 | ||
108 | #define PACKET3_INDEX_BUFFER_SIZE 0x13 | ||
109 | #define PACKET3_DISPATCH_DIRECT 0x15 | ||
110 | #define PACKET3_DISPATCH_INDIRECT 0x16 | ||
111 | #define PACKET3_ATOMIC_GDS 0x1D | ||
112 | #define PACKET3_ATOMIC_MEM 0x1E | ||
113 | #define PACKET3_OCCLUSION_QUERY 0x1F | ||
114 | #define PACKET3_SET_PREDICATION 0x20 | ||
115 | #define PACKET3_REG_RMW 0x21 | ||
116 | #define PACKET3_COND_EXEC 0x22 | ||
117 | #define PACKET3_PRED_EXEC 0x23 | ||
118 | #define PACKET3_DRAW_INDIRECT 0x24 | ||
119 | #define PACKET3_DRAW_INDEX_INDIRECT 0x25 | ||
120 | #define PACKET3_INDEX_BASE 0x26 | ||
121 | #define PACKET3_DRAW_INDEX_2 0x27 | ||
122 | #define PACKET3_CONTEXT_CONTROL 0x28 | ||
123 | #define PACKET3_INDEX_TYPE 0x2A | ||
124 | #define PACKET3_DRAW_INDIRECT_MULTI 0x2C | ||
125 | #define PACKET3_DRAW_INDEX_AUTO 0x2D | ||
126 | #define PACKET3_NUM_INSTANCES 0x2F | ||
127 | #define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30 | ||
128 | #define PACKET3_INDIRECT_BUFFER_CONST 0x33 | ||
129 | #define PACKET3_STRMOUT_BUFFER_UPDATE 0x34 | ||
130 | #define PACKET3_DRAW_INDEX_OFFSET_2 0x35 | ||
131 | #define PACKET3_DRAW_PREAMBLE 0x36 | ||
132 | #define PACKET3_WRITE_DATA 0x37 | ||
133 | #define WRITE_DATA_DST_SEL(x) ((x) << 8) | ||
134 | /* 0 - register | ||
135 | * 1 - memory (sync - via GRBM) | ||
136 | * 2 - gl2 | ||
137 | * 3 - gds | ||
138 | * 4 - reserved | ||
139 | * 5 - memory (async - direct) | ||
140 | */ | ||
141 | #define WR_ONE_ADDR (1 << 16) | ||
142 | #define WR_CONFIRM (1 << 20) | ||
143 | #define WRITE_DATA_CACHE_POLICY(x) ((x) << 25) | ||
144 | /* 0 - LRU | ||
145 | * 1 - Stream | ||
146 | */ | ||
147 | #define WRITE_DATA_ENGINE_SEL(x) ((x) << 30) | ||
148 | /* 0 - me | ||
149 | * 1 - pfp | ||
150 | * 2 - ce | ||
151 | */ | ||
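Putting those fields together, a WRITE_DATA packet carries a control dword, a destination address, and the payload. A sketch that writes one dword to GPU memory, with `buf` standing in for a ring buffer; the layout mirrors how radeon/amdgpu ring code typically uses this opcode and should be read as an assumption, not a spec:

	#include <stdint.h>
	#include "vi.h"

	/* returns the number of dwords emitted */
	static int emit_write_data(uint32_t *buf, uint64_t gpu_addr, uint32_t value)
	{
		buf[0] = PACKET3(PACKET3_WRITE_DATA, 3);
		buf[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM; /* 5 = memory (async) */
		buf[2] = (uint32_t)gpu_addr;		/* DST_ADDR_LO, dword aligned */
		buf[3] = (uint32_t)(gpu_addr >> 32);	/* DST_ADDR_HI */
		buf[4] = value;
		return 5;
	}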
152 | #define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38 | ||
153 | #define PACKET3_MEM_SEMAPHORE 0x39 | ||
154 | # define PACKET3_SEM_USE_MAILBOX (0x1 << 16) | ||
155 | # define PACKET3_SEM_SEL_SIGNAL_TYPE (0x1 << 20) /* 0 = increment, 1 = write 1 */ | ||
156 | # define PACKET3_SEM_CLIENT_CODE(x) ((x) << 24) /* 0 = CP, 1 = CB, 2 = DB */ | ||
157 | # define PACKET3_SEM_SEL_SIGNAL (0x6 << 29) | ||
158 | # define PACKET3_SEM_SEL_WAIT (0x7 << 29) | ||
159 | #define PACKET3_WAIT_REG_MEM 0x3C | ||
160 | #define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) | ||
161 | /* 0 - always | ||
162 | * 1 - < | ||
163 | * 2 - <= | ||
164 | * 3 - == | ||
165 | * 4 - != | ||
166 | * 5 - >= | ||
167 | * 6 - > | ||
168 | */ | ||
169 | #define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) | ||
170 | /* 0 - reg | ||
171 | * 1 - mem | ||
172 | */ | ||
173 | #define WAIT_REG_MEM_OPERATION(x) ((x) << 6) | ||
174 | /* 0 - wait_reg_mem | ||
175 | * 1 - wr_wait_wr_reg | ||
176 | */ | ||
177 | #define WAIT_REG_MEM_ENGINE(x) ((x) << 8) | ||
178 | /* 0 - me | ||
179 | * 1 - pfp | ||
180 | */ | ||
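WAIT_REG_MEM stalls the selected engine until a register or memory dword satisfies the compare function. A sketch of a "wait until fence value >= ref" poll on memory; the trailing compare-mask and poll-interval dwords follow the convention used by radeon/amdgpu ring code and are an assumption here:

	#include <stdint.h>
	#include "vi.h"

	static int emit_wait_mem_ge(uint32_t *buf, uint64_t gpu_addr, uint32_t ref)
	{
		buf[0] = PACKET3(PACKET3_WAIT_REG_MEM, 5);
		buf[1] = WAIT_REG_MEM_FUNCTION(5) |	/* >= */
			 WAIT_REG_MEM_MEM_SPACE(1) |	/* memory */
			 WAIT_REG_MEM_ENGINE(0);	/* me */
		buf[2] = (uint32_t)gpu_addr & ~0x3u;	/* POLL_ADDR_LO */
		buf[3] = (uint32_t)(gpu_addr >> 32);	/* POLL_ADDR_HI */
		buf[4] = ref;				/* reference value */
		buf[5] = 0xffffffff;			/* compare mask */
		buf[6] = 4;				/* poll interval */
		return 7;
	}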
181 | #define PACKET3_INDIRECT_BUFFER 0x3F | ||
182 | #define INDIRECT_BUFFER_TCL2_VOLATILE (1 << 22) | ||
183 | #define INDIRECT_BUFFER_VALID (1 << 23) | ||
184 | #define INDIRECT_BUFFER_CACHE_POLICY(x) ((x) << 28) | ||
185 | /* 0 - LRU | ||
186 | * 1 - Stream | ||
187 | * 2 - Bypass | ||
188 | */ | ||
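INDIRECT_BUFFER makes the CP fetch and execute a separate command buffer. A sketch of the usual four-dword form; the size/VMID packing in the last dword follows the gfx ring code elsewhere in this series and is an assumption here:

	#include <stdint.h>
	#include "vi.h"

	static int emit_indirect_buffer(uint32_t *buf, uint64_t ib_addr,
					uint32_t len_dw, uint32_t vmid)
	{
		buf[0] = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
		buf[1] = (uint32_t)ib_addr & ~0x3u;		/* IB_BASE_LO */
		buf[2] = (uint32_t)(ib_addr >> 32) & 0xffff;	/* IB_BASE_HI */
		buf[3] = len_dw | (vmid << 24);			/* IB_SIZE | VMID */
		return 4;
	}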
189 | #define PACKET3_COPY_DATA 0x40 | ||
190 | #define PACKET3_PFP_SYNC_ME 0x42 | ||
191 | #define PACKET3_SURFACE_SYNC 0x43 | ||
192 | # define PACKET3_DEST_BASE_0_ENA (1 << 0) | ||
193 | # define PACKET3_DEST_BASE_1_ENA (1 << 1) | ||
194 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | ||
195 | # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) | ||
196 | # define PACKET3_CB2_DEST_BASE_ENA (1 << 8) | ||
197 | # define PACKET3_CB3_DEST_BASE_ENA (1 << 9) | ||
198 | # define PACKET3_CB4_DEST_BASE_ENA (1 << 10) | ||
199 | # define PACKET3_CB5_DEST_BASE_ENA (1 << 11) | ||
200 | # define PACKET3_CB6_DEST_BASE_ENA (1 << 12) | ||
201 | # define PACKET3_CB7_DEST_BASE_ENA (1 << 13) | ||
202 | # define PACKET3_DB_DEST_BASE_ENA (1 << 14) | ||
203 | # define PACKET3_TCL1_VOL_ACTION_ENA (1 << 15) | ||
204 | # define PACKET3_TC_VOL_ACTION_ENA (1 << 16) /* L2 */ | ||
205 | # define PACKET3_TC_WB_ACTION_ENA (1 << 18) /* L2 */ | ||
206 | # define PACKET3_DEST_BASE_2_ENA (1 << 19) | ||
207 | # define PACKET3_DEST_BASE_3_ENA (1 << 21) | ||
208 | # define PACKET3_TCL1_ACTION_ENA (1 << 22) | ||
209 | # define PACKET3_TC_ACTION_ENA (1 << 23) /* L2 */ | ||
210 | # define PACKET3_CB_ACTION_ENA (1 << 25) | ||
211 | # define PACKET3_DB_ACTION_ENA (1 << 26) | ||
212 | # define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27) | ||
213 | # define PACKET3_SH_KCACHE_VOL_ACTION_ENA (1 << 28) | ||
214 | # define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29) | ||
215 | #define PACKET3_COND_WRITE 0x45 | ||
216 | #define PACKET3_EVENT_WRITE 0x46 | ||
217 | #define EVENT_TYPE(x) ((x) << 0) | ||
218 | #define EVENT_INDEX(x) ((x) << 8) | ||
219 | /* 0 - any non-TS event | ||
220 | * 1 - ZPASS_DONE, PIXEL_PIPE_STAT_* | ||
221 | * 2 - SAMPLE_PIPELINESTAT | ||
222 | * 3 - SAMPLE_STREAMOUTSTAT* | ||
223 | * 4 - *S_PARTIAL_FLUSH | ||
224 | * 5 - EOP events | ||
225 | * 6 - EOS events | ||
226 | */ | ||
227 | #define PACKET3_EVENT_WRITE_EOP 0x47 | ||
228 | #define EOP_TCL1_VOL_ACTION_EN (1 << 12) | ||
229 | #define EOP_TC_VOL_ACTION_EN (1 << 13) /* L2 */ | ||
230 | #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ | ||
231 | #define EOP_TCL1_ACTION_EN (1 << 16) | ||
232 | #define EOP_TC_ACTION_EN (1 << 17) /* L2 */ | ||
233 | #define EOP_TCL2_VOLATILE (1 << 24) | ||
234 | #define EOP_CACHE_POLICY(x) ((x) << 25) | ||
235 | /* 0 - LRU | ||
236 | * 1 - Stream | ||
237 | * 2 - Bypass | ||
238 | */ | ||
239 | #define DATA_SEL(x) ((x) << 29) | ||
240 | /* 0 - discard | ||
241 | * 1 - send low 32bit data | ||
242 | * 2 - send 64bit data | ||
243 | * 3 - send 64bit GPU counter value | ||
244 | * 4 - send 64bit sys counter value | ||
245 | */ | ||
246 | #define INT_SEL(x) ((x) << 24) | ||
247 | /* 0 - none | ||
248 | * 1 - interrupt only (DATA_SEL = 0) | ||
249 | * 2 - interrupt when data write is confirmed | ||
250 | */ | ||
251 | #define DST_SEL(x) ((x) << 16) | ||
252 | /* 0 - MC | ||
253 | * 1 - TC/L2 | ||
254 | */ | ||
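EVENT_WRITE_EOP is how fences work: one packet flushes caches at end-of-pipe, writes the fence sequence number, and optionally raises an interrupt once the write is confirmed. A sketch; event type 0x14 (cache flush and invalidate with timestamp) is carried over from CIK and is an assumption here:

	#include <stdint.h>
	#include "vi.h"

	static int emit_fence_eop(uint32_t *buf, uint64_t fence_addr, uint64_t seq)
	{
		buf[0] = PACKET3(PACKET3_EVENT_WRITE_EOP, 4);
		buf[1] = EVENT_TYPE(0x14) | EVENT_INDEX(5);	/* EOP event */
		buf[2] = (uint32_t)fence_addr & ~0x3u;		/* ADDR_LO */
		buf[3] = ((uint32_t)(fence_addr >> 32) & 0xffff) |
			 DATA_SEL(2) |		/* send 64-bit data */
			 INT_SEL(2);		/* irq on confirmed write */
		buf[4] = (uint32_t)seq;
		buf[5] = (uint32_t)(seq >> 32);
		return 6;
	}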
255 | #define PACKET3_EVENT_WRITE_EOS 0x48 | ||
256 | #define PACKET3_RELEASE_MEM 0x49 | ||
257 | #define PACKET3_PREAMBLE_CNTL 0x4A | ||
258 | # define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28) | ||
259 | # define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28) | ||
260 | #define PACKET3_DMA_DATA 0x50 | ||
261 | /* 1. header | ||
262 | * 2. CONTROL | ||
263 | * 3. SRC_ADDR_LO or DATA [31:0] | ||
264 | * 4. SRC_ADDR_HI [31:0] | ||
265 | * 5. DST_ADDR_LO [31:0] | ||
266 | * 6. DST_ADDR_HI [7:0] | ||
267 | * 7. COMMAND [30:21] | BYTE_COUNT [20:0] | ||
268 | */ | ||
269 | /* CONTROL */ | ||
270 | # define PACKET3_DMA_DATA_ENGINE(x) ((x) << 0) | ||
271 | /* 0 - ME | ||
272 | * 1 - PFP | ||
273 | */ | ||
274 | # define PACKET3_DMA_DATA_SRC_CACHE_POLICY(x) ((x) << 13) | ||
275 | /* 0 - LRU | ||
276 | * 1 - Stream | ||
277 | * 2 - Bypass | ||
278 | */ | ||
279 | # define PACKET3_DMA_DATA_SRC_VOLATILE (1 << 15) | ||
280 | # define PACKET3_DMA_DATA_DST_SEL(x) ((x) << 20) | ||
281 | /* 0 - DST_ADDR using DAS | ||
282 | * 1 - GDS | ||
283 | * 3 - DST_ADDR using L2 | ||
284 | */ | ||
285 | # define PACKET3_DMA_DATA_DST_CACHE_POLICY(x) ((x) << 25) | ||
286 | /* 0 - LRU | ||
287 | * 1 - Stream | ||
288 | * 2 - Bypass | ||
289 | */ | ||
290 | # define PACKET3_DMA_DATA_DST_VOLATILE (1 << 27) | ||
291 | # define PACKET3_DMA_DATA_SRC_SEL(x) ((x) << 29) | ||
292 | /* 0 - SRC_ADDR using SAS | ||
293 | * 1 - GDS | ||
294 | * 2 - DATA | ||
295 | * 3 - SRC_ADDR using L2 | ||
296 | */ | ||
297 | # define PACKET3_DMA_DATA_CP_SYNC (1u << 31) | ||
298 | /* COMMAND */ | ||
299 | # define PACKET3_DMA_DATA_DIS_WC (1 << 21) | ||
300 | # define PACKET3_DMA_DATA_CMD_SRC_SWAP(x) ((x) << 22) | ||
301 | /* 0 - none | ||
302 | * 1 - 8 in 16 | ||
303 | * 2 - 8 in 32 | ||
304 | * 3 - 8 in 64 | ||
305 | */ | ||
306 | # define PACKET3_DMA_DATA_CMD_DST_SWAP(x) ((x) << 24) | ||
307 | /* 0 - none | ||
308 | * 1 - 8 in 16 | ||
309 | * 2 - 8 in 32 | ||
310 | * 3 - 8 in 64 | ||
311 | */ | ||
312 | # define PACKET3_DMA_DATA_CMD_SAS (1 << 26) | ||
313 | /* 0 - memory | ||
314 | * 1 - register | ||
315 | */ | ||
316 | # define PACKET3_DMA_DATA_CMD_DAS (1 << 27) | ||
317 | /* 0 - memory | ||
318 | * 1 - register | ||
319 | */ | ||
320 | # define PACKET3_DMA_DATA_CMD_SAIC (1 << 28) | ||
321 | # define PACKET3_DMA_DATA_CMD_DAIC (1 << 29) | ||
322 | # define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30) | ||
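Combining the CONTROL and COMMAND fields, a CP-DMA copy of up to 2 MiB - 1 (BYTE_COUNT is 21 bits wide) looks like the following sketch, laid out exactly as the seven-dword comment above describes:

	#include <stdint.h>
	#include "vi.h"

	static int emit_cpdma_copy(uint32_t *buf, uint64_t src, uint64_t dst,
				   uint32_t nbytes)
	{
		buf[0] = PACKET3(PACKET3_DMA_DATA, 5);
		buf[1] = PACKET3_DMA_DATA_ENGINE(0) |	/* ME */
			 PACKET3_DMA_DATA_SRC_SEL(0) |	/* read from SRC_ADDR */
			 PACKET3_DMA_DATA_DST_SEL(0);	/* write to DST_ADDR */
		buf[2] = (uint32_t)src;			/* SRC_ADDR_LO */
		buf[3] = (uint32_t)(src >> 32);		/* SRC_ADDR_HI */
		buf[4] = (uint32_t)dst;			/* DST_ADDR_LO */
		buf[5] = (uint32_t)(dst >> 32);		/* DST_ADDR_HI */
		buf[6] = nbytes & 0x1fffff;		/* BYTE_COUNT [20:0] */
		return 7;
	}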
323 | #define PACKET3_ACQUIRE_MEM 0x58 | ||
324 | #define PACKET3_REWIND 0x59 | ||
325 | #define PACKET3_LOAD_UCONFIG_REG 0x5E | ||
326 | #define PACKET3_LOAD_SH_REG 0x5F | ||
327 | #define PACKET3_LOAD_CONFIG_REG 0x60 | ||
328 | #define PACKET3_LOAD_CONTEXT_REG 0x61 | ||
329 | #define PACKET3_SET_CONFIG_REG 0x68 | ||
330 | #define PACKET3_SET_CONFIG_REG_START 0x00002000 | ||
331 | #define PACKET3_SET_CONFIG_REG_END 0x00002c00 | ||
332 | #define PACKET3_SET_CONTEXT_REG 0x69 | ||
333 | #define PACKET3_SET_CONTEXT_REG_START 0x0000a000 | ||
334 | #define PACKET3_SET_CONTEXT_REG_END 0x0000a400 | ||
335 | #define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73 | ||
336 | #define PACKET3_SET_SH_REG 0x76 | ||
337 | #define PACKET3_SET_SH_REG_START 0x00002c00 | ||
338 | #define PACKET3_SET_SH_REG_END 0x00003000 | ||
339 | #define PACKET3_SET_SH_REG_OFFSET 0x77 | ||
340 | #define PACKET3_SET_QUEUE_REG 0x78 | ||
341 | #define PACKET3_SET_UCONFIG_REG 0x79 | ||
342 | #define PACKET3_SET_UCONFIG_REG_START 0x0000c000 | ||
343 | #define PACKET3_SET_UCONFIG_REG_END 0x0000c400 | ||
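The SET_*_REG opcodes each cover a fixed window of the register space; the packet body carries dword register indices relative to that window's START value, each followed by the value to write. A single-register sketch for the context range:

	#include <stdint.h>
	#include "vi.h"

	/* `reg` must be a dword register index in
	 * [PACKET3_SET_CONTEXT_REG_START, PACKET3_SET_CONTEXT_REG_END) */
	static int emit_set_context_reg(uint32_t *buf, uint32_t reg, uint32_t val)
	{
		buf[0] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
		buf[1] = reg - PACKET3_SET_CONTEXT_REG_START;
		buf[2] = val;
		return 3;
	}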
344 | #define PACKET3_SCRATCH_RAM_WRITE 0x7D | ||
345 | #define PACKET3_SCRATCH_RAM_READ 0x7E | ||
346 | #define PACKET3_LOAD_CONST_RAM 0x80 | ||
347 | #define PACKET3_WRITE_CONST_RAM 0x81 | ||
348 | #define PACKET3_DUMP_CONST_RAM 0x83 | ||
349 | #define PACKET3_INCREMENT_CE_COUNTER 0x84 | ||
350 | #define PACKET3_INCREMENT_DE_COUNTER 0x85 | ||
351 | #define PACKET3_WAIT_ON_CE_COUNTER 0x86 | ||
352 | #define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 | ||
353 | #define PACKET3_SWITCH_BUFFER 0x8B | ||
354 | |||
355 | #define VCE_CMD_NO_OP 0x00000000 | ||
356 | #define VCE_CMD_END 0x00000001 | ||
357 | #define VCE_CMD_IB 0x00000002 | ||
358 | #define VCE_CMD_FENCE 0x00000003 | ||
359 | #define VCE_CMD_TRAP 0x00000004 | ||
360 | #define VCE_CMD_IB_AUTO 0x00000005 | ||
361 | #define VCE_CMD_SEMAPHORE 0x00000006 | ||
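The VCE ring does not use PM4; commands are bare dwords from the list above, each followed by its operands. A sketch of a fence sequence patterned on how such rings are typically driven; the exact dword layout is an assumption, not taken from this header:

	#include <stdint.h>
	#include "vi.h"

	static int emit_vce_fence(uint32_t *buf, uint64_t addr, uint32_t seq)
	{
		buf[0] = VCE_CMD_FENCE;
		buf[1] = (uint32_t)addr;
		buf[2] = (uint32_t)(addr >> 32);
		buf[3] = seq;
		buf[4] = VCE_CMD_TRAP;	/* raise an interrupt */
		buf[5] = VCE_CMD_END;
		return 6;
	}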
362 | |||
363 | #endif | ||