Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1271
1 file changed, 1271 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
new file mode 100644
index 000000000000..ac8cff85cde3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -0,0 +1,1271 @@
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/topaz_mc.bin");
MODULE_FIRMWARE("radeon/tonga_mc.bin");

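/* golden register tables: each entry is a { register, AND mask, OR value }
 * triplet, consumed three at a time by amdgpu_program_register_sequence() */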
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_iceland_a11[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_program_register_sequence(adev,
						 iceland_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_iceland_a11,
						 (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}

/**
 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle (VI).
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}

void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}

void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}

/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_CARRIZO:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}

/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
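	/* the io_debug section holds (MC_SEQ_IO_DEBUG_INDEX,
	 * MC_SEQ_IO_DEBUG_DATA) dword pairs, hence the / (4 * 2) */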
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
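	/* place VRAM at the bottom of the MC address space, then let the
	 * GTT aperture fall in the space above it */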
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}

/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
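	/* MC_VM_FB_LOCATION holds the FB range in 16MB units:
	 * bits 31:16 = end >> 24, bits 15:0 = start >> 24 */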
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
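	/* e.g. 8 channels * 32-bit chansize = a 256-bit effective bus */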
	adev->mc.vram_width = numchan * chansize;
	/* Could aperture size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* CONFIG_MEMSIZE reports the size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user has overridden it, set the gart
	 * size to 1024M or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
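	/* example encoding per the table above: a valid, snooped,
	 * readable and writable system page would use
	 * flags = 0x67 (valid | system | snooped | read | write) */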
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}

/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn);
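	/* point contexts 1-15 at the GART table for now; the VM code
	 * rebinds them to per-process page tables at VM flush time */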
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
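	/* the hw field encodes the page table block size relative to the
	 * 9-bit (512 page) minimum, hence the - 9 */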
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}

static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
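	/* one 64-bit PTE (8 bytes) per GPU page */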
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}

/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gmc_v8_0_vm_init - vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits asic specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;

	/* base offset of vram pages */
	if (adev->flags & AMDGPU_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
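		/* MC_VM_FB_OFFSET is in units of 4MB, hence the << 22 */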
		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else
		adev->vm_manager.vram_base_offset = 0;

	return 0;
}

/**
 * gmc_v8_0_vm_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}

/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
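	/* mc_client packs a four-character ASCII client tag, unpacked here */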
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}

static int gmc_v8_0_early_init(struct amdgpu_device *adev)
{
	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	if (adev->flags & AMDGPU_IS_APU) {
		adev->mc.is_gddr5 = false;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		if (((tmp & MC_SEQ_MISC0__GDDR5_MASK) >>
		     MC_SEQ_MISC0__GDDR5__SHIFT) == MC_SEQ_MISC0__GDDR5_VALUE)
			adev->mc.is_gddr5 = true;
		else
			adev->mc.is_gddr5 = false;
	}

	return 0;
}

static int gmc_v8_0_sw_init(struct amdgpu_device *adev)
{
	int r;
	int dma_bits;

	r = amdgpu_gem_init(adev);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size is 40 bits.
	 */
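	/* amdgpu_vm_size is in GB; 1GB is 1 << 18 pages of 4k */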
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static int gmc_v8_0_sw_fini(struct amdgpu_device *adev)
{
	int i;

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static int gmc_v8_0_hw_init(struct amdgpu_device *adev)
{
	int r;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (!(adev->flags & AMDGPU_IS_APU)) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}

static int gmc_v8_0_hw_fini(struct amdgpu_device *adev)
{
	gmc_v8_0_gart_disable(adev);

	return 0;
}

static int gmc_v8_0_suspend(struct amdgpu_device *adev)
{
	int i;

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			amdgpu_fence_unref(&adev->vm_manager.active[i]);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}

static int gmc_v8_0_resume(struct amdgpu_device *adev)
{
	int r;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}

static bool gmc_v8_0_is_idle(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v8_0_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static void gmc_v8_0_print_status(struct amdgpu_device *adev)
{
	int i, j;

	dev_info(adev->dev, "GMC 8.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
		 RREG32(mmVM_L2_CNTL4));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}

static int gmc_v8_0_soft_reset(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMDGPU_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v8_0_print_status(adev);

		gmc_v8_0_mc_stop(adev, &save);
		if (gmc_v8_0_wait_for_idle(adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
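		/* re-read so the clear below starts from the current hw state */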
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v8_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v8_0_print_status(adev);
	}

	return 0;
}

static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	return 0;
}

static int gmc_v8_0_set_clockgating_state(struct amdgpu_device *adev,
					  enum amdgpu_clockgating_state state)
{
	/* XXX handled via the smc on VI */

	return 0;
}

static int gmc_v8_0_set_powergating_state(struct amdgpu_device *adev,
					  enum amdgpu_powergating_state state)
{
	return 0;
}

const struct amdgpu_ip_funcs gmc_v8_0_ip_funcs = {
	.early_init = gmc_v8_0_early_init,
	.late_init = NULL,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.soft_reset = gmc_v8_0_soft_reset,
	.print_status = gmc_v8_0_print_status,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};

static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}

static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}