Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1339
1 file changed, 1339 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
new file mode 100644
index 000000000000..ae37fce36520
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -0,0 +1,1339 @@
1 | /* | ||
2 | * Copyright 2014 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | */ | ||
23 | #include <linux/firmware.h> | ||
24 | #include "drmP.h" | ||
25 | #include "amdgpu.h" | ||
26 | #include "cikd.h" | ||
27 | #include "cik.h" | ||
28 | #include "gmc_v7_0.h" | ||
29 | #include "amdgpu_ucode.h" | ||
30 | |||
31 | #include "bif/bif_4_1_d.h" | ||
32 | #include "bif/bif_4_1_sh_mask.h" | ||
33 | |||
34 | #include "gmc/gmc_7_1_d.h" | ||
35 | #include "gmc/gmc_7_1_sh_mask.h" | ||
36 | |||
37 | #include "oss/oss_2_0_d.h" | ||
38 | #include "oss/oss_2_0_sh_mask.h" | ||
39 | |||
40 | static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); | ||
41 | static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); | ||
42 | |||
43 | MODULE_FIRMWARE("radeon/boniare_mc.bin"); | ||
44 | MODULE_FIRMWARE("radeon/hawaii_mc.bin"); | ||
45 | |||
46 | /** | ||
47 | * gmc_v7_0_mc_wait_for_idle - wait for MC idle callback. | ||
48 | * | ||
49 | * @adev: amdgpu_device pointer | ||
50 | * | ||
51 | * Wait for the MC (memory controller) to be idle. | ||
52 | * (evergreen+). | ||
53 | * Returns 0 if the MC is idle, -1 if not. | ||
54 | */ | ||
55 | int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev) | ||
56 | { | ||
57 | unsigned i; | ||
58 | u32 tmp; | ||
59 | |||
60 | for (i = 0; i < adev->usec_timeout; i++) { | ||
61 | /* check the MC busy bits in SRBM_STATUS */ | ||
62 | tmp = RREG32(mmSRBM_STATUS) & 0x1F00; | ||
63 | if (!tmp) | ||
64 | return 0; | ||
65 | udelay(1); | ||
66 | } | ||
67 | return -1; | ||
68 | } | ||
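Note: the 0x1F00 mask above covers the same SRBM_STATUS busy bits that gmc_v7_0_wait_for_idle() later in this file spells out by name. A sketch of the named equivalent, assuming the CIK SRBM_STATUS bit layout used there:

	/* same check as above, written with the named CIK masks (illustrative) */
	tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
				       SRBM_STATUS__MCB_BUSY_MASK |
				       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
				       SRBM_STATUS__MCC_BUSY_MASK |
				       SRBM_STATUS__MCD_BUSY_MASK);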
69 | |||
70 | void gmc_v7_0_mc_stop(struct amdgpu_device *adev, | ||
71 | struct amdgpu_mode_mc_save *save) | ||
72 | { | ||
73 | u32 blackout; | ||
74 | |||
75 | if (adev->mode_info.num_crtc) | ||
76 | amdgpu_display_stop_mc_access(adev, save); | ||
77 | |||
78 | amdgpu_asic_wait_for_mc_idle(adev); | ||
79 | |||
80 | blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); | ||
81 | if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { | ||
82 | /* Block CPU access */ | ||
83 | WREG32(mmBIF_FB_EN, 0); | ||
84 | /* blackout the MC */ | ||
85 | blackout = REG_SET_FIELD(blackout, | ||
86 | MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); | ||
87 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); | ||
88 | } | ||
89 | /* wait for the MC to settle */ | ||
90 | udelay(100); | ||
91 | } | ||
92 | |||
93 | void gmc_v7_0_mc_resume(struct amdgpu_device *adev, | ||
94 | struct amdgpu_mode_mc_save *save) | ||
95 | { | ||
96 | u32 tmp; | ||
97 | |||
98 | /* unblackout the MC */ | ||
99 | tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL); | ||
100 | tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); | ||
101 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp); | ||
102 | /* allow CPU access */ | ||
103 | tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); | ||
104 | tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); | ||
105 | WREG32(mmBIF_FB_EN, tmp); | ||
106 | |||
107 | if (adev->mode_info.num_crtc) | ||
108 | amdgpu_display_resume_mc_access(adev, save); | ||
109 | } | ||
110 | |||
111 | /** | ||
112 | * gmc_v7_0_init_microcode - load ucode images from disk | ||
113 | * | ||
114 | * @adev: amdgpu_device pointer | ||
115 | * | ||
116 | * Use the firmware interface to load the ucode images into | ||
117 | * the driver (not loaded into hw). | ||
118 | * Returns 0 on success, error on failure. | ||
119 | */ | ||
120 | static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) | ||
121 | { | ||
122 | const char *chip_name; | ||
123 | char fw_name[30]; | ||
124 | int err; | ||
125 | |||
126 | DRM_DEBUG("\n"); | ||
127 | |||
128 | switch (adev->asic_type) { | ||
129 | case CHIP_BONAIRE: | ||
130 | chip_name = "bonaire"; | ||
131 | break; | ||
132 | case CHIP_HAWAII: | ||
133 | chip_name = "hawaii"; | ||
134 | break; | ||
135 | case CHIP_KAVERI: | ||
136 | case CHIP_KABINI: | ||
137 | return 0; | ||
138 | default: BUG(); | ||
139 | } | ||
140 | |||
141 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); | ||
142 | err = request_firmware(&adev->mc.fw, fw_name, adev->dev); | ||
143 | if (err) | ||
144 | goto out; | ||
145 | err = amdgpu_ucode_validate(adev->mc.fw); | ||
146 | |||
147 | out: | ||
148 | if (err) { | ||
149 | printk(KERN_ERR | ||
150 | "cik_mc: Failed to load firmware \"%s\"\n", | ||
151 | fw_name); | ||
152 | release_firmware(adev->mc.fw); | ||
153 | adev->mc.fw = NULL; | ||
154 | } | ||
155 | return err; | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * gmc_v7_0_mc_load_microcode - load MC ucode into the hw | ||
160 | * | ||
161 | * @adev: amdgpu_device pointer | ||
162 | * | ||
163 | * Load the GDDR MC ucode into the hw (CIK). | ||
164 | * Returns 0 on success, error on failure. | ||
165 | */ | ||
166 | static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) | ||
167 | { | ||
168 | const struct mc_firmware_header_v1_0 *hdr; | ||
169 | const __le32 *fw_data = NULL; | ||
170 | const __le32 *io_mc_regs = NULL; | ||
171 | u32 running, blackout = 0; | ||
172 | int i, ucode_size, regs_size; | ||
173 | |||
174 | if (!adev->mc.fw) | ||
175 | return -EINVAL; | ||
176 | |||
177 | hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data; | ||
178 | amdgpu_ucode_print_mc_hdr(&hdr->header); | ||
179 | |||
180 | adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version); | ||
181 | regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); | ||
182 | io_mc_regs = (const __le32 *) | ||
183 | (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); | ||
184 | ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; | ||
185 | fw_data = (const __le32 *) | ||
186 | (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | ||
187 | |||
188 | running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); | ||
189 | |||
190 | if (running == 0) { | ||
191 | if (running) { | ||
192 | blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); | ||
193 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); | ||
194 | } | ||
195 | |||
196 | /* reset the engine and set to writable */ | ||
197 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); | ||
198 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); | ||
199 | |||
200 | /* load mc io regs */ | ||
201 | for (i = 0; i < regs_size; i++) { | ||
202 | WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++)); | ||
203 | WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++)); | ||
204 | } | ||
205 | /* load the MC ucode */ | ||
206 | for (i = 0; i < ucode_size; i++) | ||
207 | WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++)); | ||
208 | |||
209 | /* put the engine back into the active state */ | ||
210 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); | ||
211 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004); | ||
212 | WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001); | ||
213 | |||
214 | /* wait for training to complete */ | ||
215 | for (i = 0; i < adev->usec_timeout; i++) { | ||
216 | if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), | ||
217 | MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0)) | ||
218 | break; | ||
219 | udelay(1); | ||
220 | } | ||
221 | for (i = 0; i < adev->usec_timeout; i++) { | ||
222 | if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), | ||
223 | MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1)) | ||
224 | break; | ||
225 | udelay(1); | ||
226 | } | ||
227 | |||
228 | if (running) | ||
229 | WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout); | ||
230 | } | ||
231 | |||
232 | return 0; | ||
233 | } | ||
234 | |||
235 | static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, | ||
236 | struct amdgpu_mc *mc) | ||
237 | { | ||
238 | if (mc->mc_vram_size > 0xFFC0000000ULL) { | ||
239 | /* leave room for at least 1024M GTT: cap at 1 TiB - 1 GiB of the 40-bit MC space */ | ||
240 | dev_warn(adev->dev, "limiting VRAM\n"); | ||
241 | mc->real_vram_size = 0xFFC0000000ULL; | ||
242 | mc->mc_vram_size = 0xFFC0000000ULL; | ||
243 | } | ||
244 | amdgpu_vram_location(adev, &adev->mc, 0); | ||
245 | adev->mc.gtt_base_align = 0; | ||
246 | amdgpu_gtt_location(adev, mc); | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * gmc_v7_0_mc_program - program the GPU memory controller | ||
251 | * | ||
252 | * @adev: amdgpu_device pointer | ||
253 | * | ||
254 | * Set the location of vram, gart, and AGP in the GPU's | ||
255 | * physical address space (CIK). | ||
256 | */ | ||
257 | static void gmc_v7_0_mc_program(struct amdgpu_device *adev) | ||
258 | { | ||
259 | struct amdgpu_mode_mc_save save; | ||
260 | u32 tmp; | ||
261 | int i, j; | ||
262 | |||
263 | /* Initialize HDP */ | ||
264 | for (i = 0, j = 0; i < 32; i++, j += 0x6) { | ||
265 | WREG32((0xb05 + j), 0x00000000); | ||
266 | WREG32((0xb06 + j), 0x00000000); | ||
267 | WREG32((0xb07 + j), 0x00000000); | ||
268 | WREG32((0xb08 + j), 0x00000000); | ||
269 | WREG32((0xb09 + j), 0x00000000); | ||
270 | } | ||
271 | WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); | ||
272 | |||
273 | if (adev->mode_info.num_crtc) | ||
274 | amdgpu_display_set_vga_render_state(adev, false); | ||
275 | |||
276 | gmc_v7_0_mc_stop(adev, &save); | ||
277 | if (amdgpu_asic_wait_for_mc_idle(adev)) { | ||
278 | dev_warn(adev->dev, "Wait for MC idle timed out!\n"); | ||
279 | } | ||
280 | /* Update configuration */ | ||
281 | WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, | ||
282 | adev->mc.vram_start >> 12); | ||
283 | WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, | ||
284 | adev->mc.vram_end >> 12); | ||
285 | WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, | ||
286 | adev->vram_scratch.gpu_addr >> 12); | ||
287 | tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16; | ||
288 | tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF); | ||
289 | WREG32(mmMC_VM_FB_LOCATION, tmp); | ||
290 | /* XXX double check these! */ | ||
291 | WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8)); | ||
292 | WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30)); | ||
293 | WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF); | ||
294 | WREG32(mmMC_VM_AGP_BASE, 0); | ||
295 | WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); | ||
296 | WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); | ||
297 | if (amdgpu_asic_wait_for_mc_idle(adev)) { | ||
298 | dev_warn(adev->dev, "Wait for MC idle timed out!\n"); | ||
299 | } | ||
300 | gmc_v7_0_mc_resume(adev, &save); | ||
301 | |||
302 | WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); | ||
303 | |||
304 | tmp = RREG32(mmHDP_MISC_CNTL); | ||
305 | tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); | ||
306 | WREG32(mmHDP_MISC_CNTL, tmp); | ||
307 | |||
308 | tmp = RREG32(mmHDP_HOST_PATH_CNTL); | ||
309 | WREG32(mmHDP_HOST_PATH_CNTL, tmp); | ||
310 | } | ||
311 | |||
312 | /** | ||
313 | * gmc_v7_0_mc_init - initialize the memory controller driver params | ||
314 | * | ||
315 | * @adev: amdgpu_device pointer | ||
316 | * | ||
317 | * Look up the amount of vram, vram width, and decide how to place | ||
318 | * vram and gart within the GPU's physical address space (CIK). | ||
319 | * Returns 0 for success. | ||
320 | */ | ||
321 | static int gmc_v7_0_mc_init(struct amdgpu_device *adev) | ||
322 | { | ||
323 | u32 tmp; | ||
324 | int chansize, numchan; | ||
325 | |||
326 | /* Get VRAM information */ | ||
327 | tmp = RREG32(mmMC_ARB_RAMCFG); | ||
328 | if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { | ||
329 | chansize = 64; | ||
330 | } else { | ||
331 | chansize = 32; | ||
332 | } | ||
333 | tmp = RREG32(mmMC_SHARED_CHMAP); | ||
334 | switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { | ||
335 | case 0: | ||
336 | default: | ||
337 | numchan = 1; | ||
338 | break; | ||
339 | case 1: | ||
340 | numchan = 2; | ||
341 | break; | ||
342 | case 2: | ||
343 | numchan = 4; | ||
344 | break; | ||
345 | case 3: | ||
346 | numchan = 8; | ||
347 | break; | ||
348 | case 4: | ||
349 | numchan = 3; | ||
350 | break; | ||
351 | case 5: | ||
352 | numchan = 6; | ||
353 | break; | ||
354 | case 6: | ||
355 | numchan = 10; | ||
356 | break; | ||
357 | case 7: | ||
358 | numchan = 12; | ||
359 | break; | ||
360 | case 8: | ||
361 | numchan = 16; | ||
362 | break; | ||
363 | } | ||
364 | adev->mc.vram_width = numchan * chansize; | ||
365 | /* Could aper size report 0 ? */ | ||
366 | adev->mc.aper_base = pci_resource_start(adev->pdev, 0); | ||
367 | adev->mc.aper_size = pci_resource_len(adev->pdev, 0); | ||
368 | /* size in MB (CONFIG_MEMSIZE reports MB on CIK) */ | ||
369 | adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; | ||
370 | adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; | ||
371 | adev->mc.visible_vram_size = adev->mc.aper_size; | ||
372 | |||
373 | /* unless the user has overridden it, set the gart | ||
374 | * size equal to 1024 MB or the vram size, whichever is larger. | ||
375 | */ | ||
376 | if (amdgpu_gart_size == -1) | ||
377 | adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size); | ||
378 | else | ||
379 | adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20; | ||
380 | |||
381 | gmc_v7_0_vram_gtt_location(adev, &adev->mc); | ||
382 | |||
383 | return 0; | ||
384 | } | ||
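As a worked example of the width calculation above (the values are hypothetical, not read from hardware): a board whose MC_ARB_RAMCFG has CHANSIZE set and whose MC_SHARED_CHMAP reports NOOFCHAN == 2 ends up with a 256-bit memory interface:

	/* illustrative only: hypothetical register contents */
	int chansize = 64;                        /* CHANSIZE set -> 64-bit channels */
	int numchan = 4;                          /* NOOFCHAN == 2 -> 4 channels */
	adev->mc.vram_width = numchan * chansize; /* 4 * 64 = 256 bits */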
385 | |||
386 | /* | ||
387 | * GART | ||
388 | * VMID 0 is the physical GPU address space as used by the kernel. | ||
389 | * VMIDs 1-15 are used for userspace clients and are handled | ||
390 | * by the amdgpu vm/hsa code. | ||
391 | */ | ||
392 | |||
393 | /** | ||
394 | * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback | ||
395 | * | ||
396 | * @adev: amdgpu_device pointer | ||
397 | * @vmid: vm instance to flush | ||
398 | * | ||
399 | * Flush the TLB for the requested page table (CIK). | ||
400 | */ | ||
401 | static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev, | ||
402 | uint32_t vmid) | ||
403 | { | ||
404 | /* flush hdp cache */ | ||
405 | WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0); | ||
406 | |||
407 | /* bits 0-15 are the VM contexts0-15 */ | ||
408 | WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); | ||
409 | } | ||
410 | |||
411 | /** | ||
412 | * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO | ||
413 | * | ||
414 | * @adev: amdgpu_device pointer | ||
415 | * @cpu_pt_addr: cpu address of the page table | ||
416 | * @gpu_page_idx: entry in the page table to update | ||
417 | * @addr: dst addr to write into pte/pde | ||
418 | * @flags: access flags | ||
419 | * | ||
420 | * Update the page tables using the CPU. | ||
421 | */ | ||
422 | static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev, | ||
423 | void *cpu_pt_addr, | ||
424 | uint32_t gpu_page_idx, | ||
425 | uint64_t addr, | ||
426 | uint32_t flags) | ||
427 | { | ||
428 | void __iomem *ptr = (void *)cpu_pt_addr; | ||
429 | uint64_t value; | ||
430 | |||
431 | value = addr & 0xFFFFFFFFFFFFF000ULL; | ||
432 | value |= flags; | ||
433 | writeq(value, ptr + (gpu_page_idx * 8)); | ||
434 | |||
435 | return 0; | ||
436 | } | ||
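A minimal sketch of how the helper above might be called to map a single 4 KiB system page into the first GART entry. The DMA address is hypothetical, and the AMDGPU_PTE_* flags are assumed to be the generic PTE flag definitions from amdgpu.h:

	/* illustrative only: map one page at GART index 0 */
	uint64_t dma_addr = 0x12345000ULL;  /* hypothetical bus address of the page */
	uint32_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
			 AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE;

	/* adev->gart.ptr is the CPU mapping of the GART table */
	gmc_v7_0_gart_set_pte_pde(adev, adev->gart.ptr, 0, dma_addr, flags);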
437 | |||
438 | /** | ||
439 | * gmc_v7_0_gart_enable - gart enable | ||
440 | * | ||
441 | * @adev: amdgpu_device pointer | ||
442 | * | ||
443 | * This sets up the TLBs, programs the page tables for VMID0, | ||
444 | * sets up the hw for VMIDs 1-15 which are allocated on | ||
445 | * demand, and sets up the global locations for the LDS, GDS, | ||
446 | * and GPUVM for FSA64 clients (CIK). | ||
447 | * Returns 0 for success, errors for failure. | ||
448 | */ | ||
449 | static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) | ||
450 | { | ||
451 | int r, i; | ||
452 | u32 tmp; | ||
453 | |||
454 | if (adev->gart.robj == NULL) { | ||
455 | dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); | ||
456 | return -EINVAL; | ||
457 | } | ||
458 | r = amdgpu_gart_table_vram_pin(adev); | ||
459 | if (r) | ||
460 | return r; | ||
461 | /* Setup TLB control */ | ||
462 | tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); | ||
463 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); | ||
464 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1); | ||
465 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); | ||
466 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1); | ||
467 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); | ||
468 | WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); | ||
469 | /* Setup L2 cache */ | ||
470 | tmp = RREG32(mmVM_L2_CNTL); | ||
471 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); | ||
472 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); | ||
473 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1); | ||
474 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); | ||
475 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); | ||
476 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); | ||
477 | WREG32(mmVM_L2_CNTL, tmp); | ||
478 | tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); | ||
479 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); | ||
480 | WREG32(mmVM_L2_CNTL2, tmp); | ||
481 | tmp = RREG32(mmVM_L2_CNTL3); | ||
482 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); | ||
483 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4); | ||
484 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4); | ||
485 | WREG32(mmVM_L2_CNTL3, tmp); | ||
486 | /* setup context0 */ | ||
487 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); | ||
488 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); | ||
489 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); | ||
490 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | ||
491 | (u32)(adev->dummy_page.addr >> 12)); | ||
492 | WREG32(mmVM_CONTEXT0_CNTL2, 0); | ||
493 | tmp = RREG32(mmVM_CONTEXT0_CNTL); | ||
494 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); | ||
495 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); | ||
496 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
497 | WREG32(mmVM_CONTEXT0_CNTL, tmp); | ||
498 | |||
499 | WREG32(0x575, 0); | ||
500 | WREG32(0x576, 0); | ||
501 | WREG32(0x577, 0); | ||
502 | |||
503 | /* empty context1-15 */ | ||
504 | /* FIXME start with 4G, once using 2 level pt switch to full | ||
505 | * vm size space | ||
506 | */ | ||
507 | /* set vm size, must be a multiple of 4 */ | ||
508 | WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); | ||
509 | WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); | ||
510 | for (i = 1; i < 16; i++) { | ||
511 | if (i < 8) | ||
512 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, | ||
513 | adev->gart.table_addr >> 12); | ||
514 | else | ||
515 | WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, | ||
516 | adev->gart.table_addr >> 12); | ||
517 | } | ||
518 | |||
519 | /* enable context1-15 */ | ||
520 | WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, | ||
521 | (u32)(adev->dummy_page.addr >> 12)); | ||
522 | WREG32(mmVM_CONTEXT1_CNTL2, 4); | ||
523 | tmp = RREG32(mmVM_CONTEXT1_CNTL); | ||
524 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); | ||
525 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); | ||
526 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
527 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
528 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
529 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
530 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
531 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
532 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
533 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
534 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
535 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
536 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT, 1); | ||
537 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); | ||
538 | tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, | ||
539 | amdgpu_vm_block_size - 9); | ||
540 | WREG32(mmVM_CONTEXT1_CNTL, tmp); | ||
541 | |||
542 | if (adev->asic_type == CHIP_KAVERI) { | ||
543 | tmp = RREG32(mmCHUB_CONTROL); | ||
544 | tmp &= ~BYPASS_VM; | ||
545 | WREG32(mmCHUB_CONTROL, tmp); | ||
546 | } | ||
547 | |||
548 | gmc_v7_0_gart_flush_gpu_tlb(adev, 0); | ||
549 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | ||
550 | (unsigned)(adev->mc.gtt_size >> 20), | ||
551 | (unsigned long long)adev->gart.table_addr); | ||
552 | adev->gart.ready = true; | ||
553 | return 0; | ||
554 | } | ||
555 | |||
556 | static int gmc_v7_0_gart_init(struct amdgpu_device *adev) | ||
557 | { | ||
558 | int r; | ||
559 | |||
560 | if (adev->gart.robj) { | ||
561 | WARN(1, "R600 PCIE GART already initialized\n"); | ||
562 | return 0; | ||
563 | } | ||
564 | /* Initialize common gart structure */ | ||
565 | r = amdgpu_gart_init(adev); | ||
566 | if (r) | ||
567 | return r; | ||
568 | adev->gart.table_size = adev->gart.num_gpu_pages * 8; | ||
569 | return amdgpu_gart_table_vram_alloc(adev); | ||
570 | } | ||
571 | |||
572 | /** | ||
573 | * gmc_v7_0_gart_disable - gart disable | ||
574 | * | ||
575 | * @adev: amdgpu_device pointer | ||
576 | * | ||
577 | * This disables all VM page tables (CIK). | ||
578 | */ | ||
579 | static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) | ||
580 | { | ||
581 | u32 tmp; | ||
582 | |||
583 | /* Disable all tables */ | ||
584 | WREG32(mmVM_CONTEXT0_CNTL, 0); | ||
585 | WREG32(mmVM_CONTEXT1_CNTL, 0); | ||
586 | /* Setup TLB control */ | ||
587 | tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); | ||
588 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); | ||
589 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0); | ||
590 | tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); | ||
591 | WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); | ||
592 | /* Setup L2 cache */ | ||
593 | tmp = RREG32(mmVM_L2_CNTL); | ||
594 | tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); | ||
595 | WREG32(mmVM_L2_CNTL, tmp); | ||
596 | WREG32(mmVM_L2_CNTL2, 0); | ||
597 | amdgpu_gart_table_vram_unpin(adev); | ||
598 | } | ||
599 | |||
600 | /** | ||
601 | * gmc_v7_0_gart_fini - vm fini callback | ||
602 | * | ||
603 | * @adev: amdgpu_device pointer | ||
604 | * | ||
605 | * Tears down the driver GART/VM setup (CIK). | ||
606 | */ | ||
607 | static void gmc_v7_0_gart_fini(struct amdgpu_device *adev) | ||
608 | { | ||
609 | amdgpu_gart_table_vram_free(adev); | ||
610 | amdgpu_gart_fini(adev); | ||
611 | } | ||
612 | |||
613 | /* | ||
614 | * vm | ||
615 | * VMID 0 is the physical GPU address space as used by the kernel. | ||
616 | * VMIDs 1-15 are used for userspace clients and are handled | ||
617 | * by the amdgpu vm/hsa code. | ||
618 | */ | ||
619 | /** | ||
620 | * gmc_v7_0_vm_init - cik vm init callback | ||
621 | * | ||
622 | * @adev: amdgpu_device pointer | ||
623 | * | ||
624 | * Inits cik specific vm parameters (number of VMs, base of vram for | ||
625 | * VMIDs 1-15) (CIK). | ||
626 | * Returns 0 for success. | ||
627 | */ | ||
628 | static int gmc_v7_0_vm_init(struct amdgpu_device *adev) | ||
629 | { | ||
630 | /* | ||
631 | * number of VMs | ||
632 | * VMID 0 is reserved for System | ||
633 | * amdgpu graphics/compute will use VMIDs 1-7 | ||
634 | * amdkfd will use VMIDs 8-15 | ||
635 | */ | ||
636 | adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS; | ||
637 | |||
638 | /* base offset of vram pages */ | ||
639 | if (adev->flags & AMDGPU_IS_APU) { | ||
640 | u64 tmp = RREG32(mmMC_VM_FB_OFFSET); | ||
641 | tmp <<= 22; | ||
642 | adev->vm_manager.vram_base_offset = tmp; | ||
643 | } else | ||
644 | adev->vm_manager.vram_base_offset = 0; | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
649 | /** | ||
650 | * gmc_v7_0_vm_fini - cik vm fini callback | ||
651 | * | ||
652 | * @adev: amdgpu_device pointer | ||
653 | * | ||
654 | * Tear down any asic specific VM setup (CIK). | ||
655 | */ | ||
656 | static void gmc_v7_0_vm_fini(struct amdgpu_device *adev) | ||
657 | { | ||
658 | } | ||
659 | |||
660 | /** | ||
661 | * gmc_v7_0_vm_decode_fault - print human readable fault info | ||
662 | * | ||
663 | * @adev: amdgpu_device pointer | ||
664 | * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value | ||
665 | * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value | ||
666 | * | ||
667 | * Print human readable fault information (CIK). | ||
668 | */ | ||
669 | static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, | ||
670 | u32 status, u32 addr, u32 mc_client) | ||
671 | { | ||
672 | u32 mc_id; | ||
673 | u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); | ||
674 | u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, | ||
675 | PROTECTIONS); | ||
676 | char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, | ||
677 | (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; | ||
678 | |||
679 | mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, | ||
680 | MEMORY_CLIENT_ID); | ||
681 | |||
682 | printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", | ||
683 | protections, vmid, addr, | ||
684 | REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, | ||
685 | MEMORY_CLIENT_RW) ? | ||
686 | "write" : "read", block, mc_client, mc_id); | ||
687 | } | ||
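For reference, mc_client is four packed ASCII bytes naming the memory client; the block[] construction above simply unpacks them most-significant byte first. With a hypothetical value of 0x43423130, for example:

	/* illustrative only: 0x43 'C', 0x42 'B', 0x31 '1', 0x30 '0' -> "CB10" */
	u32 mc_client = 0x43423130;
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };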
688 | |||
689 | |||
690 | static const u32 mc_cg_registers[] = { | ||
691 | mmMC_HUB_MISC_HUB_CG, | ||
692 | mmMC_HUB_MISC_SIP_CG, | ||
693 | mmMC_HUB_MISC_VM_CG, | ||
694 | mmMC_XPB_CLK_GAT, | ||
695 | mmATC_MISC_CG, | ||
696 | mmMC_CITF_MISC_WR_CG, | ||
697 | mmMC_CITF_MISC_RD_CG, | ||
698 | mmMC_CITF_MISC_VM_CG, | ||
699 | mmVM_L2_CG, | ||
700 | }; | ||
701 | |||
702 | static const u32 mc_cg_ls_en[] = { | ||
703 | MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK, | ||
704 | MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK, | ||
705 | MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK, | ||
706 | MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK, | ||
707 | ATC_MISC_CG__MEM_LS_ENABLE_MASK, | ||
708 | MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK, | ||
709 | MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK, | ||
710 | MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK, | ||
711 | VM_L2_CG__MEM_LS_ENABLE_MASK, | ||
712 | }; | ||
713 | |||
714 | static const u32 mc_cg_en[] = { | ||
715 | MC_HUB_MISC_HUB_CG__ENABLE_MASK, | ||
716 | MC_HUB_MISC_SIP_CG__ENABLE_MASK, | ||
717 | MC_HUB_MISC_VM_CG__ENABLE_MASK, | ||
718 | MC_XPB_CLK_GAT__ENABLE_MASK, | ||
719 | ATC_MISC_CG__ENABLE_MASK, | ||
720 | MC_CITF_MISC_WR_CG__ENABLE_MASK, | ||
721 | MC_CITF_MISC_RD_CG__ENABLE_MASK, | ||
722 | MC_CITF_MISC_VM_CG__ENABLE_MASK, | ||
723 | VM_L2_CG__ENABLE_MASK, | ||
724 | }; | ||
725 | |||
726 | static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, | ||
727 | bool enable) | ||
728 | { | ||
729 | int i; | ||
730 | u32 orig, data; | ||
731 | |||
732 | for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { | ||
733 | orig = data = RREG32(mc_cg_registers[i]); | ||
734 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) | ||
735 | data |= mc_cg_ls_en[i]; | ||
736 | else | ||
737 | data &= ~mc_cg_ls_en[i]; | ||
738 | if (data != orig) | ||
739 | WREG32(mc_cg_registers[i], data); | ||
740 | } | ||
741 | } | ||
742 | |||
743 | static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, | ||
744 | bool enable) | ||
745 | { | ||
746 | int i; | ||
747 | u32 orig, data; | ||
748 | |||
749 | for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { | ||
750 | orig = data = RREG32(mc_cg_registers[i]); | ||
751 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) | ||
752 | data |= mc_cg_en[i]; | ||
753 | else | ||
754 | data &= ~mc_cg_en[i]; | ||
755 | if (data != orig) | ||
756 | WREG32(mc_cg_registers[i], data); | ||
757 | } | ||
758 | } | ||
759 | |||
760 | static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, | ||
761 | bool enable) | ||
762 | { | ||
763 | u32 orig, data; | ||
764 | |||
765 | orig = data = RREG32_PCIE(ixPCIE_CNTL2); | ||
766 | |||
767 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { | ||
768 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); | ||
769 | data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); | ||
770 | data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); | ||
771 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1); | ||
772 | } else { | ||
773 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0); | ||
774 | data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0); | ||
775 | data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0); | ||
776 | data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0); | ||
777 | } | ||
778 | |||
779 | if (orig != data) | ||
780 | WREG32_PCIE(ixPCIE_CNTL2, data); | ||
781 | } | ||
782 | |||
783 | static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, | ||
784 | bool enable) | ||
785 | { | ||
786 | u32 orig, data; | ||
787 | |||
788 | orig = data = RREG32(mmHDP_HOST_PATH_CNTL); | ||
789 | |||
790 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) | ||
791 | data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); | ||
792 | else | ||
793 | data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); | ||
794 | |||
795 | if (orig != data) | ||
796 | WREG32(mmHDP_HOST_PATH_CNTL, data); | ||
797 | } | ||
798 | |||
799 | static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, | ||
800 | bool enable) | ||
801 | { | ||
802 | u32 orig, data; | ||
803 | |||
804 | orig = data = RREG32(mmHDP_MEM_POWER_LS); | ||
805 | |||
806 | if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) | ||
807 | data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); | ||
808 | else | ||
809 | data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); | ||
810 | |||
811 | if (orig != data) | ||
812 | WREG32(mmHDP_MEM_POWER_LS, data); | ||
813 | } | ||
814 | |||
815 | static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type) | ||
816 | { | ||
817 | switch (mc_seq_vram_type) { | ||
818 | case MC_SEQ_MISC0__MT__GDDR1: | ||
819 | return AMDGPU_VRAM_TYPE_GDDR1; | ||
820 | case MC_SEQ_MISC0__MT__DDR2: | ||
821 | return AMDGPU_VRAM_TYPE_DDR2; | ||
822 | case MC_SEQ_MISC0__MT__GDDR3: | ||
823 | return AMDGPU_VRAM_TYPE_GDDR3; | ||
824 | case MC_SEQ_MISC0__MT__GDDR4: | ||
825 | return AMDGPU_VRAM_TYPE_GDDR4; | ||
826 | case MC_SEQ_MISC0__MT__GDDR5: | ||
827 | return AMDGPU_VRAM_TYPE_GDDR5; | ||
828 | case MC_SEQ_MISC0__MT__HBM: | ||
829 | return AMDGPU_VRAM_TYPE_HBM; | ||
830 | case MC_SEQ_MISC0__MT__DDR3: | ||
831 | return AMDGPU_VRAM_TYPE_DDR3; | ||
832 | default: | ||
833 | return AMDGPU_VRAM_TYPE_UNKNOWN; | ||
834 | } | ||
835 | } | ||
836 | |||
837 | static int gmc_v7_0_early_init(void *handle) | ||
838 | { | ||
839 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
840 | |||
841 | gmc_v7_0_set_gart_funcs(adev); | ||
842 | gmc_v7_0_set_irq_funcs(adev); | ||
843 | |||
844 | if (adev->flags & AMDGPU_IS_APU) { | ||
845 | adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; | ||
846 | } else { | ||
847 | u32 tmp = RREG32(mmMC_SEQ_MISC0); | ||
848 | tmp &= MC_SEQ_MISC0__MT__MASK; | ||
849 | adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp); | ||
850 | } | ||
851 | |||
852 | return 0; | ||
853 | } | ||
854 | |||
855 | static int gmc_v7_0_sw_init(void *handle) | ||
856 | { | ||
857 | int r; | ||
858 | int dma_bits; | ||
859 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
860 | |||
861 | r = amdgpu_gem_init(adev); | ||
862 | if (r) | ||
863 | return r; | ||
864 | |||
865 | r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); | ||
866 | if (r) | ||
867 | return r; | ||
868 | |||
869 | r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault); | ||
870 | if (r) | ||
871 | return r; | ||
872 | |||
873 | /* Adjust VM size here. | ||
874 | * Currently set to 4GB ((1 << 20) 4k pages); amdgpu_vm_size is in GB, | ||
875 | * so the shift by 18 below converts GB to 4k pages. Max GPUVM size for CIK is 40 bits. | ||
876 | */ | ||
877 | adev->vm_manager.max_pfn = amdgpu_vm_size << 18; | ||
878 | |||
879 | /* Set the internal MC address mask | ||
880 | * This is the max address of the GPU's | ||
881 | * internal address space. | ||
882 | */ | ||
883 | adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ | ||
884 | |||
885 | /* set DMA mask + need_dma32 flags. | ||
886 | * PCIE - can handle 40-bits. | ||
887 | * IGP - can handle 40-bits | ||
888 | * PCI - dma32 for legacy pci gart, 40 bits on newer asics | ||
889 | */ | ||
890 | adev->need_dma32 = false; | ||
891 | dma_bits = adev->need_dma32 ? 32 : 40; | ||
892 | r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); | ||
893 | if (r) { | ||
894 | adev->need_dma32 = true; | ||
895 | dma_bits = 32; | ||
896 | printk(KERN_WARNING "amdgpu: No suitable DMA available.\n"); | ||
897 | } | ||
898 | r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); | ||
899 | if (r) { | ||
900 | pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32)); | ||
901 | printk(KERN_WARNING "amdgpu: No coherent DMA available.\n"); | ||
902 | } | ||
903 | |||
904 | r = gmc_v7_0_init_microcode(adev); | ||
905 | if (r) { | ||
906 | DRM_ERROR("Failed to load mc firmware!\n"); | ||
907 | return r; | ||
908 | } | ||
909 | |||
910 | r = gmc_v7_0_mc_init(adev); | ||
911 | if (r) | ||
912 | return r; | ||
913 | |||
914 | /* Memory manager */ | ||
915 | r = amdgpu_bo_init(adev); | ||
916 | if (r) | ||
917 | return r; | ||
918 | |||
919 | r = gmc_v7_0_gart_init(adev); | ||
920 | if (r) | ||
921 | return r; | ||
922 | |||
923 | if (!adev->vm_manager.enabled) { | ||
924 | r = gmc_v7_0_vm_init(adev); | ||
925 | if (r) { | ||
926 | dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); | ||
927 | return r; | ||
928 | } | ||
929 | adev->vm_manager.enabled = true; | ||
930 | } | ||
931 | |||
932 | return r; | ||
933 | } | ||
934 | |||
935 | static int gmc_v7_0_sw_fini(void *handle) | ||
936 | { | ||
937 | int i; | ||
938 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
939 | |||
940 | if (adev->vm_manager.enabled) { | ||
941 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | ||
942 | amdgpu_fence_unref(&adev->vm_manager.active[i]); | ||
943 | gmc_v7_0_vm_fini(adev); | ||
944 | adev->vm_manager.enabled = false; | ||
945 | } | ||
946 | gmc_v7_0_gart_fini(adev); | ||
947 | amdgpu_gem_fini(adev); | ||
948 | amdgpu_bo_fini(adev); | ||
949 | |||
950 | return 0; | ||
951 | } | ||
952 | |||
953 | static int gmc_v7_0_hw_init(void *handle) | ||
954 | { | ||
955 | int r; | ||
956 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
957 | |||
958 | gmc_v7_0_mc_program(adev); | ||
959 | |||
960 | if (!(adev->flags & AMDGPU_IS_APU)) { | ||
961 | r = gmc_v7_0_mc_load_microcode(adev); | ||
962 | if (r) { | ||
963 | DRM_ERROR("Failed to load MC firmware!\n"); | ||
964 | return r; | ||
965 | } | ||
966 | } | ||
967 | |||
968 | r = gmc_v7_0_gart_enable(adev); | ||
969 | if (r) | ||
970 | return r; | ||
971 | |||
972 | return r; | ||
973 | } | ||
974 | |||
975 | static int gmc_v7_0_hw_fini(void *handle) | ||
976 | { | ||
977 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
978 | |||
979 | gmc_v7_0_gart_disable(adev); | ||
980 | |||
981 | return 0; | ||
982 | } | ||
983 | |||
984 | static int gmc_v7_0_suspend(void *handle) | ||
985 | { | ||
986 | int i; | ||
987 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
988 | |||
989 | if (adev->vm_manager.enabled) { | ||
990 | for (i = 0; i < AMDGPU_NUM_VM; ++i) | ||
991 | amdgpu_fence_unref(&adev->vm_manager.active[i]); | ||
992 | gmc_v7_0_vm_fini(adev); | ||
993 | adev->vm_manager.enabled = false; | ||
994 | } | ||
995 | gmc_v7_0_hw_fini(adev); | ||
996 | |||
997 | return 0; | ||
998 | } | ||
999 | |||
1000 | static int gmc_v7_0_resume(void *handle) | ||
1001 | { | ||
1002 | int r; | ||
1003 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
1004 | |||
1005 | r = gmc_v7_0_hw_init(adev); | ||
1006 | if (r) | ||
1007 | return r; | ||
1008 | |||
1009 | if (!adev->vm_manager.enabled) { | ||
1010 | r = gmc_v7_0_vm_init(adev); | ||
1011 | if (r) { | ||
1012 | dev_err(adev->dev, "vm manager initialization failed (%d).\n", r); | ||
1013 | return r; | ||
1014 | } | ||
1015 | adev->vm_manager.enabled = true; | ||
1016 | } | ||
1017 | |||
1018 | return r; | ||
1019 | } | ||
1020 | |||
1021 | static bool gmc_v7_0_is_idle(void *handle) | ||
1022 | { | ||
1023 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
1024 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
1025 | |||
1026 | if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | ||
1027 | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK)) | ||
1028 | return false; | ||
1029 | |||
1030 | return true; | ||
1031 | } | ||
1032 | |||
1033 | static int gmc_v7_0_wait_for_idle(void *handle) | ||
1034 | { | ||
1035 | unsigned i; | ||
1036 | u32 tmp; | ||
1037 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
1038 | |||
1039 | for (i = 0; i < adev->usec_timeout; i++) { | ||
1040 | /* check the MC busy bits in SRBM_STATUS */ | ||
1041 | tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK | | ||
1042 | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | ||
1043 | SRBM_STATUS__MCC_BUSY_MASK | | ||
1044 | SRBM_STATUS__MCD_BUSY_MASK | | ||
1045 | SRBM_STATUS__VMC_BUSY_MASK); | ||
1046 | if (!tmp) | ||
1047 | return 0; | ||
1048 | udelay(1); | ||
1049 | } | ||
1050 | return -ETIMEDOUT; | ||
1051 | |||
1052 | } | ||
1053 | |||
1054 | static void gmc_v7_0_print_status(void *handle) | ||
1055 | { | ||
1056 | int i, j; | ||
1057 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
1058 | |||
1059 | dev_info(adev->dev, "GMC 7.x registers\n"); | ||
1060 | dev_info(adev->dev, " SRBM_STATUS=0x%08X\n", | ||
1061 | RREG32(mmSRBM_STATUS)); | ||
1062 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | ||
1063 | RREG32(mmSRBM_STATUS2)); | ||
1064 | |||
1065 | dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | ||
1066 | RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR)); | ||
1067 | dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | ||
1068 | RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS)); | ||
1069 | dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n", | ||
1070 | RREG32(mmMC_VM_MX_L1_TLB_CNTL)); | ||
1071 | dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n", | ||
1072 | RREG32(mmVM_L2_CNTL)); | ||
1073 | dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n", | ||
1074 | RREG32(mmVM_L2_CNTL2)); | ||
1075 | dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n", | ||
1076 | RREG32(mmVM_L2_CNTL3)); | ||
1077 | dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n", | ||
1078 | RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR)); | ||
1079 | dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n", | ||
1080 | RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR)); | ||
1081 | dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", | ||
1082 | RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR)); | ||
1083 | dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n", | ||
1084 | RREG32(mmVM_CONTEXT0_CNTL2)); | ||
1085 | dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n", | ||
1086 | RREG32(mmVM_CONTEXT0_CNTL)); | ||
1087 | dev_info(adev->dev, " 0x15D4=0x%08X\n", | ||
1088 | RREG32(0x575)); | ||
1089 | dev_info(adev->dev, " 0x15D8=0x%08X\n", | ||
1090 | RREG32(0x576)); | ||
1091 | dev_info(adev->dev, " 0x15DC=0x%08X\n", | ||
1092 | RREG32(0x577)); | ||
1093 | dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n", | ||
1094 | RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR)); | ||
1095 | dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n", | ||
1096 | RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR)); | ||
1097 | dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n", | ||
1098 | RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR)); | ||
1099 | dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n", | ||
1100 | RREG32(mmVM_CONTEXT1_CNTL2)); | ||
1101 | dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n", | ||
1102 | RREG32(mmVM_CONTEXT1_CNTL)); | ||
1103 | for (i = 0; i < 16; i++) { | ||
1104 | if (i < 8) | ||
1105 | dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", | ||
1106 | i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i)); | ||
1107 | else | ||
1108 | dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n", | ||
1109 | i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8)); | ||
1110 | } | ||
1111 | dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n", | ||
1112 | RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR)); | ||
1113 | dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n", | ||
1114 | RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR)); | ||
1115 | dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n", | ||
1116 | RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR)); | ||
1117 | dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n", | ||
1118 | RREG32(mmMC_VM_FB_LOCATION)); | ||
1119 | dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n", | ||
1120 | RREG32(mmMC_VM_AGP_BASE)); | ||
1121 | dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n", | ||
1122 | RREG32(mmMC_VM_AGP_TOP)); | ||
1123 | dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n", | ||
1124 | RREG32(mmMC_VM_AGP_BOT)); | ||
1125 | |||
1126 | if (adev->asic_type == CHIP_KAVERI) { | ||
1127 | dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n", | ||
1128 | RREG32(mmCHUB_CONTROL)); | ||
1129 | } | ||
1130 | |||
1131 | dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n", | ||
1132 | RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL)); | ||
1133 | dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n", | ||
1134 | RREG32(mmHDP_NONSURFACE_BASE)); | ||
1135 | dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n", | ||
1136 | RREG32(mmHDP_NONSURFACE_INFO)); | ||
1137 | dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n", | ||
1138 | RREG32(mmHDP_NONSURFACE_SIZE)); | ||
1139 | dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n", | ||
1140 | RREG32(mmHDP_MISC_CNTL)); | ||
1141 | dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n", | ||
1142 | RREG32(mmHDP_HOST_PATH_CNTL)); | ||
1143 | |||
1144 | for (i = 0, j = 0; i < 32; i++, j += 0x6) { | ||
1145 | dev_info(adev->dev, " %d:\n", i); | ||
1146 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1147 | 0xb05 + j, RREG32(0xb05 + j)); | ||
1148 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1149 | 0xb06 + j, RREG32(0xb06 + j)); | ||
1150 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1151 | 0xb07 + j, RREG32(0xb07 + j)); | ||
1152 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1153 | 0xb08 + j, RREG32(0xb08 + j)); | ||
1154 | dev_info(adev->dev, " 0x%04X=0x%08X\n", | ||
1155 | 0xb09 + j, RREG32(0xb09 + j)); | ||
1156 | } | ||
1157 | |||
1158 | dev_info(adev->dev, " BIF_FB_EN=0x%08X\n", | ||
1159 | RREG32(mmBIF_FB_EN)); | ||
1160 | } | ||
1161 | |||
1162 | static int gmc_v7_0_soft_reset(void *handle) | ||
1163 | { | ||
1164 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
1165 | struct amdgpu_mode_mc_save save; | ||
1166 | u32 srbm_soft_reset = 0; | ||
1167 | u32 tmp = RREG32(mmSRBM_STATUS); | ||
1168 | |||
1169 | if (tmp & SRBM_STATUS__VMC_BUSY_MASK) | ||
1170 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, | ||
1171 | SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); | ||
1172 | |||
1173 | if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | | ||
1174 | SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { | ||
1175 | if (!(adev->flags & AMDGPU_IS_APU)) | ||
1176 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, | ||
1177 | SRBM_SOFT_RESET, SOFT_RESET_MC, 1); | ||
1178 | } | ||
1179 | |||
1180 | if (srbm_soft_reset) { | ||
1181 | gmc_v7_0_print_status((void *)adev); | ||
1182 | |||
1183 | gmc_v7_0_mc_stop(adev, &save); | ||
1184 | if (gmc_v7_0_wait_for_idle(adev)) { | ||
1185 | dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); | ||
1186 | } | ||
1187 | |||
1188 | |||
1189 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1190 | tmp |= srbm_soft_reset; | ||
1191 | dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); | ||
1192 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
1193 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1194 | |||
1195 | udelay(50); | ||
1196 | |||
1197 | tmp &= ~srbm_soft_reset; | ||
1198 | WREG32(mmSRBM_SOFT_RESET, tmp); | ||
1199 | tmp = RREG32(mmSRBM_SOFT_RESET); | ||
1200 | |||
1201 | /* Wait a little for things to settle down */ | ||
1202 | udelay(50); | ||
1203 | |||
1204 | gmc_v7_0_mc_resume(adev, &save); | ||
1205 | udelay(50); | ||
1206 | |||
1207 | gmc_v7_0_print_status((void *)adev); | ||
1208 | } | ||
1209 | |||
1210 | return 0; | ||
1211 | } | ||
1212 | |||
1213 | static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev, | ||
1214 | struct amdgpu_irq_src *src, | ||
1215 | unsigned type, | ||
1216 | enum amdgpu_interrupt_state state) | ||
1217 | { | ||
1218 | u32 tmp; | ||
1219 | u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1220 | VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1221 | VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1222 | VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1223 | VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | | ||
1224 | VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK); | ||
1225 | |||
1226 | switch (state) { | ||
1227 | case AMDGPU_IRQ_STATE_DISABLE: | ||
1228 | /* system context */ | ||
1229 | tmp = RREG32(mmVM_CONTEXT0_CNTL); | ||
1230 | tmp &= ~bits; | ||
1231 | WREG32(mmVM_CONTEXT0_CNTL, tmp); | ||
1232 | /* VMs */ | ||
1233 | tmp = RREG32(mmVM_CONTEXT1_CNTL); | ||
1234 | tmp &= ~bits; | ||
1235 | WREG32(mmVM_CONTEXT1_CNTL, tmp); | ||
1236 | break; | ||
1237 | case AMDGPU_IRQ_STATE_ENABLE: | ||
1238 | /* system context */ | ||
1239 | tmp = RREG32(mmVM_CONTEXT0_CNTL); | ||
1240 | tmp |= bits; | ||
1241 | WREG32(mmVM_CONTEXT0_CNTL, tmp); | ||
1242 | /* VMs */ | ||
1243 | tmp = RREG32(mmVM_CONTEXT1_CNTL); | ||
1244 | tmp |= bits; | ||
1245 | WREG32(mmVM_CONTEXT1_CNTL, tmp); | ||
1246 | break; | ||
1247 | default: | ||
1248 | break; | ||
1249 | } | ||
1250 | |||
1251 | return 0; | ||
1252 | } | ||
1253 | |||
1254 | static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, | ||
1255 | struct amdgpu_irq_src *source, | ||
1256 | struct amdgpu_iv_entry *entry) | ||
1257 | { | ||
1258 | u32 addr, status, mc_client; | ||
1259 | |||
1260 | addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); | ||
1261 | status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); | ||
1262 | mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); | ||
1263 | dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", | ||
1264 | entry->src_id, entry->src_data); | ||
1265 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", | ||
1266 | addr); | ||
1267 | dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", | ||
1268 | status); | ||
1269 | gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client); | ||
1270 | /* reset addr and status */ | ||
1271 | WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); | ||
1272 | |||
1273 | return 0; | ||
1274 | } | ||
1275 | |||
1276 | static int gmc_v7_0_set_clockgating_state(void *handle, | ||
1277 | enum amd_clockgating_state state) | ||
1278 | { | ||
1279 | bool gate = false; | ||
1280 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
1281 | |||
1282 | if (state == AMD_CG_STATE_GATE) | ||
1283 | gate = true; | ||
1284 | |||
1285 | if (!(adev->flags & AMDGPU_IS_APU)) { | ||
1286 | gmc_v7_0_enable_mc_mgcg(adev, gate); | ||
1287 | gmc_v7_0_enable_mc_ls(adev, gate); | ||
1288 | } | ||
1289 | gmc_v7_0_enable_bif_mgls(adev, gate); | ||
1290 | gmc_v7_0_enable_hdp_mgcg(adev, gate); | ||
1291 | gmc_v7_0_enable_hdp_ls(adev, gate); | ||
1292 | |||
1293 | return 0; | ||
1294 | } | ||
1295 | |||
1296 | static int gmc_v7_0_set_powergating_state(void *handle, | ||
1297 | enum amd_powergating_state state) | ||
1298 | { | ||
1299 | return 0; | ||
1300 | } | ||
1301 | |||
1302 | const struct amd_ip_funcs gmc_v7_0_ip_funcs = { | ||
1303 | .early_init = gmc_v7_0_early_init, | ||
1304 | .late_init = NULL, | ||
1305 | .sw_init = gmc_v7_0_sw_init, | ||
1306 | .sw_fini = gmc_v7_0_sw_fini, | ||
1307 | .hw_init = gmc_v7_0_hw_init, | ||
1308 | .hw_fini = gmc_v7_0_hw_fini, | ||
1309 | .suspend = gmc_v7_0_suspend, | ||
1310 | .resume = gmc_v7_0_resume, | ||
1311 | .is_idle = gmc_v7_0_is_idle, | ||
1312 | .wait_for_idle = gmc_v7_0_wait_for_idle, | ||
1313 | .soft_reset = gmc_v7_0_soft_reset, | ||
1314 | .print_status = gmc_v7_0_print_status, | ||
1315 | .set_clockgating_state = gmc_v7_0_set_clockgating_state, | ||
1316 | .set_powergating_state = gmc_v7_0_set_powergating_state, | ||
1317 | }; | ||
1318 | |||
1319 | static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = { | ||
1320 | .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb, | ||
1321 | .set_pte_pde = gmc_v7_0_gart_set_pte_pde, | ||
1322 | }; | ||
1323 | |||
1324 | static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { | ||
1325 | .set = gmc_v7_0_vm_fault_interrupt_state, | ||
1326 | .process = gmc_v7_0_process_interrupt, | ||
1327 | }; | ||
1328 | |||
1329 | static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev) | ||
1330 | { | ||
1331 | if (adev->gart.gart_funcs == NULL) | ||
1332 | adev->gart.gart_funcs = &gmc_v7_0_gart_funcs; | ||
1333 | } | ||
1334 | |||
1335 | static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) | ||
1336 | { | ||
1337 | adev->mc.vm_fault.num_types = 1; | ||
1338 | adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs; | ||
1339 | } | ||