Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c')
 drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 339 +++++++++++++++++++---------------
 1 file changed, 185 insertions(+), 154 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 1940d36bc304..64d3c1e6014c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -1,4 +1,3 @@
-
 /*
  * Copyright 2014 Advanced Micro Devices, Inc.
  *
@@ -26,7 +25,16 @@
 #include "amdgpu.h"
 #include "gmc_v6_0.h"
 #include "amdgpu_ucode.h"
-#include "si/sid.h"
+
+#include "bif/bif_3_0_d.h"
+#include "bif/bif_3_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+#include "gmc/gmc_6_0_d.h"
+#include "gmc/gmc_6_0_sh_mask.h"
+#include "dce/dce_6_0_d.h"
+#include "dce/dce_6_0_sh_mask.h"
+#include "si_enums.h"
 
 static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -37,6 +45,16 @@ MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
 
+#define MC_SEQ_MISC0__MT__MASK 0xf0000000
+#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
+#define MC_SEQ_MISC0__MT__DDR2 0x20000000
+#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
+#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
+#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
+#define MC_SEQ_MISC0__MT__HBM 0x60000000
+#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
+
+
 static const u32 crtc_offsets[6] =
 {
 	SI_CRTC0_REGISTER_OFFSET,
@@ -57,14 +75,14 @@ static void gmc_v6_0_mc_stop(struct amdgpu_device *adev,
 
 	gmc_v6_0_wait_for_idle((void *)adev);
 
-	blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
-	if (REG_GET_FIELD(blackout, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE) != 1) {
+	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
 		/* Block CPU access */
-		WREG32(BIF_FB_EN, 0);
+		WREG32(mmBIF_FB_EN, 0);
 		/* blackout the MC */
 		blackout = REG_SET_FIELD(blackout,
-				mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
-		WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
+				MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
 	}
 	/* wait for the MC to settle */
 	udelay(100);
@@ -77,13 +95,13 @@ static void gmc_v6_0_mc_resume(struct amdgpu_device *adev,
 	u32 tmp;
 
 	/* unblackout the MC */
-	tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
-	tmp = REG_SET_FIELD(tmp, mmMC_SHARED_BLACKOUT_CNTL, xxBLACKOUT_MODE, 0);
-	WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
+	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
+	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
+	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
 	/* allow CPU access */
-	tmp = REG_SET_FIELD(0, mmBIF_FB_EN, xxFB_READ_EN, 1);
-	tmp = REG_SET_FIELD(tmp, mmBIF_FB_EN, xxFB_WRITE_EN, 1);
-	WREG32(BIF_FB_EN, tmp);
+	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
+	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
+	WREG32(mmBIF_FB_EN, tmp);
 
 	if (adev->mode_info.num_crtc)
 		amdgpu_display_resume_mc_access(adev, save);
@@ -158,37 +176,37 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 	new_fw_data = (const __le32 *)
 		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
-	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
 
 	if (running == 0) {
 
 		/* reset the engine and set to writable */
-		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
-		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);
 
 		/* load mc io regs */
 		for (i = 0; i < regs_size; i++) {
-			WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
-			WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
+			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
+			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
 		}
 		/* load the MC ucode */
 		for (i = 0; i < ucode_size; i++) {
-			WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
+			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
 		}
 
 		/* put the engine back into the active state */
-		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
-		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
-		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);
 
 		/* wait for training to complete */
 		for (i = 0; i < adev->usec_timeout; i++) {
-			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
+			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
 				break;
 			udelay(1);
 		}
 		for (i = 0; i < adev->usec_timeout; i++) {
-			if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
+			if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
 				break;
 			udelay(1);
 		}
@@ -225,7 +243,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 		WREG32((0xb08 + j), 0x00000000);
 		WREG32((0xb09 + j), 0x00000000);
 	}
-	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
 	gmc_v6_0_mc_stop(adev, &save);
 
@@ -233,24 +251,24 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
 	}
 
-	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+	WREG32(mmVGA_HDP_CONTROL, VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK);
 	/* Update configuration */
-	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
 	       adev->mc.vram_start >> 12);
-	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 	       adev->mc.vram_end >> 12);
-	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
+	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 	       adev->vram_scratch.gpu_addr >> 12);
 	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
-	WREG32(MC_VM_FB_LOCATION, tmp);
+	WREG32(mmMC_VM_FB_LOCATION, tmp);
 	/* XXX double check these! */
-	WREG32(HDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
-	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
-	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
-	WREG32(MC_VM_AGP_BASE, 0);
-	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
-	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
+	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
+	WREG32(mmMC_VM_AGP_BASE, 0);
+	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
+	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
 
 	if (gmc_v6_0_wait_for_idle((void *)adev)) {
 		dev_warn(adev->dev, "Wait for MC idle timedout !\n");
@@ -265,16 +283,16 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	u32 tmp;
 	int chansize, numchan;
 
-	tmp = RREG32(MC_ARB_RAMCFG);
-	if (tmp & CHANSIZE_OVERRIDE) {
+	tmp = RREG32(mmMC_ARB_RAMCFG);
+	if (tmp & (1 << 11)) {
 		chansize = 16;
-	} else if (tmp & CHANSIZE_MASK) {
+	} else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK) {
 		chansize = 64;
 	} else {
 		chansize = 32;
 	}
-	tmp = RREG32(MC_SHARED_CHMAP);
-	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	tmp = RREG32(mmMC_SHARED_CHMAP);
+	switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
 	case 0:
 	default:
 		numchan = 1;
@@ -309,8 +327,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
 	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
 	/* size in MB on si */
-	adev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-	adev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	adev->mc.visible_vram_size = adev->mc.aper_size;
 
 	/* unless the user had overridden it, set the gart
@@ -329,9 +347,9 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 					uint32_t vmid)
 {
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vmid);
+	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
 static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
@@ -355,20 +373,20 @@ static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
 {
 	u32 tmp;
 
-	tmp = RREG32(VM_CONTEXT1_CNTL);
-	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
-			    xxRANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
-			    xxDUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
-			    xxPDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
-			    xxVALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
-			    xxREAD_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-	tmp = REG_SET_FIELD(tmp, mmVM_CONTEXT1_CNTL,
-			    xxWRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
-	WREG32(VM_CONTEXT1_CNTL, tmp);
+	tmp = RREG32(mmVM_CONTEXT1_CNTL);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	WREG32(mmVM_CONTEXT1_CNTL, tmp);
 }
 
 static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
@@ -383,33 +401,39 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 	/* Setup TLB control */
-	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
 	       (0xA << 7) |
-	       ENABLE_L1_TLB |
-	       ENABLE_L1_FRAGMENT_PROCESSING |
-	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
-	       ENABLE_ADVANCED_DRIVER_MODEL |
-	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
+	       MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
+	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
+	       MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
+	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 	/* Setup L2 cache */
-	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
-	       ENABLE_L2_FRAGMENT_PROCESSING |
-	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
-	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
-	       EFFECTIVE_L2_QUEUE_SIZE(7) |
-	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
-	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
-	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
-	       BANK_SELECT(4) |
-	       L2_CACHE_BIGK_FRAGMENT_SIZE(4));
+	WREG32(mmVM_L2_CNTL,
+	       VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
+	       VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
+	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
+	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
+	WREG32(mmVM_L2_CNTL2,
+	       VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
+	       VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);
+	WREG32(mmVM_L2_CNTL3,
+	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
+	       (4UL << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
+	       (4UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 	/* setup context0 */
-	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
-	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
-	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
+	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 	       (u32)(adev->dummy_page.addr >> 12));
-	WREG32(VM_CONTEXT0_CNTL2, 0);
-	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
-				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
+	WREG32(mmVM_CONTEXT0_CNTL2, 0);
+	WREG32(mmVM_CONTEXT0_CNTL,
+	       VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
+	       (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
+	       VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
 
 	WREG32(0x575, 0);
 	WREG32(0x576, 0);
@@ -417,39 +441,41 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 
 	/* empty context1-15 */
 	/* set vm size, must be a multiple of 4 */
-	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
+	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
+	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
 	/* Assign the pt base to something valid for now; the pts used for
 	 * the VMs are determined by the application and setup and assigned
 	 * on the fly in the vm part of radeon_gart.c
 	 */
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
-			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
+			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
 			       adev->gart.table_addr >> 12);
 		else
-			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
+			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
 			       adev->gart.table_addr >> 12);
 	}
 
 	/* enable context1-15 */
-	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
 	       (u32)(adev->dummy_page.addr >> 12));
-	WREG32(VM_CONTEXT1_CNTL2, 4);
-	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-	       PAGE_TABLE_BLOCK_SIZE(amdgpu_vm_block_size - 9) |
-	       RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
-	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
-	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
-	       DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
-	       PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
-	       PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
-	       VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
-	       VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
-	       READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
-	       READ_PROTECTION_FAULT_ENABLE_DEFAULT |
-	       WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
-	       WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
+	WREG32(mmVM_CONTEXT1_CNTL2, 4);
+	WREG32(mmVM_CONTEXT1_CNTL,
+	       VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
+	       (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
+	       ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
+	       VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+	       VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+	       VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+	       VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+	       VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+	       VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
+	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+	       VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
 
 	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -488,19 +514,22 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
 	}*/
 
 	/* Disable all tables */
-	WREG32(VM_CONTEXT0_CNTL, 0);
-	WREG32(VM_CONTEXT1_CNTL, 0);
+	WREG32(mmVM_CONTEXT0_CNTL, 0);
+	WREG32(mmVM_CONTEXT1_CNTL, 0);
 	/* Setup TLB control */
-	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
-	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
+	WREG32(mmMC_VM_MX_L1_TLB_CNTL,
+	       MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
+	       (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
 	/* Setup L2 cache */
-	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
-	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
-	       EFFECTIVE_L2_QUEUE_SIZE(7) |
-	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
-	WREG32(VM_L2_CNTL2, 0);
-	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
-	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
+	WREG32(mmVM_L2_CNTL,
+	       VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+	       VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
+	       (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
+	       (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
+	WREG32(mmVM_L2_CNTL2, 0);
+	WREG32(mmVM_L2_CNTL3,
+	       VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
+	       (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 	amdgpu_gart_table_vram_unpin(adev);
 }
 
@@ -523,7 +552,7 @@ static int gmc_v6_0_vm_init(struct amdgpu_device *adev)
 
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU) {
-		u64 tmp = RREG32(MC_VM_FB_OFFSET);
+		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
 		tmp <<= 22;
 		adev->vm_manager.vram_base_offset = tmp;
 	} else
@@ -540,19 +569,19 @@ static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
 				     u32 status, u32 addr, u32 mc_client)
 {
 	u32 mc_id;
-	u32 vmid = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS, xxVMID);
-	u32 protections = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
-					xxPROTECTIONS);
+	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+					PROTECTIONS);
 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
 
-	mc_id = REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
-			      xxMEMORY_CLIENT_ID);
+	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+			      MEMORY_CLIENT_ID);
 
 	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
 		protections, vmid, addr,
-		REG_GET_FIELD(status, mmVM_CONTEXT1_PROTECTION_FAULT_STATUS,
-			      xxMEMORY_CLIENT_RW) ?
+		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+			      MEMORY_CLIENT_RW) ?
 		"write" : "read", block, mc_client, mc_id);
 }
 
@@ -655,7 +684,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
 {
 	u32 orig, data;
 
-	orig = data = RREG32(HDP_HOST_PATH_CNTL);
+	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
 
 	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
@@ -663,7 +692,7 @@ static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
 		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
 
 	if (orig != data)
-		WREG32(HDP_HOST_PATH_CNTL, data);
+		WREG32(mmHDP_HOST_PATH_CNTL, data);
 }
 
 static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
@@ -671,7 +700,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
 {
 	u32 orig, data;
 
-	orig = data = RREG32(HDP_MEM_POWER_LS);
+	orig = data = RREG32(mmHDP_MEM_POWER_LS);
 
 	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
@@ -679,7 +708,7 @@ static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
 		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
 
 	if (orig != data)
-		WREG32(HDP_MEM_POWER_LS, data);
+		WREG32(mmHDP_MEM_POWER_LS, data);
 }
 */
 
@@ -713,7 +742,7 @@ static int gmc_v6_0_early_init(void *handle)
 	if (adev->flags & AMD_IS_APU) {
 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
-		u32 tmp = RREG32(MC_SEQ_MISC0);
+		u32 tmp = RREG32(mmMC_SEQ_MISC0);
 		tmp &= MC_SEQ_MISC0__MT__MASK;
 		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
 	}
@@ -879,7 +908,7 @@ static int gmc_v6_0_resume(void *handle)
 static bool gmc_v6_0_is_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	u32 tmp = RREG32(SRBM_STATUS);
+	u32 tmp = RREG32(mmSRBM_STATUS);
 
 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
@@ -895,7 +924,7 @@ static int gmc_v6_0_wait_for_idle(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		tmp = RREG32(SRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
+		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
 					     SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 					     SRBM_STATUS__MCC_BUSY_MASK |
 					     SRBM_STATUS__MCD_BUSY_MASK |
@@ -913,17 +942,17 @@ static int gmc_v6_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_mode_mc_save save;
 	u32 srbm_soft_reset = 0;
-	u32 tmp = RREG32(SRBM_STATUS);
+	u32 tmp = RREG32(mmSRBM_STATUS);
 
 	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
 		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
-						mmSRBM_SOFT_RESET, xxSOFT_RESET_VMC, 1);
+						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);
 
 	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
 		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
 		if (!(adev->flags & AMD_IS_APU))
 			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
-							mmSRBM_SOFT_RESET, xxSOFT_RESET_MC, 1);
+							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
 
 	if (srbm_soft_reset) {
@@ -933,17 +962,17 @@ static int gmc_v6_0_soft_reset(void *handle)
 	}
 
 
-	tmp = RREG32(SRBM_SOFT_RESET);
+	tmp = RREG32(mmSRBM_SOFT_RESET);
 	tmp |= srbm_soft_reset;
 	dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
-	WREG32(SRBM_SOFT_RESET, tmp);
-	tmp = RREG32(SRBM_SOFT_RESET);
+	WREG32(mmSRBM_SOFT_RESET, tmp);
+	tmp = RREG32(mmSRBM_SOFT_RESET);
 
 	udelay(50);
 
 	tmp &= ~srbm_soft_reset;
-	WREG32(SRBM_SOFT_RESET, tmp);
-	tmp = RREG32(SRBM_SOFT_RESET);
+	WREG32(mmSRBM_SOFT_RESET, tmp);
+	tmp = RREG32(mmSRBM_SOFT_RESET);
 
 	udelay(50);
 
@@ -969,20 +998,20 @@ static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		tmp = RREG32(VM_CONTEXT0_CNTL);
+		tmp = RREG32(mmVM_CONTEXT0_CNTL);
 		tmp &= ~bits;
-		WREG32(VM_CONTEXT0_CNTL, tmp);
-		tmp = RREG32(VM_CONTEXT1_CNTL);
+		WREG32(mmVM_CONTEXT0_CNTL, tmp);
+		tmp = RREG32(mmVM_CONTEXT1_CNTL);
 		tmp &= ~bits;
-		WREG32(VM_CONTEXT1_CNTL, tmp);
+		WREG32(mmVM_CONTEXT1_CNTL, tmp);
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		tmp = RREG32(VM_CONTEXT0_CNTL);
+		tmp = RREG32(mmVM_CONTEXT0_CNTL);
 		tmp |= bits;
-		WREG32(VM_CONTEXT0_CNTL, tmp);
-		tmp = RREG32(VM_CONTEXT1_CNTL);
+		WREG32(mmVM_CONTEXT0_CNTL, tmp);
+		tmp = RREG32(mmVM_CONTEXT1_CNTL);
 		tmp |= bits;
-		WREG32(VM_CONTEXT1_CNTL, tmp);
+		WREG32(mmVM_CONTEXT1_CNTL, tmp);
 		break;
 	default:
 		break;
@@ -997,9 +1026,9 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
 {
 	u32 addr, status;
 
-	addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
-	status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
-	WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
+	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
 
 	if (!addr && !status)
 		return 0;
@@ -1007,13 +1036,15 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
 		gmc_v6_0_set_fault_enable_default(adev, false);
 
-	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
-		entry->src_id, entry->src_data);
-	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
-		addr);
-	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
-		status);
-	gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+	if (printk_ratelimit()) {
+		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
+			entry->src_id, entry->src_data);
+		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+			addr);
+		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+			status);
+		gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+	}
 
 	return 0;
 }
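
Note on the register-header convention this patch adopts: the generated "_d.h" headers define each register's dword offset with an mm prefix (e.g. mmMC_SHARED_BLACKOUT_CNTL), and the "_sh_mask.h" headers define, for each field FIELD of register REG, the pair REG__FIELD_MASK and REG__FIELD__SHIFT. The driver's REG_GET_FIELD()/REG_SET_FIELD() helpers paste those names together, which is why the old mm/xx prefixes inside the call sites had to be dropped. A minimal self-contained sketch of the idea follows; FOO/BAR and the GET_FIELD/SET_FIELD names are hypothetical, written in the style of the amdgpu helpers rather than copied from them:

	/* Hypothetical field defines, following the sh_mask naming scheme:
	 * a 4-bit field BAR in register FOO at bit 8. */
	#define FOO__BAR_MASK   0x00000f00
	#define FOO__BAR__SHIFT 0x8

	/* Token-pasting accessors in the style of REG_GET_FIELD/REG_SET_FIELD:
	 * GET_FIELD(v, FOO, BAR) expands to ((v & FOO__BAR_MASK) >> FOO__BAR__SHIFT). */
	#define GET_FIELD(val, reg, field) \
		(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)
	#define SET_FIELD(val, reg, field, fv) \
		(((val) & ~reg##__##field##_MASK) | \
		 (((fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

	/* Example: GET_FIELD(0x00000500, FOO, BAR) == 5,
	 *          SET_FIELD(0, FOO, BAR, 3)      == 0x00000300. */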