Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 456 +++++++++++++++++++-----------
 1 file changed, 293 insertions(+), 163 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4f4a9239069..3b9b58debabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -264,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 	if (adev->vram_scratch.robj == NULL) {
 		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
 				     NULL, NULL, &adev->vram_scratch.robj);
 		if (r) {
 			return r;
@@ -442,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
 static void amdgpu_wb_fini(struct amdgpu_device *adev)
 {
 	if (adev->wb.wb_obj) {
-		if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) {
-			amdgpu_bo_kunmap(adev->wb.wb_obj);
-			amdgpu_bo_unpin(adev->wb.wb_obj);
-			amdgpu_bo_unreserve(adev->wb.wb_obj);
-		}
-		amdgpu_bo_unref(&adev->wb.wb_obj);
-		adev->wb.wb = NULL;
+		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
+				      &adev->wb.gpu_addr,
+				      (void **)&adev->wb.wb);
 		adev->wb.wb_obj = NULL;
 	}
 }
@@ -467,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
 	int r;

 	if (adev->wb.wb_obj == NULL) {
-		r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
-				     &adev->wb.wb_obj);
+		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4,
+					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
+					    (void **)&adev->wb.wb);
 		if (r) {
 			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
 			return r;
 		}
-		r = amdgpu_bo_reserve(adev->wb.wb_obj, false);
-		if (unlikely(r != 0)) {
-			amdgpu_wb_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT,
-				  &adev->wb.gpu_addr);
-		if (r) {
-			amdgpu_bo_unreserve(adev->wb.wb_obj);
-			dev_warn(adev->dev, "(%d) pin WB bo failed\n", r);
-			amdgpu_wb_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb);
-		amdgpu_bo_unreserve(adev->wb.wb_obj);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map WB bo failed\n", r);
-			amdgpu_wb_fini(adev);
-			return r;
-		}

 		adev->wb.num_wb = AMDGPU_MAX_WB;
 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));
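
The wb_init()/wb_fini() rework above replaces the open-coded create/reserve/pin/kmap and kunmap/unpin/unref sequences with the amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() helpers. As a reading aid, here is a sketch of what the create helper plausibly does, reconstructed purely from the boilerplate removed above; the real helper lives elsewhere in the driver and may differ in error handling:

/* Plausible shape of amdgpu_bo_create_kernel(), reconstructed from the
 * create/reserve/pin/kmap sequence this commit deletes. Not the actual
 * implementation. */
static int sketch_bo_create_kernel(struct amdgpu_device *adev,
				   unsigned long size, int align, u32 domain,
				   struct amdgpu_bo **bo_ptr, u64 *gpu_addr,
				   void **cpu_addr)
{
	int r;

	r = amdgpu_bo_create(adev, size, align, true, domain, 0,
			     NULL, NULL, bo_ptr);
	if (r)
		return r;
	r = amdgpu_bo_reserve(*bo_ptr, false);
	if (unlikely(r != 0))
		goto error_free;
	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);	/* keep the GPU address stable */
	if (r)
		goto error_unreserve;
	r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);		/* CPU-visible mapping */
	if (r)
		goto error_unpin;
	amdgpu_bo_unreserve(*bo_ptr);
	return 0;

error_unpin:
	amdgpu_bo_unpin(*bo_ptr);
error_unreserve:
	amdgpu_bo_unreserve(*bo_ptr);
error_free:
	amdgpu_bo_unref(bo_ptr);
	return r;
}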
@@ -1051,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
 			 amdgpu_vm_block_size);
 		amdgpu_vm_block_size = 9;
 	}
+
+	if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) ||
+	    !amdgpu_check_pot_argument(amdgpu_vram_page_split)) {
+		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
+			 amdgpu_vram_page_split);
+		amdgpu_vram_page_split = 1024;
+	}
 }

 /**
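
The new validation pairs a lower bound with a power-of-two test. amdgpu_check_pot_argument() itself is not shown in this hunk; a standalone sketch, assuming it is the usual x & (x - 1) trick:

#include <stdbool.h>
#include <stdio.h>

/* Presumed body of amdgpu_check_pot_argument(): true iff arg is a power
 * of two (assumption; the helper is defined outside this diff). */
static bool check_pot(int arg)
{
	return (arg & (arg - 1)) == 0;
}

int main(void)
{
	int vals[] = { 8, 16, 100, 512, 1024 };

	for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		int v = vals[i];
		/* mirrors the condition in the hunk above */
		bool bad = (v != -1 && v < 16) || !check_pot(v);

		printf("vram_page_split=%d -> %s\n",
		       v, bad ? "rejected, falls back to 1024" : "accepted");
	}
	return 0;
}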
@@ -1125,11 +1110,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 	int i, r = 0;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    state);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										      state);
 			if (r)
 				return r;
 			break;
@@ -1145,11 +1130,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 	int i, r = 0;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
-									    state);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
+										      state);
 			if (r)
 				return r;
 			break;
@@ -1164,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
 	int i, r;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type) {
-			r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+		if (adev->ip_blocks[i].version->type == block_type) {
+			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
 			if (r)
 				return r;
 			break;
@@ -1183,23 +1168,22 @@ bool amdgpu_is_idle(struct amdgpu_device *adev,
 	int i;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].type == block_type)
-			return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+		if (adev->ip_blocks[i].version->type == block_type)
+			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
 	}
 	return true;

 }

-const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
-					struct amdgpu_device *adev,
-					enum amd_ip_block_type type)
+struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
+					     enum amd_ip_block_type type)
 {
 	int i;

 	for (i = 0; i < adev->num_ip_blocks; i++)
-		if (adev->ip_blocks[i].type == type)
+		if (adev->ip_blocks[i].version->type == type)
 			return &adev->ip_blocks[i];

 	return NULL;
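
This is the central restructuring of the commit: the per-instance status bits move out of a separately kcalloc'ed ip_block_status[] array and into each amdgpu_ip_block, which now points at a shared, constant amdgpu_ip_block_version. A minimal standalone model with stub types (the real definitions live in the driver headers):

#include <stddef.h>

enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_GMC, AMD_IP_BLOCK_TYPE_SMC };

struct amdgpu_ip_block_version {	/* shared, read-only per IP driver */
	enum amd_ip_block_type type;
	unsigned major, minor;
	/* const struct amd_ip_funcs *funcs; ... */
};

struct amdgpu_ip_block_status {		/* per device instance */
	int valid, sw, hw, late_initialized, hang;
};

struct amdgpu_ip_block {		/* new: status embedded next to version */
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

struct device_model {
	int num_ip_blocks;
	struct amdgpu_ip_block ip_blocks[16];
};

/* Same lookup as the reworked amdgpu_get_ip_block(): one extra pointer
 * dereference (version->type), but no parallel array to keep in sync. */
static struct amdgpu_ip_block *
get_ip_block(struct device_model *adev, enum amd_ip_block_type type)
{
	for (int i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];
	return NULL;
}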
@@ -1220,38 +1204,75 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
 			enum amd_ip_block_type type,
 			u32 major, u32 minor)
 {
-	const struct amdgpu_ip_block_version *ip_block;
-	ip_block = amdgpu_get_ip_block(adev, type);
+	struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);

-	if (ip_block && ((ip_block->major > major) ||
-			((ip_block->major == major) &&
-			(ip_block->minor >= minor))))
+	if (ip_block && ((ip_block->version->major > major) ||
+			((ip_block->version->major == major) &&
+			(ip_block->version->minor >= minor))))
 		return 0;

 	return 1;
 }

-static void amdgpu_whether_enable_virtual_display(struct amdgpu_device *adev)
+/**
+ * amdgpu_ip_block_add
+ *
+ * @adev: amdgpu_device pointer
+ * @ip_block_version: pointer to the IP to add
+ *
+ * Adds the IP block driver information to the collection of IPs
+ * on the asic.
+ */
+int amdgpu_ip_block_add(struct amdgpu_device *adev,
+			const struct amdgpu_ip_block_version *ip_block_version)
+{
+	if (!ip_block_version)
+		return -EINVAL;
+
+	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
+
+	return 0;
+}
+
+static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 {
 	adev->enable_virtual_display = false;

 	if (amdgpu_virtual_display) {
 		struct drm_device *ddev = adev->ddev;
 		const char *pci_address_name = pci_name(ddev->pdev);
-		char *pciaddstr, *pciaddstr_tmp, *pciaddname;
+		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
 		pciaddstr_tmp = pciaddstr;
-		while ((pciaddname = strsep(&pciaddstr_tmp, ";"))) {
+		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
+			pciaddname = strsep(&pciaddname_tmp, ",");
 			if (!strcmp(pci_address_name, pciaddname)) {
+				long num_crtc;
+				int res = -1;
+
 				adev->enable_virtual_display = true;
+
+				if (pciaddname_tmp)
+					res = kstrtol(pciaddname_tmp, 10,
+						      &num_crtc);
+
+				if (!res) {
+					if (num_crtc < 1)
+						num_crtc = 1;
+					if (num_crtc > 6)
+						num_crtc = 6;
+					adev->mode_info.num_crtc = num_crtc;
+				} else {
+					adev->mode_info.num_crtc = 1;
+				}
 				break;
 			}
 		}

-		DRM_INFO("virtual display string:%s, %s:virtual_display:%d\n",
+		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
 			 amdgpu_virtual_display, pci_address_name,
-			 adev->enable_virtual_display);
+			 adev->enable_virtual_display, adev->mode_info.num_crtc);

 		kfree(pciaddstr);
 	}
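
The virtual display option string now carries an optional per-device CRTC count: entries are ';'-separated and each entry is "<pci address>[,<num_crtc>]", clamped to [1, 6]. A userspace sketch of the same parse, using glibc strsep()/strtol() in place of the kernel's kstrtol(); the PCI addresses are made up for the demo:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* same grammar as amdgpu.virtual_display: addr[,num_crtc];addr... */
	char *str = strdup("0000:26:00.0,4;0000:01:00.0");
	char *tmp = str, *entry, *addr;

	while ((entry = strsep(&tmp, ";"))) {
		long num_crtc = 1;

		addr = strsep(&entry, ",");	/* entry now points past the comma, or is NULL */
		if (entry) {
			num_crtc = strtol(entry, NULL, 10);
			if (num_crtc < 1)	/* clamp to [1, 6] as the driver does */
				num_crtc = 1;
			if (num_crtc > 6)
				num_crtc = 6;
		}
		printf("address %s -> num_crtc %ld\n", addr, num_crtc);
	}

	free(str);
	return 0;
}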
@@ -1261,7 +1282,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 {
 	int i, r;

-	amdgpu_whether_enable_virtual_display(adev);
+	amdgpu_device_enable_virtual_display(adev);

 	switch (adev->asic_type) {
 	case CHIP_TOPAZ:
@@ -1313,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		return -EINVAL;
 	}

-	adev->ip_block_status = kcalloc(adev->num_ip_blocks,
-					sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
-	if (adev->ip_block_status == NULL)
-		return -ENOMEM;
-
-	if (adev->ip_blocks == NULL) {
-		DRM_ERROR("No IP blocks found!\n");
-		return r;
-	}
-
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d\n", i);
-			adev->ip_block_status[i].valid = false;
+			adev->ip_blocks[i].status.valid = false;
 		} else {
-			if (adev->ip_blocks[i].funcs->early_init) {
-				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
+			if (adev->ip_blocks[i].version->funcs->early_init) {
+				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
 				if (r == -ENOENT) {
-					adev->ip_block_status[i].valid = false;
+					adev->ip_blocks[i].status.valid = false;
 				} else if (r) {
-					DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+					DRM_ERROR("early_init of IP block <%s> failed %d\n",
+						  adev->ip_blocks[i].version->funcs->name, r);
 					return r;
 				} else {
-					adev->ip_block_status[i].valid = true;
+					adev->ip_blocks[i].status.valid = true;
 				}
 			} else {
-				adev->ip_block_status[i].valid = true;
+				adev->ip_blocks[i].status.valid = true;
 			}
 		}
 	}
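
With the status array folded in, a disabled block is now simply ip_blocks[i].status.valid = false. Block i can still be masked off via the amdgpu_ip_block_mask module parameter; a tiny standalone model of that gating (the mask value here is illustrative; the parameter defaults to all bits set):

#include <stdio.h>

int main(void)
{
	/* bit i of the mask controls whether IP block i is initialized */
	unsigned ip_block_mask = 0xffffffff & ~(1u << 3);	/* pretend block 3 is disabled */
	int num_ip_blocks = 6;

	for (int i = 0; i < num_ip_blocks; i++)
		printf("block %d: %s\n", i,
		       (ip_block_mask & (1u << i)) ? "valid" : "disabled");
	return 0;
}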
@@ -1355,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
 	int i, r;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
 		if (r) {
-			DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		adev->ip_block_status[i].sw = true;
+		adev->ip_blocks[i].status.sw = true;
 		/* need to do gmc hw init early so we can allocate gpu mem */
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			r = amdgpu_vram_scratch_init(adev);
 			if (r) {
 				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
 				return r;
 			}
-			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 			if (r) {
 				DRM_ERROR("hw_init %d failed %d\n", i, r);
 				return r;
@@ -1380,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev)
 				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
 				return r;
 			}
-			adev->ip_block_status[i].hw = true;
+			adev->ip_blocks[i].status.hw = true;
 		}
 	}

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].sw)
+		if (!adev->ip_blocks[i].status.sw)
 			continue;
 		/* gmc hw init is done early */
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
 			continue;
-		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 		if (r) {
-			DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		adev->ip_block_status[i].hw = true;
+		adev->ip_blocks[i].status.hw = true;
 	}

 	return 0;
@@ -1406,25 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 	int i = 0, r;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].funcs->late_init) {
-			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
+		if (adev->ip_blocks[i].version->funcs->late_init) {
+			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
 			if (r) {
-				DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+				DRM_ERROR("late_init of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
-			adev->ip_block_status[i].late_initialized = true;
+			adev->ip_blocks[i].status.late_initialized = true;
 		}
 		/* skip CG for VCE/UVD, it's handled specially */
-		if (adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_UVD &&
-		    adev->ip_blocks[i].type != AMD_IP_BLOCK_TYPE_VCE) {
+		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
 			/* enable clockgating to save power */
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_GATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										      AMD_CG_STATE_GATE);
 			if (r) {
 				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
 		}
@@ -1439,68 +1454,71 @@ static int amdgpu_fini(struct amdgpu_device *adev)

 	/* need to disable SMC first */
 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].hw)
+		if (!adev->ip_blocks[i].status.hw)
 			continue;
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_UNGATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										      AMD_CG_STATE_UNGATE);
 			if (r) {
 				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
-			r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
 			/* XXX handle errors */
 			if (r) {
 				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].funcs->name, r);
+					  adev->ip_blocks[i].version->funcs->name, r);
 			}
-			adev->ip_block_status[i].hw = false;
+			adev->ip_blocks[i].status.hw = false;
 			break;
 		}
 	}

 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].hw)
+		if (!adev->ip_blocks[i].status.hw)
 			continue;
-		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_wb_fini(adev);
 			amdgpu_vram_scratch_fini(adev);
 		}
 		/* ungate blocks before hw fini so that we can shutdown the blocks safely */
-		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-								    AMD_CG_STATE_UNGATE);
+		r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+									      AMD_CG_STATE_UNGATE);
 		if (r) {
-			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
-		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
-		adev->ip_block_status[i].hw = false;
+		adev->ip_blocks[i].status.hw = false;
 	}

 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].sw)
+		if (!adev->ip_blocks[i].status.sw)
 			continue;
-		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
+		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
-		adev->ip_block_status[i].sw = false;
-		adev->ip_block_status[i].valid = false;
+		adev->ip_blocks[i].status.sw = false;
+		adev->ip_blocks[i].status.valid = false;
 	}

 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].late_initialized)
+		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
-		if (adev->ip_blocks[i].funcs->late_fini)
-			adev->ip_blocks[i].funcs->late_fini((void *)adev);
-		adev->ip_block_status[i].late_initialized = false;
+		if (adev->ip_blocks[i].version->funcs->late_fini)
+			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+		adev->ip_blocks[i].status.late_initialized = false;
 	}

 	return 0;
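
Read together, amdgpu_fini() runs as ordered passes: the SMC block is hw-finished first so clock/power gating is quiesced, then the remaining hw_fini, sw_fini, and late_fini passes run in reverse init order. A toy model of just that ordering, with an illustrative block list:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *blocks[] = { "common", "gmc", "ih", "smc", "dce", "gfx" };
	int n = sizeof(blocks) / sizeof(blocks[0]);

	/* pass 1: pull SMC down first */
	for (int i = 0; i < n; i++)
		if (!strcmp(blocks[i], "smc")) {
			printf("hw_fini %s\n", blocks[i]);
			break;
		}

	/* pass 2: everything else, in reverse of init order */
	for (int i = n - 1; i >= 0; i--)
		if (strcmp(blocks[i], "smc"))
			printf("hw_fini %s\n", blocks[i]);

	/* pass 3 (and likewise late_fini in pass 4): reverse order again */
	for (int i = n - 1; i >= 0; i--)
		printf("sw_fini %s\n", blocks[i]);
	return 0;
}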
@@ -1518,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
 	}

 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
 		/* ungate blocks so that suspend can properly shut them down */
 		if (i != AMD_IP_BLOCK_TYPE_SMC) {
-			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
-									    AMD_CG_STATE_UNGATE);
+			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+										      AMD_CG_STATE_UNGATE);
 			if (r) {
-				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
 			}
 		}
 		/* XXX handle errors */
-		r = adev->ip_blocks[i].funcs->suspend(adev);
+		r = adev->ip_blocks[i].version->funcs->suspend(adev);
 		/* XXX handle errors */
 		if (r) {
-			DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("suspend of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 		}
 	}

@@ -1544,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 	int i, r;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		r = adev->ip_blocks[i].funcs->resume(adev);
+		r = adev->ip_blocks[i].version->funcs->resume(adev);
 		if (r) {
-			DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
+			DRM_ERROR("resume of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
 	}
@@ -1859,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
-	kfree(adev->ip_block_status);
-	adev->ip_block_status = NULL;
 	adev->accel_working = false;
 	/* free i2c buses */
 	amdgpu_i2c_fini(adev);
@@ -1956,7 +1975,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)

 	r = amdgpu_suspend(adev);

-	/* evict remaining vram memory */
+	/* evict remaining vram memory
+	 * This second call to evict vram is to evict the gart page table
+	 * using the CPU.
+	 */
 	amdgpu_bo_evict_vram(adev);

 	pci_save_state(dev->pdev);
@@ -2096,13 +2118,13 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
 	bool asic_hang = false;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_blocks[i].funcs->check_soft_reset)
-			adev->ip_block_status[i].hang =
-				adev->ip_blocks[i].funcs->check_soft_reset(adev);
-		if (adev->ip_block_status[i].hang) {
-			DRM_INFO("IP block:%d is hang!\n", i);
+		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
+			adev->ip_blocks[i].status.hang =
+				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang) {
+			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
 			asic_hang = true;
 		}
 	}
@@ -2114,11 +2136,11 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->pre_soft_reset) {
-			r = adev->ip_blocks[i].funcs->pre_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
+			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
 			if (r)
 				return r;
 		}
@@ -2132,13 +2154,13 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
 	int i;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if ((adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_SMC) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_ACP) ||
-		    (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_DCE)) {
-			if (adev->ip_block_status[i].hang) {
+		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
+		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+			if (adev->ip_blocks[i].status.hang) {
 				DRM_INFO("Some block need full reset!\n");
 				return true;
 			}
@@ -2152,11 +2174,11 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->soft_reset) {
-			r = adev->ip_blocks[i].funcs->soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->soft_reset) {
+			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
 			if (r)
 				return r;
 		}
@@ -2170,11 +2192,11 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
 	int i, r = 0;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_block_status[i].valid)
+		if (!adev->ip_blocks[i].status.valid)
 			continue;
-		if (adev->ip_block_status[i].hang &&
-		    adev->ip_blocks[i].funcs->post_soft_reset)
-			r = adev->ip_blocks[i].funcs->post_soft_reset(adev);
+		if (adev->ip_blocks[i].status.hang &&
+		    adev->ip_blocks[i].version->funcs->post_soft_reset)
+			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
 		if (r)
 			return r;
 	}
@@ -2531,6 +2553,13 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 		se_bank = (*pos >> 24) & 0x3FF;
 		sh_bank = (*pos >> 34) & 0x3FF;
 		instance_bank = (*pos >> 44) & 0x3FF;
+
+		if (se_bank == 0x3FF)
+			se_bank = 0xFFFFFFFF;
+		if (sh_bank == 0x3FF)
+			sh_bank = 0xFFFFFFFF;
+		if (instance_bank == 0x3FF)
+			instance_bank = 0xFFFFFFFF;
 		use_bank = 1;
 	} else {
 		use_bank = 0;
@@ -2539,8 +2568,8 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 	*pos &= 0x3FFFF;

 	if (use_bank) {
-		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
-		    se_bank >= adev->gfx.config.max_shader_engines)
+		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
 			return -EINVAL;
 		mutex_lock(&adev->grbm_idx_mutex);
 		amdgpu_gfx_select_se_sh(adev, se_bank,
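
Taken together, these two read-path hunks treat the debugfs file offset as a small command word: bit 62 selects banked access, three 10-bit fields carry the SE/SH/instance bank, and 0x3FF in a field now means broadcast, widened to 0xFFFFFFFF for amdgpu_gfx_select_se_sh(). A standalone decoder for that layout; the PG-lock bit at 23 is taken from the write-path hunk below and is assumed to apply to reads as well:

#include <stdint.h>
#include <stdio.h>

static void decode_pos(uint64_t pos)
{
	int use_bank = !!(pos & (1ULL << 62));
	int pm_pg_lock = (pos >> 23) & 1;
	uint32_t se = (pos >> 24) & 0x3FF;
	uint32_t sh = (pos >> 34) & 0x3FF;
	uint32_t instance = (pos >> 44) & 0x3FF;

	/* 0x3FF is the broadcast sentinel introduced by this commit */
	if (se == 0x3FF)
		se = 0xFFFFFFFF;
	if (sh == 0x3FF)
		sh = 0xFFFFFFFF;
	if (instance == 0x3FF)
		instance = 0xFFFFFFFF;

	printf("reg offset 0x%llx, use_bank=%d pm_pg_lock=%d se=0x%x sh=0x%x instance=0x%x\n",
	       (unsigned long long)(pos & 0x3FFFF), use_bank, pm_pg_lock,
	       (unsigned)se, (unsigned)sh, (unsigned)instance);
}

int main(void)
{
	/* banked read of register byte offset 0x100 on SE 1, broadcast SH/instance */
	uint64_t pos = (1ULL << 62) | (1ULL << 24) |
		       (0x3FFULL << 34) | (0x3FFULL << 44) | 0x100;
	decode_pos(pos);
	return 0;
}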
@@ -2587,10 +2616,45 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 	struct amdgpu_device *adev = f->f_inode->i_private;
 	ssize_t result = 0;
 	int r;
+	bool pm_pg_lock, use_bank;
+	unsigned instance_bank, sh_bank, se_bank;

 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;

+	/* are we reading registers for which a PG lock is necessary? */
+	pm_pg_lock = (*pos >> 23) & 1;
+
+	if (*pos & (1ULL << 62)) {
+		se_bank = (*pos >> 24) & 0x3FF;
+		sh_bank = (*pos >> 34) & 0x3FF;
+		instance_bank = (*pos >> 44) & 0x3FF;
+
+		if (se_bank == 0x3FF)
+			se_bank = 0xFFFFFFFF;
+		if (sh_bank == 0x3FF)
+			sh_bank = 0xFFFFFFFF;
+		if (instance_bank == 0x3FF)
+			instance_bank = 0xFFFFFFFF;
+		use_bank = 1;
+	} else {
+		use_bank = 0;
+	}
+
+	*pos &= 0x3FFFF;
+
+	if (use_bank) {
+		if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+		    (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
+			return -EINVAL;
+		mutex_lock(&adev->grbm_idx_mutex);
+		amdgpu_gfx_select_se_sh(adev, se_bank,
+					sh_bank, instance_bank);
+	}
+
+	if (pm_pg_lock)
+		mutex_lock(&adev->pm.mutex);
+
 	while (size) {
 		uint32_t value;

@@ -2609,6 +2673,14 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
 		size -= 4;
 	}

+	if (use_bank) {
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+	}
+
+	if (pm_pg_lock)
+		mutex_unlock(&adev->pm.mutex);
+
 	return result;
 }

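
For completeness, a hypothetical userspace caller of the extended write path might look like the following; the debugfs path and the register offset are illustrative, not taken from this diff:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* illustrative path; the file is created per DRM minor under debugfs */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	uint32_t value = 0xdeadbeef;
	/* banked write: SE 0, broadcast SH/instance, register byte offset 0x2000 */
	uint64_t pos = (1ULL << 62) | (0x3FFULL << 34) | (0x3FFULL << 44) | 0x2000;

	if (pwrite(fd, &value, sizeof(value), (off_t)pos) != sizeof(value))
		perror("pwrite");
	close(fd);
	return 0;
}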
@@ -2871,6 +2943,56 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
 	return !r ? 4 : r;
 }

+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	int r, x;
+	ssize_t result = 0;
+	uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+	if (size & 3 || *pos & 3)
+		return -EINVAL;
+
+	/* decode offset */
+	offset = (*pos & 0x7F);
+	se = ((*pos >> 7) & 0xFF);
+	sh = ((*pos >> 15) & 0xFF);
+	cu = ((*pos >> 23) & 0xFF);
+	wave = ((*pos >> 31) & 0xFF);
+	simd = ((*pos >> 37) & 0xFF);
+
+	/* switch to the specific se/sh/cu */
+	mutex_lock(&adev->grbm_idx_mutex);
+	amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+
+	x = 0;
+	if (adev->gfx.funcs->read_wave_data)
+		adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+
+	amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	if (!x)
+		return -EINVAL;
+
+	while (size && (offset < x * 4)) {
+		uint32_t value;
+
+		value = data[offset >> 2];
+		r = put_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		result += 4;
+		buf += 4;
+		offset += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
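
A userspace reader of the new amdgpu_wave file composes its offset from the fields that amdgpu_debugfs_wave_read() decodes above. Note that the wave field (shift 31) and the simd field (shift 37) overlap once the 8-bit masks are applied, so an encoder should keep wave below 64. A sketch:

#include <stdint.h>
#include <stdio.h>

/* Pack a file offset for the amdgpu_wave debugfs entry, using the bit
 * layout from the decode above. Values below are illustrative. */
static uint64_t wave_pos(uint32_t offset, uint32_t se, uint32_t sh,
			 uint32_t cu, uint32_t wave, uint32_t simd)
{
	return (uint64_t)(offset & 0x7F) |
	       ((uint64_t)(se & 0xFF) << 7) |
	       ((uint64_t)(sh & 0xFF) << 15) |
	       ((uint64_t)(cu & 0xFF) << 23) |
	       ((uint64_t)(wave & 0xFF) << 31) |	/* keep wave < 64 to avoid */
	       ((uint64_t)(simd & 0xFF) << 37);		/* clobbering the simd bits */
}

int main(void)
{
	/* wave 2 on SIMD 1 of CU 3, SE 0 / SH 0, reading from the first dword */
	printf("pos = 0x%llx\n",
	       (unsigned long long)wave_pos(0, 0, 0, 3, 2, 1));
	return 0;
}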
@@ -2908,6 +3030,12 @@ static const struct file_operations amdgpu_debugfs_sensors_fops = {
 	.llseek = default_llseek
 };

+static const struct file_operations amdgpu_debugfs_wave_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_wave_read,
+	.llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
@@ -2915,6 +3043,7 @@ static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_smc_fops,
 	&amdgpu_debugfs_gca_config_fops,
 	&amdgpu_debugfs_sensors_fops,
+	&amdgpu_debugfs_wave_fops,
 };

 static const char *debugfs_regs_names[] = {
2920static const char *debugfs_regs_names[] = { 3049static const char *debugfs_regs_names[] = {
@@ -2924,6 +3053,7 @@ static const char *debugfs_regs_names[] = {
2924 "amdgpu_regs_smc", 3053 "amdgpu_regs_smc",
2925 "amdgpu_gca_config", 3054 "amdgpu_gca_config",
2926 "amdgpu_sensors", 3055 "amdgpu_sensors",
3056 "amdgpu_wave",
2927}; 3057};
2928 3058
2929static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 3059static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)