author     Mark Brown <broonie@kernel.org>    2015-10-12 13:09:27 -0400
committer  Mark Brown <broonie@kernel.org>    2015-10-12 13:09:27 -0400
commit     79828b4fa835f73cdaf4bffa48696abdcbea9d02 (patch)
tree       5e0fa7156acb75ba603022bc807df8f2fedb97a8 /drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
parent     721b51fcf91898299d96f4b72cb9434cda29dce6 (diff)
parent     8c1a9d6323abf0fb1e5dad96cf3f1c783505ea5a (diff)
Merge remote-tracking branch 'asoc/fix/rt5645' into asoc-fix-rt5645
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 77
1 file changed, 42 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ba46be361c9b..6ff6ae945794 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -55,6 +55,7 @@ static const char *amdgpu_asic_name[] = {
         "MULLINS",
         "TOPAZ",
         "TONGA",
+        "FIJI",
         "CARRIZO",
         "LAST",
 };
@@ -63,7 +64,7 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 {
         struct amdgpu_device *adev = dev->dev_private;

-        if (adev->flags & AMDGPU_IS_PX)
+        if (adev->flags & AMD_IS_PX)
                 return true;
         return false;
 }
@@ -243,7 +244,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)

         if (adev->vram_scratch.robj == NULL) {
                 r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
-                                     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
+                                     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
+                                     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                                      NULL, &adev->vram_scratch.robj);
                 if (r) {
                         return r;
@@ -1160,6 +1162,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
         switch (adev->asic_type) {
         case CHIP_TOPAZ:
         case CHIP_TONGA:
+        case CHIP_FIJI:
         case CHIP_CARRIZO:
                 if (adev->asic_type == CHIP_CARRIZO)
                         adev->family = AMDGPU_FAMILY_CZ;
@@ -1191,8 +1194,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
                 return -EINVAL;
         }

-        adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
-        if (adev->ip_block_enabled == NULL)
+        adev->ip_block_status = kcalloc(adev->num_ip_blocks,
+                                        sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
+        if (adev->ip_block_status == NULL)
                 return -ENOMEM;

         if (adev->ip_blocks == NULL) {
@@ -1203,14 +1207,19 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
         for (i = 0; i < adev->num_ip_blocks; i++) {
                 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
                         DRM_ERROR("disabled ip block: %d\n", i);
-                        adev->ip_block_enabled[i] = false;
+                        adev->ip_block_status[i].valid = false;
                 } else {
                         if (adev->ip_blocks[i].funcs->early_init) {
                                 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
-                                if (r)
+                                if (r == -ENOENT)
+                                        adev->ip_block_status[i].valid = false;
+                                else if (r)
                                         return r;
+                                else
+                                        adev->ip_block_status[i].valid = true;
+                        } else {
+                                adev->ip_block_status[i].valid = true;
                         }
-                        adev->ip_block_enabled[i] = true;
                 }
         }

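The bool array adev->ip_block_enabled becomes an array of per-block status records that track each init stage separately. The structure itself is defined in amdgpu.h rather than in this file; a minimal sketch of the assumed layout, inferred from the fields used in this patch:

/* Assumed layout of the new status record (not shown in this diff);
 * names match the fields used throughout amdgpu_device.c. */
struct amdgpu_ip_block_status {
        bool valid;     /* block enabled via amdgpu_ip_block_mask and early_init did not fail */
        bool sw;        /* sw_init has completed, so sw_fini must run */
        bool hw;        /* hw_init has completed, so hw_fini must run */
};

Tracking sw and hw separately lets amdgpu_fini() tear down only the stages that actually completed, instead of keying everything off a single enabled flag.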
@@ -1222,11 +1231,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
         int i, r;

         for (i = 0; i < adev->num_ip_blocks; i++) {
-                if (!adev->ip_block_enabled[i])
+                if (!adev->ip_block_status[i].valid)
                         continue;
                 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
                 if (r)
                         return r;
+                adev->ip_block_status[i].sw = true;
                 /* need to do gmc hw init early so we can allocate gpu mem */
                 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
                         r = amdgpu_vram_scratch_init(adev);
@@ -1238,11 +1248,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
                         r = amdgpu_wb_init(adev);
                         if (r)
                                 return r;
+                        adev->ip_block_status[i].hw = true;
                 }
         }

         for (i = 0; i < adev->num_ip_blocks; i++) {
-                if (!adev->ip_block_enabled[i])
+                if (!adev->ip_block_status[i].sw)
                         continue;
                 /* gmc hw init is done early */
                 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1250,6 +1261,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
                 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
                 if (r)
                         return r;
+                adev->ip_block_status[i].hw = true;
         }

         return 0;
@@ -1260,7 +1272,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
         int i = 0, r;

         for (i = 0; i < adev->num_ip_blocks; i++) {
-                if (!adev->ip_block_enabled[i])
+                if (!adev->ip_block_status[i].valid)
                         continue;
                 /* enable clockgating to save power */
                 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1282,7 +1294,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
         int i, r;

         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-                if (!adev->ip_block_enabled[i])
+                if (!adev->ip_block_status[i].hw)
                         continue;
                 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
                         amdgpu_wb_fini(adev);
@@ -1295,14 +1307,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
                         return r;
                 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
                 /* XXX handle errors */
+                adev->ip_block_status[i].hw = false;
         }

         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-                if (!adev->ip_block_enabled[i])
+                if (!adev->ip_block_status[i].sw)
                         continue;
                 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
                 /* XXX handle errors */
-                adev->ip_block_enabled[i] = false;
+                adev->ip_block_status[i].sw = false;
+                adev->ip_block_status[i].valid = false;
         }

         return 0;
@@ -1313,7 +1327,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
         int i, r;

         for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-                if (!adev->ip_block_enabled[i])
+                if (!adev->ip_block_status[i].valid)
                         continue;
                 /* ungate blocks so that suspend can properly shut them down */
                 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1331,7 +1345,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
         int i, r;

         for (i = 0; i < adev->num_ip_blocks; i++) {
-                if (!adev->ip_block_enabled[i])
+                if (!adev->ip_block_status[i].valid)
                         continue;
                 r = adev->ip_blocks[i].funcs->resume(adev);
                 if (r)
@@ -1366,7 +1380,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
         adev->ddev = ddev;
         adev->pdev = pdev;
         adev->flags = flags;
-        adev->asic_type = flags & AMDGPU_ASIC_MASK;
+        adev->asic_type = flags & AMD_ASIC_MASK;
         adev->is_atom_bios = false;
         adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
         adev->mc.gtt_size = 512 * 1024 * 1024;
@@ -1512,6 +1526,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                 return r;
         }

+        r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+        if (r) {
+                dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
+                return r;
+        }
         r = amdgpu_ib_ring_tests(adev);
         if (r)
                 DRM_ERROR("ib ring test failed (%d).\n", r);
@@ -1573,12 +1592,13 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
         adev->shutdown = true;
         /* evict vram memory */
         amdgpu_bo_evict_vram(adev);
+        amdgpu_ctx_fini(&adev->kernel_ctx);
         amdgpu_ib_pool_fini(adev);
         amdgpu_fence_driver_fini(adev);
         amdgpu_fbdev_fini(adev);
         r = amdgpu_fini(adev);
-        kfree(adev->ip_block_enabled);
-        adev->ip_block_enabled = NULL;
+        kfree(adev->ip_block_status);
+        adev->ip_block_status = NULL;
         adev->accel_working = false;
         /* free i2c buses */
         amdgpu_i2c_fini(adev);
@@ -1616,8 +1636,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
         struct amdgpu_device *adev;
         struct drm_crtc *crtc;
         struct drm_connector *connector;
-        int i, r;
-        bool force_completion = false;
+        int r;

         if (dev == NULL || dev->dev_private == NULL) {
                 return -ENODEV;
@@ -1656,21 +1675,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
         /* evict vram memory */
         amdgpu_bo_evict_vram(adev);

-        /* wait for gpu to finish processing current batch */
-        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-                struct amdgpu_ring *ring = adev->rings[i];
-                if (!ring)
-                        continue;
-
-                r = amdgpu_fence_wait_empty(ring);
-                if (r) {
-                        /* delay GPU reset to resume */
-                        force_completion = true;
-                }
-        }
-        if (force_completion) {
-                amdgpu_fence_driver_force_completion(adev);
-        }
+        amdgpu_fence_driver_suspend(adev);

         r = amdgpu_suspend(adev);

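The open-coded per-ring fence drain above moves into the fence driver as amdgpu_fence_driver_suspend(), paired with amdgpu_fence_driver_resume() in the final hunk. A rough sketch of what the suspend helper is expected to cover, reconstructed from the removed loop; the actual implementation lives in amdgpu_fence.c and may differ in detail (for example, it may also manage per-ring fence interrupt state):

/* Sketch only, based on the code removed above, not the real helper. */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
        bool force_completion = false;
        int i, r;

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                if (!ring)
                        continue;

                /* wait for gpu to finish processing current batch */
                r = amdgpu_fence_wait_empty(ring);
                if (r) {
                        /* delay GPU reset to resume */
                        force_completion = true;
                }
        }
        if (force_completion)
                amdgpu_fence_driver_force_completion(adev);
}

Moving this into the fence driver keeps amdgpu_suspend_kms() free of ring-level details and lets resume undo exactly what suspend did.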
@@ -1728,6 +1733,8 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)

         r = amdgpu_resume(adev);

+        amdgpu_fence_driver_resume(adev);
+
         r = amdgpu_ib_ring_tests(adev);
         if (r)
                 DRM_ERROR("ib ring test failed (%d).\n", r);