author     Alex Deucher <alexander.deucher@amd.com>    2013-04-26 18:03:44 -0400
committer  Alex Deucher <alexander.deucher@amd.com>    2013-05-20 12:09:38 -0400
commit     1df0d523ddb8683e2d5ca1a50ca92f76f908ef20 (patch)
tree       6952ea519c032d4dff8807ec01384f59156c4709 /drivers/gpu
parent     948bee3ff41c226b5c8f7d4a78f5562473a09de6 (diff)
drm/radeon: sun/hainan chips do not have UVD (v2)
Skip UVD handling on them.
v2: split has_uvd tracking into separate patch
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--   drivers/gpu/drm/radeon/si.c   67
1 file changed, 39 insertions(+), 28 deletions(-)
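Every hunk below applies the same pattern: each UVD-specific call in the SI init, startup, suspend, and teardown paths is wrapped in a check of the per-device rdev->has_uvd capability flag (the flag itself is set by the separate tracking patch mentioned in the v2 note), so chips without the decode block, such as Hainan, simply skip those steps. As a rough, hypothetical illustration of the pattern only, not the radeon API, a standalone C sketch with made-up names (demo_device, demo_startup, uvd_resume) might look like:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct radeon_device; only the flag matters here. */
struct demo_device {
	const char *name;
	bool has_uvd;
};

/* Stub standing in for the UVD bring-up work done during startup. */
static void uvd_resume(struct demo_device *dev)
{
	printf("%s: UVD resumed\n", dev->name);
}

static void demo_startup(struct demo_device *dev)
{
	/* The guard is the whole point of the patch: no flag, no UVD handling. */
	if (dev->has_uvd)
		uvd_resume(dev);
}

int main(void)
{
	struct demo_device tahiti = { .name = "tahiti", .has_uvd = true };
	struct demo_device hainan = { .name = "hainan", .has_uvd = false };

	demo_startup(&tahiti);	/* prints "tahiti: UVD resumed" */
	demo_startup(&hainan);	/* prints nothing: UVD handling skipped */
	return 0;
}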
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 2e0a08617f4a..d708fc9aa318 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2635,9 +2635,11 @@ static void si_gpu_init(struct radeon_device *rdev)
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
 	WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
-	WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
-	WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
-	WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+	if (rdev->has_uvd) {
+		WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
+		WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
+	}
 
 	si_tiling_mode_table_init(rdev);
 
@@ -5213,15 +5215,17 @@ static int si_startup(struct radeon_device *rdev)
 		return r;
 	}
 
-	r = rv770_uvd_resume(rdev);
-	if (!r) {
-		r = radeon_fence_driver_start_ring(rdev,
-						   R600_RING_TYPE_UVD_INDEX);
+	if (rdev->has_uvd) {
+		r = rv770_uvd_resume(rdev);
+		if (!r) {
+			r = radeon_fence_driver_start_ring(rdev,
+							   R600_RING_TYPE_UVD_INDEX);
+			if (r)
+				dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+		}
 		if (r)
-			dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 	}
-	if (r)
-		rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
 
 	/* Enable IRQ */
 	r = si_irq_init(rdev);
@@ -5280,16 +5284,18 @@ static int si_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-	if (ring->ring_size) {
-		r = radeon_ring_init(rdev, ring, ring->ring_size,
-				     R600_WB_UVD_RPTR_OFFSET,
-				     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
-				     0, 0xfffff, RADEON_CP_PACKET2);
-		if (!r)
-			r = r600_uvd_init(rdev);
-		if (r)
-			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+	if (rdev->has_uvd) {
+		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+		if (ring->ring_size) {
+			r = radeon_ring_init(rdev, ring, ring->ring_size,
+					     R600_WB_UVD_RPTR_OFFSET,
+					     UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
+					     0, 0xfffff, RADEON_CP_PACKET2);
+			if (!r)
+				r = r600_uvd_init(rdev);
+			if (r)
+				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+		}
 	}
 
 	r = radeon_ib_pool_init(rdev);
@@ -5338,8 +5344,10 @@ int si_suspend(struct radeon_device *rdev)
 	radeon_vm_manager_fini(rdev);
 	si_cp_enable(rdev, false);
 	cayman_dma_stop(rdev);
-	r600_uvd_rbc_stop(rdev);
-	radeon_uvd_suspend(rdev);
+	if (rdev->has_uvd) {
+		r600_uvd_rbc_stop(rdev);
+		radeon_uvd_suspend(rdev);
+	}
 	si_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	si_pcie_gart_disable(rdev);
@@ -5427,11 +5435,13 @@ int si_init(struct radeon_device *rdev)
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 64 * 1024);
 
-	r = radeon_uvd_init(rdev);
-	if (!r) {
-		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-		ring->ring_obj = NULL;
-		r600_ring_init(rdev, ring, 4096);
+	if (rdev->has_uvd) {
+		r = radeon_uvd_init(rdev);
+		if (!r) {
+			ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+			ring->ring_obj = NULL;
+			r600_ring_init(rdev, ring, 4096);
+		}
 	}
 
 	rdev->ih.ring_obj = NULL;
@@ -5479,7 +5489,8 @@ void si_fini(struct radeon_device *rdev)
 	radeon_vm_manager_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	radeon_uvd_fini(rdev);
+	if (rdev->has_uvd)
+		radeon_uvd_fini(rdev);
 	si_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);