Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
 drivers/gpu/drm/radeon/r600.c | 60 ++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 34 insertions(+), 26 deletions(-)
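
Limited to this file, the changes fall into four groups: r600_gpu_init() stops clamping max_backends/max_pipes/max_simds from the harvest registers and instead sanitizes disabled_rb_mask when every render backend appears disabled; the kernel-internal ring/IB submission calls gain an extra bool argument (passed as false throughout); semaphore waits are followed by a PFP_SYNC_ME packet on Evergreen and newer; and the IH ring overflow handling is corrected.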
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c70a504d96af..ea5c9af722ef 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1812,7 +1812,6 @@ static void r600_gpu_init(struct radeon_device *rdev)
 {
        u32 tiling_config;
        u32 ramcfg;
-       u32 cc_rb_backend_disable;
        u32 cc_gc_shader_pipe_config;
        u32 tmp;
        int i, j;
@@ -1939,29 +1938,20 @@ static void r600_gpu_init(struct radeon_device *rdev)
        }
        tiling_config |= BANK_SWAPS(1);

-       cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-       tmp = R6XX_MAX_BACKENDS -
-               r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
-       if (tmp < rdev->config.r600.max_backends) {
-               rdev->config.r600.max_backends = tmp;
-       }
-
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
-       tmp = R6XX_MAX_PIPES -
-               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
-       if (tmp < rdev->config.r600.max_pipes) {
-               rdev->config.r600.max_pipes = tmp;
-       }
-       tmp = R6XX_MAX_SIMDS -
-               r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
-       if (tmp < rdev->config.r600.max_simds) {
-               rdev->config.r600.max_simds = tmp;
-       }
        tmp = rdev->config.r600.max_simds -
                r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
        rdev->config.r600.active_simds = tmp;

        disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+       tmp = 0;
+       for (i = 0; i < rdev->config.r600.max_backends; i++)
+               tmp |= (1 << i);
+       /* if all the backends are disabled, fix it up here */
+       if ((disabled_rb_mask & tmp) == tmp) {
+               for (i = 0; i < rdev->config.r600.max_backends; i++)
+                       disabled_rb_mask &= ~(1 << i);
+       }
        tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
                                        R6XX_MAX_BACKENDS, disabled_rb_mask);
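
The added block guards r6xx_remap_render_backend() against a harvest register that claims every usable backend is disabled. A minimal standalone sketch of the same logic (the helper name is hypothetical, not from the driver):

        #include <stdint.h>

        /* If the harvest mask marks all of the first max_backends backends
         * as disabled, treat the register as bogus and re-enable them. */
        static uint32_t fix_disabled_rb_mask(uint32_t disabled_rb_mask,
                                             unsigned int max_backends)
        {
                uint32_t tmp = 0;
                unsigned int i;

                for (i = 0; i < max_backends; i++)
                        tmp |= (1u << i);
                if ((disabled_rb_mask & tmp) == tmp)
                        disabled_rb_mask &= ~tmp; /* one-shot form of the patch's clearing loop */
                return disabled_rb_mask;
        }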
@@ -2547,7 +2537,7 @@ int r600_cp_start(struct radeon_device *rdev)
        radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 0);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);

        cp_me = 0xff;
        WREG32(R_0086D8_CP_ME_CNTL, cp_me);
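
Note the new third argument to radeon_ring_unlock_commit() here and at the matching call sites below. In this kernel series the commit helpers gained a bool that controls whether the HDP cache is flushed on commit (the parameter name hdp_flush is an assumption from memory; the diff itself does not show it), and every kernel-internal submission in this file passes false:

        /* assumed prototypes -- only the added bool is visible in this diff */
        void radeon_ring_unlock_commit(struct radeon_device *rdev,
                                       struct radeon_ring *ring, bool hdp_flush);
        int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
                               struct radeon_ib *const_ib, bool hdp_flush);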
@@ -2683,7 +2673,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
        radeon_ring_write(ring, 0xDEADBEEF);
-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(scratch);
                if (tmp == 0xDEADBEEF)
@@ -2753,6 +2743,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
        }
 }

+/**
+ * r600_semaphore_ring_emit - emit a semaphore on the CP ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring buffer object
+ * @semaphore: radeon semaphore object
+ * @emit_wait: Is this a semaphore wait?
+ *
+ * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
+ * from running ahead of semaphore waits.
+ */
 bool r600_semaphore_ring_emit(struct radeon_device *rdev,
                              struct radeon_ring *ring,
                              struct radeon_semaphore *semaphore,
@@ -2768,6 +2769,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, lower_32_bits(addr));
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);

+       /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+       if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
+               /* Prevent the PFP from running ahead of the semaphore wait */
+               radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+               radeon_ring_write(ring, 0x0);
+       }
+
        return true;
 }

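The reasoning behind the added packet: the CP is split into a prefetch parser (PFP) that fetches commands ahead of the micro engine (ME) that executes them, and a semaphore wait only blocks the ME. Without a barrier the PFP can keep fetching past an unsatisfied wait. PACKET3_PFP_SYNC_ME stalls the PFP until the ME has caught up, and per the comment above it is only emitted for waits, and only on Evergreen (CHIP_CEDAR) and newer where the packet is supported.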
@@ -2845,7 +2853,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
                return r;
        }

-       radeon_ring_unlock_commit(rdev, ring);
+       radeon_ring_unlock_commit(rdev, ring, false);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
@@ -3165,7 +3173,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
        ib.ptr[2] = 0xDEADBEEF;
        ib.length_dw = 3;
-       r = radeon_ib_schedule(rdev, &ib, NULL);
+       r = radeon_ib_schedule(rdev, &ib, NULL, false);
        if (r) {
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                goto free_ib;
@@ -3784,17 +3792,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
        wptr = RREG32(IH_RB_WPTR);

        if (wptr & RB_OVERFLOW) {
+               wptr &= ~RB_OVERFLOW;
                /* When a ring buffer overflow happen start parsing interrupt
                 * from the last not overwritten vector (wptr + 16). Hopefully
                 * this should allow us to catchup.
                 */
-               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-                        wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+               dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+                        wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
                rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
-               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
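
Two things change in the overflow path: the RB_OVERFLOW flag is now stripped from wptr before it is used rather than after, so the flag bit no longer pollutes the warning text or the rptr arithmetic; and the last value in the warning previously computed (wptr + 16) + rdev->ih.ptr_mask, an addition, while the value actually assigned to rptr uses &. A minimal illustration with assumed values (the mask depends on the configured IH ring size):

        uint32_t ptr_mask = 0x3fff;                /* e.g. a 16 KiB IH ring */
        uint32_t wptr = 0x0010;                    /* wrapped write pointer */
        uint32_t shown = (wptr + 16) + ptr_mask;   /* 0x401f: nonsense the old message printed */
        uint32_t used  = (wptr + 16) & ptr_mask;   /* 0x0020: the rptr actually used */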
@@ -4040,6 +4048,7 @@ restart_ih:
                /* wptr/rptr are in bytes! */
                rptr += 16;
                rptr &= rdev->ih.ptr_mask;
+               WREG32(IH_RB_RPTR, rptr);
        }
        if (queue_hotplug)
                schedule_work(&rdev->hotplug_work);
@@ -4048,7 +4057,6 @@ restart_ih:
        if (queue_thermal && rdev->pm.dpm_enabled)
                schedule_work(&rdev->pm.dpm.thermal.work);
        rdev->ih.rptr = rptr;
-       WREG32(IH_RB_RPTR, rdev->ih.rptr);
        atomic_set(&rdev->ih.lock, 0);

        /* make sure wptr hasn't changed while processing */
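
With the IH_RB_RPTR write moved from here into the drain loop (see the previous hunk), the hardware's read pointer advances after every processed vector instead of once per interrupt; presumably this frees ring space sooner and makes the overflow case above less likely under heavy interrupt load.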