Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	159
1 file changed, 146 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index ea5c9af722ef..56b02927cd3d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -122,6 +122,94 @@ u32 r600_get_xclk(struct radeon_device *rdev)
 
 int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 {
+	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
+		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
+
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
+			 ~UPLL_BYPASS_CNTL);
+
+	if (!vclk || !dclk) {
+		/* keep the Bypass mode, put PLL to sleep */
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	if (rdev->clock.spll.reference_freq == 10000)
+		ref_div = 34;
+	else
+		ref_div = 4;
+
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+					  ref_div + 1, 0xFFF, 2, 30, ~0,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
+		fb_div >>= 1;
+	else
+		fb_div |= 1;
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* For RS780 we have to choose ref clk */
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
+			 ~UPLL_REFCLK_SRC_SEL_MASK);
+
+	/* set the required fb, ref and post divider values */
+	WREG32_P(CG_UPLL_FUNC_CNTL,
+		 UPLL_FB_DIV(fb_div) |
+		 UPLL_REF_DIV(ref_div),
+		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 UPLL_SW_HILEN(vclk_div >> 1) |
+		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+		 UPLL_SW_HILEN2(dclk_div >> 1) |
+		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
+		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
+		 ~UPLL_SW_MASK);
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* deassert BYPASS_EN */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
 	return 0;
 }
 
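All of the PLL programming above goes through WREG32_P, radeon's masked read-modify-write register helper: only the bits that are cleared in the mask argument get replaced. A minimal sketch of that idiom, assuming the usual radeon.h definition:

```c
/* read-modify-write: keep the bits set in 'mask', replace the rest.
 * Sketch of the WREG32_P idiom as defined in radeon.h. */
#define WREG32_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32(reg);		\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32(reg, tmp_);			\
	} while (0)
```

So a call like WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK) clears just the reset bit and leaves the rest of the control word intact, which is why each step of the bring-up sequence can be written as an independent register poke.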
@@ -992,6 +1080,8 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1042,6 +1132,8 @@ static void r600_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
 	radeon_gart_table_vram_unpin(rdev);
 }
 
@@ -1338,7 +1430,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
 	if (rdev->vram_scratch.robj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     0, NULL, &rdev->vram_scratch.robj);
+				     0, NULL, NULL, &rdev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
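The extra NULL slipped into radeon_bo_create() here (and into r600_ih_ring_alloc() at the end of this patch) matches a signature change made elsewhere in this series. Reading it against the surrounding tree, the new parameter appears to be a reservation_object pointer; the prototype below is reconstructed from context, not quoted from the patch:

```c
/* reconstructed prototype -- the new 'resv' parameter sits between the
 * sg table and the returned BO pointer; kernel-internal callers such as
 * the scratch page and IH ring pass NULL to get a fresh reservation */
int radeon_bo_create(struct radeon_device *rdev, unsigned long size,
		     int byte_align, bool kernel, u32 domain, u32 flags,
		     struct sg_table *sg, struct reservation_object *resv,
		     struct radeon_bo **bo_ptr);
```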
@@ -2792,12 +2884,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int r600_copy_cpdma(struct radeon_device *rdev,
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
 		    uint64_t src_offset, uint64_t dst_offset,
 		    unsigned num_gpu_pages,
-		    struct radeon_fence **fence)
+		    struct reservation_object *resv)
 {
 	struct radeon_semaphore *sem = NULL;
+	struct radeon_fence *fence;
 	int ring_index = rdev->asic->copy.blit_ring_index;
 	struct radeon_ring *ring = &rdev->ring[ring_index];
 	u32 size_in_bytes, cur_size_in_bytes, tmp;
@@ -2807,7 +2900,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	r = radeon_semaphore_create(rdev, &sem);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
+		return ERR_PTR(r);
 	}
 
 	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -2816,10 +2909,10 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
 		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
@@ -2846,17 +2939,17 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
 
-	r = radeon_fence_emit(rdev, fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
 	}
 
 	radeon_ring_unlock_commit(rdev, ring, false);
-	radeon_semaphore_free(rdev, &sem, *fence);
+	radeon_semaphore_free(rdev, &sem, fence);
 
-	return r;
+	return fence;
 }
 
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
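With the int-plus-out-parameter convention gone, callers now receive the fence (or an encoded errno) directly, using the standard kernel ERR_PTR/IS_ERR helpers already visible in the error paths above. A sketch of the calling pattern this implies; the caller shown is illustrative, not taken from the patch:

```c
#include <linux/err.h>

/* illustrative caller, not from the patch: the error code now travels
 * inside the returned pointer instead of a separate int */
static int example_move(struct radeon_device *rdev, uint64_t src, uint64_t dst,
			unsigned num_gpu_pages, struct reservation_object *resv)
{
	struct radeon_fence *fence;

	fence = r600_copy_cpdma(rdev, src, dst, num_gpu_pages, resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);		/* unpack the negative errno */

	return radeon_fence_wait(fence, false);	/* block until the copy lands */
}
```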
@@ -2907,6 +3000,18 @@ static int r600_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	if (rdev->has_uvd) {
+		r = uvd_v1_0_resume(rdev);
+		if (!r) {
+			r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+			if (r) {
+				dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+			}
+		}
+		if (r)
+			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+	}
+
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
 		r = radeon_irq_kms_init(rdev);
@@ -2935,6 +3040,18 @@ static int r600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	if (rdev->has_uvd) {
+		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+		if (ring->ring_size) {
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+					     RADEON_CP_PACKET2);
+			if (!r)
+				r = uvd_v1_0_init(rdev);
+			if (r)
+				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+		}
+	}
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
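Note the split bring-up in r600_startup(): the first hunk resumes UVD and starts its fence ring early, the second creates and initializes the ring only once the CP is up, and ring_size doubles as the enable flag between the two phases. A condensed, self-contained illustration of that soft-fail pattern (names are hypothetical, only the shape is taken from the patch):

```c
/* illustration of the soft-fail pattern used above: a size field of
 * zero doubles as "feature disabled", so an optional block can fail
 * without failing the whole GPU bring-up */
struct opt_ring { unsigned ring_size; };

static int phase_one(struct opt_ring *ring, int hw_ok)
{
	if (!hw_ok)
		ring->ring_size = 0;	/* soft-disable, don't propagate */
	return 0;			/* startup continues either way */
}

static int phase_two(struct opt_ring *ring)
{
	if (!ring->ring_size)
		return 0;		/* quietly skip the disabled block */
	/* ... create and initialize the ring here ... */
	return 0;
}
```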
@@ -2994,6 +3111,10 @@ int r600_suspend(struct radeon_device *rdev)
 	radeon_pm_suspend(rdev);
 	r600_audio_fini(rdev);
 	r600_cp_stop(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
@@ -3073,6 +3194,14 @@ int r600_init(struct radeon_device *rdev)
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+	if (rdev->has_uvd) {
+		r = radeon_uvd_init(rdev);
+		if (!r) {
+			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+			r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+		}
+	}
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3102,6 +3231,10 @@ void r600_fini(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	r600_cp_fini(rdev);
 	r600_irq_fini(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_fini(rdev);
+	}
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
@@ -3235,7 +3368,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
 	r = radeon_bo_create(rdev, rdev->ih.ring_size,
 			     PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_GTT, 0,
-			     NULL, &rdev->ih.ring_obj);
+			     NULL, NULL, &rdev->ih.ring_obj);
 	if (r) {
 		DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
 		return r;