Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	116
1 file changed, 37 insertions(+), 79 deletions(-)
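Note: this commit drops r600's private write-back (WB) buffer helpers in favor of the shared radeon_wb_init()/radeon_wb_disable()/radeon_wb_fini() infrastructure, and points the CP read pointer, the scratch registers and the IH write pointer at fixed offsets inside one common WB page. As a rough guide to what the shared init presumably does, here is a sketch reconstructed from the r600_wb_enable() body removed further down; the *_sketch helper names are hypothetical, and the real implementation lives in radeon_device.c, not in this diff:

/* Sketch of the shared WB setup/teardown, reconstructed from the
 * r600_wb_enable()/r600_wb_fini() bodies this patch removes; assumed
 * to match the real radeon_wb_*() helpers only in spirit. */
static void radeon_wb_fini_sketch(struct radeon_device *rdev)
{
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

static int radeon_wb_init_sketch(struct radeon_device *rdev)
{
	int r;

	/* one GTT page holds every writeback slot: the CP read pointer,
	 * the scratch registers and the IH write pointer, each at a fixed
	 * byte offset (RADEON_WB_CP_RPTR_OFFSET, RADEON_WB_SCRATCH_OFFSET,
	 * R600_WB_IH_WPTR_OFFSET) */
	r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini_sketch(rdev);
		return r;
	}
	/* pin it in GTT so the GPU can DMA pointer updates into it */
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini_sketch(rdev);
		return r;
	}
	/* CPU-visible mapping the driver polls instead of doing MMIO reads */
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini_sketch(rdev);
		return r;
	}
	rdev->wb.enabled = true;	/* assumed; the flag is tested throughout this patch */
	return 0;
}

Once rdev->wb.enabled is set, consumers read ring pointers from plain memory (e.g. rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4] in r600_get_ih_wptr() below) and fall back to an RREG32() MMIO read when write-back is disabled.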
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 927509ff349a..fbce58b2cd04 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1918,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 void r600_cp_stop(struct radeon_device *rdev)
 {
 	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
+	WREG32(SCRATCH_UMSK, 0);
 }
 
 int r600_init_microcode(struct radeon_device *rdev)
@@ -2150,7 +2151,7 @@ int r600_cp_resume(struct radeon_device *rdev)
 
 	/* Set ring buffer size */
 	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
-	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
 #endif
@@ -2164,8 +2165,19 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
 	WREG32(CP_RB_WPTR, 0);
-	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
-	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
+
+	/* set the wb address whether it's enabled or not */
+	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
+	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
+
+	if (rdev->wb.enabled)
+		WREG32(SCRATCH_UMSK, 0xff);
+	else {
+		tmp |= RB_NO_UPDATE;
+		WREG32(SCRATCH_UMSK, 0);
+	}
+
 	mdelay(1);
 	WREG32(CP_RB_CNTL, tmp);
 
@@ -2217,9 +2229,10 @@ void r600_scratch_init(struct radeon_device *rdev)
 	int i;
 
 	rdev->scratch.num_reg = 7;
+	rdev->scratch.reg_base = SCRATCH_REG0;
 	for (i = 0; i < rdev->scratch.num_reg; i++) {
 		rdev->scratch.free[i] = true;
-		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
+		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
 	}
 }
 
@@ -2263,70 +2276,6 @@ int r600_ring_test(struct radeon_device *rdev)
 	return r;
 }
 
-void r600_wb_disable(struct radeon_device *rdev)
-{
-	int r;
-
-	WREG32(SCRATCH_UMSK, 0);
-	if (rdev->wb.wb_obj) {
-		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-		if (unlikely(r != 0))
-			return;
-		radeon_bo_kunmap(rdev->wb.wb_obj);
-		radeon_bo_unpin(rdev->wb.wb_obj);
-		radeon_bo_unreserve(rdev->wb.wb_obj);
-	}
-}
-
-void r600_wb_fini(struct radeon_device *rdev)
-{
-	r600_wb_disable(rdev);
-	if (rdev->wb.wb_obj) {
-		radeon_bo_unref(&rdev->wb.wb_obj);
-		rdev->wb.wb = NULL;
-		rdev->wb.wb_obj = NULL;
-	}
-}
-
-int r600_wb_enable(struct radeon_device *rdev)
-{
-	int r;
-
-	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
-				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
-		if (r) {
-			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
-			return r;
-		}
-		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-		if (unlikely(r != 0)) {
-			r600_wb_fini(rdev);
-			return r;
-		}
-		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-				&rdev->wb.gpu_addr);
-		if (r) {
-			radeon_bo_unreserve(rdev->wb.wb_obj);
-			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-			r600_wb_fini(rdev);
-			return r;
-		}
-		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-		radeon_bo_unreserve(rdev->wb.wb_obj);
-		if (r) {
-			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-			r600_wb_fini(rdev);
-			return r;
-		}
-	}
-	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
-	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
-	WREG32(SCRATCH_UMSK, 0xff);
-	return 0;
-}
-
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
@@ -2427,6 +2376,11 @@ int r600_startup(struct radeon_device *rdev)
 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
 	}
 
+	/* allocate wb buffer */
+	r = radeon_wb_init(rdev);
+	if (r)
+		return r;
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -2445,8 +2399,7 @@ int r600_startup(struct radeon_device *rdev)
 	r = r600_cp_resume(rdev);
 	if (r)
 		return r;
-	/* write back buffer are not vital so don't worry about failure */
-	r600_wb_enable(rdev);
+
 	return 0;
 }
 
@@ -2505,7 +2458,7 @@ int r600_suspend(struct radeon_device *rdev)
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
 	r600_irq_suspend(rdev);
-	r600_wb_disable(rdev);
+	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
 	if (rdev->r600_blit.shader_obj) {
@@ -2602,8 +2555,8 @@ int r600_init(struct radeon_device *rdev)
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r600_cp_fini(rdev);
-		r600_wb_fini(rdev);
 		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
 		radeon_irq_kms_fini(rdev);
 		r600_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
@@ -2633,8 +2586,8 @@ void r600_fini(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r600_cp_fini(rdev);
-	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
+	radeon_wb_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	r600_pcie_gart_fini(rdev);
 	radeon_agp_fini(rdev);
@@ -2969,10 +2922,13 @@ int r600_irq_init(struct radeon_device *rdev)
 	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
 		      IH_WPTR_OVERFLOW_CLEAR |
 		      (rb_bufsz << 1));
-	/* WPTR writeback, not yet */
-	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
-	WREG32(IH_RB_WPTR_ADDR_LO, 0);
-	WREG32(IH_RB_WPTR_ADDR_HI, 0);
+
+	if (rdev->wb.enabled)
+		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
+
+	/* set the writeback address whether it's enabled or not */
+	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
 
 	WREG32(IH_RB_CNTL, ih_rb_cntl);
 
@@ -3230,8 +3186,10 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
 {
 	u32 wptr, tmp;
 
-	/* XXX use writeback */
-	wptr = RREG32(IH_RB_WPTR);
+	if (rdev->wb.enabled)
+		wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
+	else
+		wptr = RREG32(IH_RB_WPTR);
 
 	if (wptr & RB_OVERFLOW) {
 		/* When a ring buffer overflow happen start parsing interrupt