diff options
Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
-rw-r--r-- | drivers/gpu/drm/radeon/evergreen.c | 36 |
1 files changed, 26 insertions, 10 deletions
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 79082d4398ae..e47d221e24ac 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -731,7 +731,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
731 | 731 | ||
732 | /* Set ring buffer size */ | 732 | /* Set ring buffer size */ |
733 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); | 733 | rb_bufsz = drm_order(rdev->cp.ring_size / 8); |
734 | tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | 734 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
735 | #ifdef __BIG_ENDIAN | 735 | #ifdef __BIG_ENDIAN |
736 | tmp |= BUF_SWAP_32BIT; | 736 | tmp |= BUF_SWAP_32BIT; |
737 | #endif | 737 | #endif |
@@ -745,8 +745,19 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
745 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 745 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
746 | WREG32(CP_RB_RPTR_WR, 0); | 746 | WREG32(CP_RB_RPTR_WR, 0); |
747 | WREG32(CP_RB_WPTR, 0); | 747 | WREG32(CP_RB_WPTR, 0); |
748 | WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF); | 748 | |
749 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr)); | 749 | /* set the wb address whether it's enabled or not */ |
750 | WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); | ||
751 | WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); | ||
752 | WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); | ||
753 | |||
754 | if (rdev->wb.enabled) | ||
755 | WREG32(SCRATCH_UMSK, 0xff); | ||
756 | else { | ||
757 | tmp |= RB_NO_UPDATE; | ||
758 | WREG32(SCRATCH_UMSK, 0); | ||
759 | } | ||
760 | |||
750 | mdelay(1); | 761 | mdelay(1); |
751 | WREG32(CP_RB_CNTL, tmp); | 762 | WREG32(CP_RB_CNTL, tmp); |
752 | 763 | ||
@@ -1759,8 +1770,10 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev) | |||
1759 | { | 1770 | { |
1760 | u32 wptr, tmp; | 1771 | u32 wptr, tmp; |
1761 | 1772 | ||
1762 | /* XXX use writeback */ | 1773 | if (rdev->wb.enabled) |
1763 | wptr = RREG32(IH_RB_WPTR); | 1774 | wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]; |
1775 | else | ||
1776 | wptr = RREG32(IH_RB_WPTR); | ||
1764 | 1777 | ||
1765 | if (wptr & RB_OVERFLOW) { | 1778 | if (wptr & RB_OVERFLOW) { |
1766 | /* When a ring buffer overflow happen start parsing interrupt | 1779 | /* When a ring buffer overflow happen start parsing interrupt |
@@ -2068,6 +2081,11 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2068 | } | 2081 | } |
2069 | #endif | 2082 | #endif |
2070 | 2083 | ||
2084 | /* allocate wb buffer */ | ||
2085 | r = radeon_wb_init(rdev); | ||
2086 | if (r) | ||
2087 | return r; | ||
2088 | |||
2071 | /* Enable IRQ */ | 2089 | /* Enable IRQ */ |
2072 | r = r600_irq_init(rdev); | 2090 | r = r600_irq_init(rdev); |
2073 | if (r) { | 2091 | if (r) { |
@@ -2086,8 +2104,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2086 | r = evergreen_cp_resume(rdev); | 2104 | r = evergreen_cp_resume(rdev); |
2087 | if (r) | 2105 | if (r) |
2088 | return r; | 2106 | return r; |
2089 | /* write back buffer are not vital so don't worry about failure */ | ||
2090 | r600_wb_enable(rdev); | ||
2091 | 2107 | ||
2092 | return 0; | 2108 | return 0; |
2093 | } | 2109 | } |
@@ -2128,7 +2144,7 @@ int evergreen_suspend(struct radeon_device *rdev) | |||
2128 | r700_cp_stop(rdev); | 2144 | r700_cp_stop(rdev); |
2129 | rdev->cp.ready = false; | 2145 | rdev->cp.ready = false; |
2130 | evergreen_irq_suspend(rdev); | 2146 | evergreen_irq_suspend(rdev); |
2131 | r600_wb_disable(rdev); | 2147 | radeon_wb_disable(rdev); |
2132 | evergreen_pcie_gart_disable(rdev); | 2148 | evergreen_pcie_gart_disable(rdev); |
2133 | #if 0 | 2149 | #if 0 |
2134 | /* unpin shaders bo */ | 2150 | /* unpin shaders bo */ |
@@ -2245,8 +2261,8 @@ int evergreen_init(struct radeon_device *rdev) | |||
2245 | if (r) { | 2261 | if (r) { |
2246 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 2262 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2247 | r700_cp_fini(rdev); | 2263 | r700_cp_fini(rdev); |
2248 | r600_wb_fini(rdev); | ||
2249 | r600_irq_fini(rdev); | 2264 | r600_irq_fini(rdev); |
2265 | radeon_wb_fini(rdev); | ||
2250 | radeon_irq_kms_fini(rdev); | 2266 | radeon_irq_kms_fini(rdev); |
2251 | evergreen_pcie_gart_fini(rdev); | 2267 | evergreen_pcie_gart_fini(rdev); |
2252 | rdev->accel_working = false; | 2268 | rdev->accel_working = false; |
@@ -2270,8 +2286,8 @@ void evergreen_fini(struct radeon_device *rdev) | |||
2270 | { | 2286 | { |
2271 | /*r600_blit_fini(rdev);*/ | 2287 | /*r600_blit_fini(rdev);*/ |
2272 | r700_cp_fini(rdev); | 2288 | r700_cp_fini(rdev); |
2273 | r600_wb_fini(rdev); | ||
2274 | r600_irq_fini(rdev); | 2289 | r600_irq_fini(rdev); |
2290 | radeon_wb_fini(rdev); | ||
2275 | radeon_irq_kms_fini(rdev); | 2291 | radeon_irq_kms_fini(rdev); |
2276 | evergreen_pcie_gart_fini(rdev); | 2292 | evergreen_pcie_gart_fini(rdev); |
2277 | radeon_gem_fini(rdev); | 2293 | radeon_gem_fini(rdev); |