aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/r600.c
diff options
context:
space:
mode:
authorChristian König <deathsimple@vodafone.de>2011-10-23 06:56:27 -0400
committerDave Airlie <airlied@redhat.com>2011-12-20 14:50:56 -0500
commite32eb50dbe43862606a51caa94368ec6bd019434 (patch)
treea064cf4e60c0d42694e5dcc3759794b4b24b8e77 /drivers/gpu/drm/radeon/r600.c
parentd6d2730c71a5d41a121a7b567bf7ff9c5d4cd3ab (diff)
drm/radeon: rename struct radeon_cp to radeon_ring
That naming seems to make more sense, since we want to run more than just PM4 rings with it. Signed-off-by: Christian König <deathsimple@vodafone.de> Reviewed-by: Jerome Glisse <jglisse@redhat.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--drivers/gpu/drm/radeon/r600.c188
1 file changed, 94 insertions, 94 deletions
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aaf8cd42943e..d26e7c9f047c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
1344 return 0; 1344 return 0;
1345} 1345}
1346 1346
1347bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp) 1347bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1348{ 1348{
1349 u32 srbm_status; 1349 u32 srbm_status;
1350 u32 grbm_status; 1350 u32 grbm_status;
@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
1361 grbm_status = RREG32(R_008010_GRBM_STATUS); 1361 grbm_status = RREG32(R_008010_GRBM_STATUS);
1362 grbm_status2 = RREG32(R_008014_GRBM_STATUS2); 1362 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1363 if (!G_008010_GUI_ACTIVE(grbm_status)) { 1363 if (!G_008010_GUI_ACTIVE(grbm_status)) {
1364 r100_gpu_lockup_update(lockup, cp); 1364 r100_gpu_lockup_update(lockup, ring);
1365 return false; 1365 return false;
1366 } 1366 }
1367 /* force CP activities */ 1367 /* force CP activities */
1368 r = radeon_ring_lock(rdev, cp, 2); 1368 r = radeon_ring_lock(rdev, ring, 2);
1369 if (!r) { 1369 if (!r) {
1370 /* PACKET2 NOP */ 1370 /* PACKET2 NOP */
1371 radeon_ring_write(cp, 0x80000000); 1371 radeon_ring_write(ring, 0x80000000);
1372 radeon_ring_write(cp, 0x80000000); 1372 radeon_ring_write(ring, 0x80000000);
1373 radeon_ring_unlock_commit(rdev, cp); 1373 radeon_ring_unlock_commit(rdev, ring);
1374 } 1374 }
1375 cp->rptr = RREG32(cp->rptr_reg); 1375 ring->rptr = RREG32(ring->rptr_reg);
1376 return r100_gpu_cp_is_lockup(rdev, lockup, cp); 1376 return r100_gpu_cp_is_lockup(rdev, lockup, ring);
1377} 1377}
1378 1378
1379int r600_asic_reset(struct radeon_device *rdev) 1379int r600_asic_reset(struct radeon_device *rdev)
@@ -2144,28 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
2144 2144
2145int r600_cp_start(struct radeon_device *rdev) 2145int r600_cp_start(struct radeon_device *rdev)
2146{ 2146{
2147 struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; 2147 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2148 int r; 2148 int r;
2149 uint32_t cp_me; 2149 uint32_t cp_me;
2150 2150
2151 r = radeon_ring_lock(rdev, cp, 7); 2151 r = radeon_ring_lock(rdev, ring, 7);
2152 if (r) { 2152 if (r) {
2153 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); 2153 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2154 return r; 2154 return r;
2155 } 2155 }
2156 radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5)); 2156 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
2157 radeon_ring_write(cp, 0x1); 2157 radeon_ring_write(ring, 0x1);
2158 if (rdev->family >= CHIP_RV770) { 2158 if (rdev->family >= CHIP_RV770) {
2159 radeon_ring_write(cp, 0x0); 2159 radeon_ring_write(ring, 0x0);
2160 radeon_ring_write(cp, rdev->config.rv770.max_hw_contexts - 1); 2160 radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
2161 } else { 2161 } else {
2162 radeon_ring_write(cp, 0x3); 2162 radeon_ring_write(ring, 0x3);
2163 radeon_ring_write(cp, rdev->config.r600.max_hw_contexts - 1); 2163 radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
2164 } 2164 }
2165 radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2165 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2166 radeon_ring_write(cp, 0); 2166 radeon_ring_write(ring, 0);
2167 radeon_ring_write(cp, 0); 2167 radeon_ring_write(ring, 0);
2168 radeon_ring_unlock_commit(rdev, cp); 2168 radeon_ring_unlock_commit(rdev, ring);
2169 2169
2170 cp_me = 0xff; 2170 cp_me = 0xff;
2171 WREG32(R_0086D8_CP_ME_CNTL, cp_me); 2171 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2174,7 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
2174 2174
2175int r600_cp_resume(struct radeon_device *rdev) 2175int r600_cp_resume(struct radeon_device *rdev)
2176{ 2176{
2177 struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; 2177 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2178 u32 tmp; 2178 u32 tmp;
2179 u32 rb_bufsz; 2179 u32 rb_bufsz;
2180 int r; 2180 int r;
@@ -2186,7 +2186,7 @@ int r600_cp_resume(struct radeon_device *rdev)
2186 WREG32(GRBM_SOFT_RESET, 0); 2186 WREG32(GRBM_SOFT_RESET, 0);
2187 2187
2188 /* Set ring buffer size */ 2188 /* Set ring buffer size */
2189 rb_bufsz = drm_order(cp->ring_size / 8); 2189 rb_bufsz = drm_order(ring->ring_size / 8);
2190 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; 2190 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2191#ifdef __BIG_ENDIAN 2191#ifdef __BIG_ENDIAN
2192 tmp |= BUF_SWAP_32BIT; 2192 tmp |= BUF_SWAP_32BIT;
@@ -2200,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
2200 /* Initialize the ring buffer's read and write pointers */ 2200 /* Initialize the ring buffer's read and write pointers */
2201 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 2201 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2202 WREG32(CP_RB_RPTR_WR, 0); 2202 WREG32(CP_RB_RPTR_WR, 0);
2203 cp->wptr = 0; 2203 ring->wptr = 0;
2204 WREG32(CP_RB_WPTR, cp->wptr); 2204 WREG32(CP_RB_WPTR, ring->wptr);
2205 2205
2206 /* set the wb address whether it's enabled or not */ 2206 /* set the wb address whether it's enabled or not */
2207 WREG32(CP_RB_RPTR_ADDR, 2207 WREG32(CP_RB_RPTR_ADDR,
@@ -2219,36 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev)
2219 mdelay(1); 2219 mdelay(1);
2220 WREG32(CP_RB_CNTL, tmp); 2220 WREG32(CP_RB_CNTL, tmp);
2221 2221
2222 WREG32(CP_RB_BASE, cp->gpu_addr >> 8); 2222 WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
2223 WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); 2223 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2224 2224
2225 cp->rptr = RREG32(CP_RB_RPTR); 2225 ring->rptr = RREG32(CP_RB_RPTR);
2226 2226
2227 r600_cp_start(rdev); 2227 r600_cp_start(rdev);
2228 cp->ready = true; 2228 ring->ready = true;
2229 r = radeon_ring_test(rdev, cp); 2229 r = radeon_ring_test(rdev, ring);
2230 if (r) { 2230 if (r) {
2231 cp->ready = false; 2231 ring->ready = false;
2232 return r; 2232 return r;
2233 } 2233 }
2234 return 0; 2234 return 0;
2235} 2235}
2236 2236
2237void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size) 2237void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
2238{ 2238{
2239 u32 rb_bufsz; 2239 u32 rb_bufsz;
2240 2240
2241 /* Align ring size */ 2241 /* Align ring size */
2242 rb_bufsz = drm_order(ring_size / 8); 2242 rb_bufsz = drm_order(ring_size / 8);
2243 ring_size = (1 << (rb_bufsz + 1)) * 4; 2243 ring_size = (1 << (rb_bufsz + 1)) * 4;
2244 cp->ring_size = ring_size; 2244 ring->ring_size = ring_size;
2245 cp->align_mask = 16 - 1; 2245 ring->align_mask = 16 - 1;
2246} 2246}
2247 2247
2248void r600_cp_fini(struct radeon_device *rdev) 2248void r600_cp_fini(struct radeon_device *rdev)
2249{ 2249{
2250 r600_cp_stop(rdev); 2250 r600_cp_stop(rdev);
2251 radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]); 2251 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
2252} 2252}
2253 2253
2254 2254
@@ -2267,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev)
2267 } 2267 }
2268} 2268}
2269 2269
2270int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp) 2270int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2271{ 2271{
2272 uint32_t scratch; 2272 uint32_t scratch;
2273 uint32_t tmp = 0; 2273 uint32_t tmp = 0;
2274 unsigned i, ridx = radeon_ring_index(rdev, cp); 2274 unsigned i, ridx = radeon_ring_index(rdev, ring);
2275 int r; 2275 int r;
2276 2276
2277 r = radeon_scratch_get(rdev, &scratch); 2277 r = radeon_scratch_get(rdev, &scratch);
@@ -2280,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
2280 return r; 2280 return r;
2281 } 2281 }
2282 WREG32(scratch, 0xCAFEDEAD); 2282 WREG32(scratch, 0xCAFEDEAD);
2283 r = radeon_ring_lock(rdev, cp, 3); 2283 r = radeon_ring_lock(rdev, ring, 3);
2284 if (r) { 2284 if (r) {
2285 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r); 2285 DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
2286 radeon_scratch_free(rdev, scratch); 2286 radeon_scratch_free(rdev, scratch);
2287 return r; 2287 return r;
2288 } 2288 }
2289 radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2289 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2290 radeon_ring_write(cp, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2290 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2291 radeon_ring_write(cp, 0xDEADBEEF); 2291 radeon_ring_write(ring, 0xDEADBEEF);
2292 radeon_ring_unlock_commit(rdev, cp); 2292 radeon_ring_unlock_commit(rdev, ring);
2293 for (i = 0; i < rdev->usec_timeout; i++) { 2293 for (i = 0; i < rdev->usec_timeout; i++) {
2294 tmp = RREG32(scratch); 2294 tmp = RREG32(scratch);
2295 if (tmp == 0xDEADBEEF) 2295 if (tmp == 0xDEADBEEF)
@@ -2310,62 +2310,62 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
2310void r600_fence_ring_emit(struct radeon_device *rdev, 2310void r600_fence_ring_emit(struct radeon_device *rdev,
2311 struct radeon_fence *fence) 2311 struct radeon_fence *fence)
2312{ 2312{
2313 struct radeon_cp *cp = &rdev->cp[fence->ring]; 2313 struct radeon_ring *ring = &rdev->ring[fence->ring];
2314 2314
2315 if (rdev->wb.use_event) { 2315 if (rdev->wb.use_event) {
2316 u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + 2316 u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
2317 (u64)(rdev->fence_drv[fence->ring].scratch_reg - rdev->scratch.reg_base); 2317 (u64)(rdev->fence_drv[fence->ring].scratch_reg - rdev->scratch.reg_base);
2318 /* flush read cache over gart */ 2318 /* flush read cache over gart */
2319 radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2319 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2320 radeon_ring_write(cp, PACKET3_TC_ACTION_ENA | 2320 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2321 PACKET3_VC_ACTION_ENA | 2321 PACKET3_VC_ACTION_ENA |
2322 PACKET3_SH_ACTION_ENA); 2322 PACKET3_SH_ACTION_ENA);
2323 radeon_ring_write(cp, 0xFFFFFFFF); 2323 radeon_ring_write(ring, 0xFFFFFFFF);
2324 radeon_ring_write(cp, 0); 2324 radeon_ring_write(ring, 0);
2325 radeon_ring_write(cp, 10); /* poll interval */ 2325 radeon_ring_write(ring, 10); /* poll interval */
2326 /* EVENT_WRITE_EOP - flush caches, send int */ 2326 /* EVENT_WRITE_EOP - flush caches, send int */
2327 radeon_ring_write(cp, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); 2327 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
2328 radeon_ring_write(cp, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); 2328 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
2329 radeon_ring_write(cp, addr & 0xffffffff); 2329 radeon_ring_write(ring, addr & 0xffffffff);
2330 radeon_ring_write(cp, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); 2330 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
2331 radeon_ring_write(cp, fence->seq); 2331 radeon_ring_write(ring, fence->seq);
2332 radeon_ring_write(cp, 0); 2332 radeon_ring_write(ring, 0);
2333 } else { 2333 } else {
2334 /* flush read cache over gart */ 2334 /* flush read cache over gart */
2335 radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3)); 2335 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
2336 radeon_ring_write(cp, PACKET3_TC_ACTION_ENA | 2336 radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
2337 PACKET3_VC_ACTION_ENA | 2337 PACKET3_VC_ACTION_ENA |
2338 PACKET3_SH_ACTION_ENA); 2338 PACKET3_SH_ACTION_ENA);
2339 radeon_ring_write(cp, 0xFFFFFFFF); 2339 radeon_ring_write(ring, 0xFFFFFFFF);
2340 radeon_ring_write(cp, 0); 2340 radeon_ring_write(ring, 0);
2341 radeon_ring_write(cp, 10); /* poll interval */ 2341 radeon_ring_write(ring, 10); /* poll interval */
2342 radeon_ring_write(cp, PACKET3(PACKET3_EVENT_WRITE, 0)); 2342 radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
2343 radeon_ring_write(cp, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); 2343 radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
2344 /* wait for 3D idle clean */ 2344 /* wait for 3D idle clean */
2345 radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2345 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2346 radeon_ring_write(cp, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2346 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2347 radeon_ring_write(cp, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); 2347 radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2348 /* Emit fence sequence & fire IRQ */ 2348 /* Emit fence sequence & fire IRQ */
2349 radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2349 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2350 radeon_ring_write(cp, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2350 radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2351 radeon_ring_write(cp, fence->seq); 2351 radeon_ring_write(ring, fence->seq);
2352 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ 2352 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2353 radeon_ring_write(cp, PACKET0(CP_INT_STATUS, 0)); 2353 radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
2354 radeon_ring_write(cp, RB_INT_STAT); 2354 radeon_ring_write(ring, RB_INT_STAT);
2355 } 2355 }
2356} 2356}
2357 2357
2358void r600_semaphore_ring_emit(struct radeon_device *rdev, 2358void r600_semaphore_ring_emit(struct radeon_device *rdev,
2359 struct radeon_cp *cp, 2359 struct radeon_ring *ring,
2360 struct radeon_semaphore *semaphore, 2360 struct radeon_semaphore *semaphore,
2361 bool emit_wait) 2361 bool emit_wait)
2362{ 2362{
2363 uint64_t addr = semaphore->gpu_addr; 2363 uint64_t addr = semaphore->gpu_addr;
2364 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; 2364 unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
2365 2365
2366 radeon_ring_write(cp, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); 2366 radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
2367 radeon_ring_write(cp, addr & 0xffffffff); 2367 radeon_ring_write(ring, addr & 0xffffffff);
2368 radeon_ring_write(cp, (upper_32_bits(addr) & 0xff) | sel); 2368 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2369} 2369}
2370 2370
2371int r600_copy_blit(struct radeon_device *rdev, 2371int r600_copy_blit(struct radeon_device *rdev,
@@ -2420,7 +2420,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2420 2420
2421int r600_startup(struct radeon_device *rdev) 2421int r600_startup(struct radeon_device *rdev)
2422{ 2422{
2423 struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; 2423 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2424 int r; 2424 int r;
2425 2425
2426 /* enable pcie gen2 link */ 2426 /* enable pcie gen2 link */
@@ -2468,7 +2468,7 @@ int r600_startup(struct radeon_device *rdev)
2468 } 2468 }
2469 r600_irq_set(rdev); 2469 r600_irq_set(rdev);
2470 2470
2471 r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET, 2471 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2472 R600_CP_RB_RPTR, R600_CP_RB_WPTR); 2472 R600_CP_RB_RPTR, R600_CP_RB_WPTR);
2473 2473
2474 if (r) 2474 if (r)
@@ -2534,7 +2534,7 @@ int r600_suspend(struct radeon_device *rdev)
2534 r600_audio_fini(rdev); 2534 r600_audio_fini(rdev);
2535 /* FIXME: we should wait for ring to be empty */ 2535 /* FIXME: we should wait for ring to be empty */
2536 r600_cp_stop(rdev); 2536 r600_cp_stop(rdev);
2537 rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false; 2537 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2538 r600_irq_suspend(rdev); 2538 r600_irq_suspend(rdev);
2539 radeon_wb_disable(rdev); 2539 radeon_wb_disable(rdev);
2540 r600_pcie_gart_disable(rdev); 2540 r600_pcie_gart_disable(rdev);
@@ -2609,8 +2609,8 @@ int r600_init(struct radeon_device *rdev)
2609 if (r) 2609 if (r)
2610 return r; 2610 return r;
2611 2611
2612 rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 2612 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2613 r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 2613 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2614 2614
2615 rdev->ih.ring_obj = NULL; 2615 rdev->ih.ring_obj = NULL;
2616 r600_ih_ring_init(rdev, 64 * 1024); 2616 r600_ih_ring_init(rdev, 64 * 1024);
@@ -2677,17 +2677,17 @@ void r600_fini(struct radeon_device *rdev)
2677 */ 2677 */
2678void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 2678void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2679{ 2679{
2680 struct radeon_cp *cp = &rdev->cp[ib->fence->ring]; 2680 struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
2681 2681
2682 /* FIXME: implement */ 2682 /* FIXME: implement */
2683 radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 2683 radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2684 radeon_ring_write(cp, 2684 radeon_ring_write(ring,
2685#ifdef __BIG_ENDIAN 2685#ifdef __BIG_ENDIAN
2686 (2 << 0) | 2686 (2 << 0) |
2687#endif 2687#endif
2688 (ib->gpu_addr & 0xFFFFFFFC)); 2688 (ib->gpu_addr & 0xFFFFFFFC));
2689 radeon_ring_write(cp, upper_32_bits(ib->gpu_addr) & 0xFF); 2689 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
2690 radeon_ring_write(cp, ib->length_dw); 2690 radeon_ring_write(ring, ib->length_dw);
2691} 2691}
2692 2692
2693int r600_ib_test(struct radeon_device *rdev, int ring) 2693int r600_ib_test(struct radeon_device *rdev, int ring)
@@ -3518,22 +3518,22 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3518 struct drm_info_node *node = (struct drm_info_node *) m->private; 3518 struct drm_info_node *node = (struct drm_info_node *) m->private;
3519 struct drm_device *dev = node->minor->dev; 3519 struct drm_device *dev = node->minor->dev;
3520 struct radeon_device *rdev = dev->dev_private; 3520 struct radeon_device *rdev = dev->dev_private;
3521 struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; 3521 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3522 unsigned count, i, j; 3522 unsigned count, i, j;
3523 3523
3524 radeon_ring_free_size(rdev, cp); 3524 radeon_ring_free_size(rdev, ring);
3525 count = (cp->ring_size / 4) - cp->ring_free_dw; 3525 count = (ring->ring_size / 4) - ring->ring_free_dw;
3526 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); 3526 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3527 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR)); 3527 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3528 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR)); 3528 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3529 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", cp->wptr); 3529 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", ring->wptr);
3530 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", cp->rptr); 3530 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", ring->rptr);
3531 seq_printf(m, "%u free dwords in ring\n", cp->ring_free_dw); 3531 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
3532 seq_printf(m, "%u dwords in ring\n", count); 3532 seq_printf(m, "%u dwords in ring\n", count);
3533 i = cp->rptr; 3533 i = ring->rptr;
3534 for (j = 0; j <= count; j++) { 3534 for (j = 0; j <= count; j++) {
3535 seq_printf(m, "r[%04d]=0x%08x\n", i, cp->ring[i]); 3535 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
3536 i = (i + 1) & cp->ptr_mask; 3536 i = (i + 1) & ring->ptr_mask;
3537 } 3537 }
3538 return 0; 3538 return 0;
3539} 3539}