author	Christian König <deathsimple@vodafone.de>	2011-10-23 06:56:27 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-12-20 14:50:56 -0500
commit	e32eb50dbe43862606a51caa94368ec6bd019434 (patch)
tree	a064cf4e60c0d42694e5dcc3759794b4b24b8e77 /drivers
parent	d6d2730c71a5d41a121a7b567bf7ff9c5d4cd3ab (diff)
drm/radeon: rename struct radeon_cp to radeon_ring
That naming makes more sense, since we want to run
more than just PM4 rings with it.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
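
For context, a minimal sketch of the renamed structure, inferred only from the fields this diff touches (ring_obj, ring_size, gpu_addr, rptr, rptr_reg, wptr, ready). The authoritative definition lives in drivers/gpu/drm/radeon/radeon.h and carries more members than shown here; exact types and ordering may differ:

struct radeon_ring {
	struct radeon_bo	*ring_obj;	/* BO backing the ring buffer */
	unsigned		ring_size;	/* ring size in bytes */
	uint64_t		gpu_addr;	/* GPU address of the ring */
	uint32_t		rptr;		/* cached read pointer */
	uint32_t		rptr_reg;	/* MMIO register holding rptr */
	uint32_t		wptr;		/* write pointer */
	bool			ready;		/* ring initialized and tested */
	/* ... further members omitted ... */
};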
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c           | 124
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c  | 250
-rw-r--r--  drivers/gpu/drm/radeon/ni.c                  | 146
-rw-r--r--  drivers/gpu/drm/radeon/r100.c                | 162
-rw-r--r--  drivers/gpu/drm/radeon/r200.c                |  22
-rw-r--r--  drivers/gpu/drm/radeon/r300.c                | 128
-rw-r--r--  drivers/gpu/drm/radeon/r420.c                |  22
-rw-r--r--  drivers/gpu/drm/radeon/r600.c                | 188
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c       | 228
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h              |  50
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h         |  24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c       |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c        |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c          |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c           |  18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c         | 136
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c    |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c         |  42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c          |   6
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c               |  82
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c               |  12
21 files changed, 826 insertions(+), 826 deletions(-)
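
The change is mechanical: every caller switches from the rdev->cp array and radeon_cp locals to rdev->ring and radeon_ring. A condensed before/after sketch of the typical call site, assembled from calls that appear verbatim in the hunks below (illustrative only):

/* before: ring state lived in struct radeon_cp */
struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_lock(rdev, cp, ndw);
radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_unlock_commit(rdev, cp);

/* after: identical call sequence, renamed type and field */
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_lock(rdev, ring, ndw);
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_unlock_commit(rdev, ring);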
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index cb198aca9f5a..b19ace86121e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1311,20 +1311,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
+	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
 
 	/* set to DX10/11 mode */
-	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
-	radeon_ring_write(cp, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
 	/* FIXME: implement */
-	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(cp,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 0) |
 #endif
 			  (ib->gpu_addr & 0xFFFFFFFC));
-	radeon_ring_write(cp, upper_32_bits(ib->gpu_addr) & 0xFF);
-	radeon_ring_write(cp, ib->length_dw);
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw);
 }
 
 
@@ -1362,73 +1362,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 
 static int evergreen_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 	uint32_t cp_me;
 
-	r = radeon_ring_lock(rdev, cp, 7);
+	r = radeon_ring_lock(rdev, ring, 7);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
-	radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
-	radeon_ring_write(cp, 0x1);
-	radeon_ring_write(cp, 0x0);
-	radeon_ring_write(cp, rdev->config.evergreen.max_hw_contexts - 1);
-	radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	cp_me = 0xff;
 	WREG32(CP_ME_CNTL, cp_me);
 
-	r = radeon_ring_lock(rdev, cp, evergreen_default_size + 19);
+	r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
 
 	/* setup clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(cp, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
 	for (i = 0; i < evergreen_default_size; i++)
-		radeon_ring_write(cp, evergreen_default_state[i]);
+		radeon_ring_write(ring, evergreen_default_state[i]);
 
-	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(cp, PACKET3_PREAMBLE_END_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
 	/* set clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
 
 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(cp, 0xc0026f00);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
 
 	/* Clear consts */
-	radeon_ring_write(cp, 0xc0036f00);
-	radeon_ring_write(cp, 0x00000bc4);
-	radeon_ring_write(cp, 0xffffffff);
-	radeon_ring_write(cp, 0xffffffff);
-	radeon_ring_write(cp, 0xffffffff);
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
 
-	radeon_ring_write(cp, 0xc0026900);
-	radeon_ring_write(cp, 0x00000316);
-	radeon_ring_write(cp, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-	radeon_ring_write(cp, 0x00000010); /*  */
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /*  */
 
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	return 0;
 }
 
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -1446,7 +1446,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	RREG32(GRBM_SOFT_RESET);
 
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(cp->ring_size / 8);
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
@@ -1460,8 +1460,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	cp->wptr = 0;
-	WREG32(CP_RB_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB_WPTR, ring->wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -1479,16 +1479,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB_CNTL, tmp);
 
-	WREG32(CP_RB_BASE, cp->gpu_addr >> 8);
+	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
-	cp->rptr = RREG32(CP_RB_RPTR);
+	ring->rptr = RREG32(CP_RB_RPTR);
 
 	evergreen_cp_start(rdev);
-	cp->ready = true;
-	r = radeon_ring_test(rdev, cp);
+	ring->ready = true;
+	r = radeon_ring_test(rdev, ring);
 	if (r) {
-		cp->ready = false;
+		ring->ready = false;
 		return r;
 	}
 	return 0;
@@ -2357,7 +2357,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 srbm_status;
 	u32 grbm_status;
@@ -2370,19 +2370,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, cp);
+		r100_gpu_lockup_update(lockup, ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, cp, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_unlock_commit(rdev, cp);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_unlock_commit(rdev, ring);
 	}
-	cp->rptr = RREG32(CP_RB_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, lockup, cp);
+	ring->rptr = RREG32(CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -3056,7 +3056,7 @@ restart_ih:
 
 static int evergreen_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -3120,7 +3120,7 @@ static int evergreen_startup(struct radeon_device *rdev)
 	}
 	evergreen_irq_set(rdev);
 
-	r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 			     R600_CP_RB_RPTR, R600_CP_RB_WPTR);
 	if (r)
 		return r;
@@ -3168,11 +3168,11 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
-	cp->ready = false;
+	ring->ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	evergreen_pcie_gart_disable(rdev);
@@ -3251,8 +3251,8 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 56f5d92cce24..2379849515c7 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -49,7 +49,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
 		  int w, int h, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;
 
@@ -63,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
-	radeon_ring_write(cp, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, pitch);
-	radeon_ring_write(cp, slice);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, cb_color_info);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, (w - 1) | ((h - 1) << 16));
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, pitch);
+	radeon_ring_write(ring, slice);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, cb_color_info);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
 }
 
 /* emits 5dw */
@@ -88,7 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		    u32 sync_type, u32 size,
 		    u64 mc_addr)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 
 	if (size == 0xffffffff)
@@ -101,40 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		 * to the RB directly. For IBs, the CP programs this as part of the
 		 * surface_sync packet.
 		 */
-		radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-		radeon_ring_write(cp, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(cp, 0); /* CP_COHER_CNTL2 */
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
 	}
-	radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3));
-	radeon_ring_write(cp, sync_type);
-	radeon_ring_write(cp, cp_coher_size);
-	radeon_ring_write(cp, mc_addr >> 8);
-	radeon_ring_write(cp, 10); /* poll interval */
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, sync_type);
+	radeon_ring_write(ring, cp_coher_size);
+	radeon_ring_write(ring, mc_addr >> 8);
+	radeon_ring_write(ring, 10); /* poll interval */
 }
 
 /* emits 11dw + 1 surface sync = 16dw */
 static void
 set_shaders(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 
 	/* VS */
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
-	radeon_ring_write(cp, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, 2);
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 2);
+	radeon_ring_write(ring, 0);
 
 	/* PS */
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
-	radeon_ring_write(cp, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, 1);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 2);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, 1);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 2);
 
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
 	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -144,7 +144,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 
 	/* high addr, stride */
@@ -159,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
 		SQ_VTCX_SEL_W(SQ_SEL_W);
 
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 8));
-	radeon_ring_write(cp, 0x580);
-	radeon_ring_write(cp, gpu_addr & 0xffffffff);
-	radeon_ring_write(cp, 48 - 1); /* size */
-	radeon_ring_write(cp, sq_vtx_constant_word2);
-	radeon_ring_write(cp, sq_vtx_constant_word3);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0x580);
+	radeon_ring_write(ring, gpu_addr & 0xffffffff);
+	radeon_ring_write(ring, 48 - 1); /* size */
+	radeon_ring_write(ring, sq_vtx_constant_word2);
+	radeon_ring_write(ring, sq_vtx_constant_word3);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
 
 	if ((rdev->family == CHIP_CEDAR) ||
 	    (rdev->family == CHIP_PALM) ||
@@ -189,7 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
 		 u64 gpu_addr, u32 size)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
 	u32 sq_tex_resource_word4, sq_tex_resource_word7;
 
@@ -213,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
 	cp_set_surface_sync(rdev,
 			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 8));
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, sq_tex_resource_word0);
-	radeon_ring_write(cp, sq_tex_resource_word1);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, gpu_addr >> 8);
-	radeon_ring_write(cp, sq_tex_resource_word4);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, sq_tex_resource_word7);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word0);
+	radeon_ring_write(ring, sq_tex_resource_word1);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, gpu_addr >> 8);
+	radeon_ring_write(ring, sq_tex_resource_word4);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_tex_resource_word7);
 }
 
 /* emits 12 */
@@ -230,7 +230,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
 	     int x2, int y2)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	/* workaround some hw bugs */
 	if (x2 == 0)
 		x1 = 1;
@@ -241,44 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 		x2 = 2;
 	}
 
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, (x1 << 0) | (y1 << 16));
-	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(cp, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31));
-	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
-	radeon_ring_write(cp, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
-	radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31));
-	radeon_ring_write(cp, (x2 << 0) | (y2 << 16));
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 }
 
 /* emits 10 */
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(cp, DI_PT_RECTLIST);
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, DI_PT_RECTLIST);
 
-	radeon_ring_write(cp, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(cp,
+	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
 			  (2 << 2) |
 #endif
 			  DI_INDEX_SIZE_16_BIT);
 
-	radeon_ring_write(cp, PACKET3(PACKET3_NUM_INSTANCES, 0));
-	radeon_ring_write(cp, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+	radeon_ring_write(ring, 1);
 
-	radeon_ring_write(cp, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
-	radeon_ring_write(cp, 3);
-	radeon_ring_write(cp, DI_SRC_SEL_AUTO_INDEX);
+	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+	radeon_ring_write(ring, 3);
+	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
 }
 
@@ -286,7 +286,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
 	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
 	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -300,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
 	int dwords;
 
 	/* set clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
 
 	if (rdev->family < CHIP_CAYMAN) {
 		switch (rdev->family) {
@@ -558,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
 					NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
 
 	/* disable dyn gprs */
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(cp, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, 0);
 
 	/* setup LDS */
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
-	radeon_ring_write(cp, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(cp, 0x10001000);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, 0x10001000);
 
 	/* SQ config */
-	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 11));
-	radeon_ring_write(cp, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
-	radeon_ring_write(cp, sq_config);
-	radeon_ring_write(cp, sq_gpr_resource_mgmt_1);
-	radeon_ring_write(cp, sq_gpr_resource_mgmt_2);
-	radeon_ring_write(cp, sq_gpr_resource_mgmt_3);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, sq_thread_resource_mgmt);
-	radeon_ring_write(cp, sq_thread_resource_mgmt_2);
-	radeon_ring_write(cp, sq_stack_resource_mgmt_1);
-	radeon_ring_write(cp, sq_stack_resource_mgmt_2);
-	radeon_ring_write(cp, sq_stack_resource_mgmt_3);
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+	radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, sq_config);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+	radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, sq_thread_resource_mgmt);
+	radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+	radeon_ring_write(ring, sq_stack_resource_mgmt_3);
 	}
 
 	/* CONTEXT_CONTROL */
-	radeon_ring_write(cp, 0xc0012800);
-	radeon_ring_write(cp, 0x80000000);
-	radeon_ring_write(cp, 0x80000000);
+	radeon_ring_write(ring, 0xc0012800);
+	radeon_ring_write(ring, 0x80000000);
+	radeon_ring_write(ring, 0x80000000);
 
 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(cp, 0xc0026f00);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
 
 	/* SET_SAMPLER */
-	radeon_ring_write(cp, 0xc0036e00);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000012);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
+	radeon_ring_write(ring, 0xc0036e00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000012);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
 
 	/* set to DX10/11 mode */
-	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
-	radeon_ring_write(cp, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
 
 	/* emit an IB pointing at default state */
 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
-	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(cp, gpu_addr & 0xFFFFFFFC);
-	radeon_ring_write(cp, upper_32_bits(gpu_addr) & 0xFF);
-	radeon_ring_write(cp, dwords);
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(ring, dwords);
 
 }
 
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7c953579a405..022a606c783b 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1049,64 +1049,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
 
 static int cayman_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 
-	r = radeon_ring_lock(rdev, cp, 7);
+	r = radeon_ring_lock(rdev, ring, 7);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
-	radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
-	radeon_ring_write(cp, 0x1);
-	radeon_ring_write(cp, 0x0);
-	radeon_ring_write(cp, rdev->config.cayman.max_hw_contexts - 1);
-	radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-	radeon_ring_write(cp, 0);
-	radeon_ring_write(cp, 0);
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	cayman_cp_enable(rdev, true);
 
-	r = radeon_ring_lock(rdev, cp, cayman_default_size + 19);
+	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
 
 	/* setup clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(cp, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
 	for (i = 0; i < cayman_default_size; i++)
-		radeon_ring_write(cp, cayman_default_state[i]);
+		radeon_ring_write(ring, cayman_default_state[i]);
 
-	radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(cp, PACKET3_PREAMBLE_END_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
 	/* set clear context state */
-	radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(cp, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
 
 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(cp, 0xc0026f00);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
-	radeon_ring_write(cp, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
 
 	/* Clear consts */
-	radeon_ring_write(cp, 0xc0036f00);
-	radeon_ring_write(cp, 0x00000bc4);
-	radeon_ring_write(cp, 0xffffffff);
-	radeon_ring_write(cp, 0xffffffff);
-	radeon_ring_write(cp, 0xffffffff);
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
 
-	radeon_ring_write(cp, 0xc0026900);
-	radeon_ring_write(cp, 0x00000316);
-	radeon_ring_write(cp, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-	radeon_ring_write(cp, 0x00000010); /*  */
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /*  */
 
-	radeon_ring_unlock_commit(rdev, cp);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	/* XXX init other rings */
 
@@ -1116,12 +1116,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
 	cayman_cp_enable(rdev, false);
-	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 int cayman_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp;
+	struct radeon_ring *ring;
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -1147,8 +1147,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
-	cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
-	rb_bufsz = drm_order(cp->ring_size / 8);
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
@@ -1157,8 +1157,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-	cp->wptr = 0;
-	WREG32(CP_RB0_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1175,14 +1175,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB0_CNTL, tmp);
 
-	WREG32(CP_RB0_BASE, cp->gpu_addr >> 8);
+	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
 
-	cp->rptr = RREG32(CP_RB0_RPTR);
+	ring->rptr = RREG32(CP_RB0_RPTR);
 
 	/* ring1 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX];
-	rb_bufsz = drm_order(cp->ring_size / 8);
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
@@ -1191,8 +1191,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-	cp->wptr = 0;
-	WREG32(CP_RB1_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB1_WPTR, ring->wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1201,14 +1201,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB1_CNTL, tmp);
 
-	WREG32(CP_RB1_BASE, cp->gpu_addr >> 8);
+	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
 
-	cp->rptr = RREG32(CP_RB1_RPTR);
+	ring->rptr = RREG32(CP_RB1_RPTR);
 
 	/* ring2 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX];
-	rb_bufsz = drm_order(cp->ring_size / 8);
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
@@ -1217,8 +1217,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-	cp->wptr = 0;
-	WREG32(CP_RB2_WPTR, cp->wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB2_WPTR, ring->wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1227,28 +1227,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB2_CNTL, tmp);
 
-	WREG32(CP_RB2_BASE, cp->gpu_addr >> 8);
+	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
 
-	cp->rptr = RREG32(CP_RB2_RPTR);
+	ring->rptr = RREG32(CP_RB2_RPTR);
 
 	/* start the rings */
 	cayman_cp_start(rdev);
-	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = true;
-	rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
-	rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 	/* this only test cp0 */
-	r = radeon_ring_test(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
+	r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 	if (r) {
-		rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
-		rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
-		rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 		return r;
 	}
 
 	return 0;
 }
 
-bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 srbm_status;
 	u32 grbm_status;
@@ -1261,20 +1261,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, cp);
+		r100_gpu_lockup_update(lockup, ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, cp, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_write(cp, 0x80000000);
-		radeon_ring_unlock_commit(rdev, cp);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_unlock_commit(rdev, ring);
 	}
 	/* XXX deal with CP0,1,2 */
-	cp->rptr = RREG32(cp->rptr_reg);
-	return r100_gpu_cp_is_lockup(rdev, lockup, cp);
+	ring->rptr = RREG32(ring->rptr_reg);
+	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1343,7 +1343,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
 
 static int cayman_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -1393,7 +1393,7 @@ static int cayman_startup(struct radeon_device *rdev)
 	}
 	evergreen_irq_set(rdev);
 
-	r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
 			     CP_RB0_RPTR, CP_RB0_WPTR);
 	if (r)
 		return r;
@@ -1438,7 +1438,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
 	/* FIXME: we should wait for ring to be empty */
 	cayman_cp_enable(rdev, false);
-	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);
@@ -1455,7 +1455,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* This don't do much */
@@ -1508,8 +1508,8 @@ int cayman_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	cp->ring_obj = NULL;
-	r600_ring_init(rdev, cp, 1024 * 1024);
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 6ca20d7bf626..a40e893a7b69 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -811,31 +811,31 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) | |||
811 | void r100_fence_ring_emit(struct radeon_device *rdev, | 811 | void r100_fence_ring_emit(struct radeon_device *rdev, |
812 | struct radeon_fence *fence) | 812 | struct radeon_fence *fence) |
813 | { | 813 | { |
814 | struct radeon_cp *cp = &rdev->cp[fence->ring]; | 814 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
815 | 815 | ||
816 | /* We have to make sure that caches are flushed before | 816 | /* We have to make sure that caches are flushed before |
817 | * CPU might read something from VRAM. */ | 817 | * CPU might read something from VRAM. */ |
818 | radeon_ring_write(cp, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); | 818 | radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0)); |
819 | radeon_ring_write(cp, RADEON_RB3D_DC_FLUSH_ALL); | 819 | radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL); |
820 | radeon_ring_write(cp, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); | 820 | radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); |
821 | radeon_ring_write(cp, RADEON_RB3D_ZC_FLUSH_ALL); | 821 | radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL); |
822 | /* Wait until IDLE & CLEAN */ | 822 | /* Wait until IDLE & CLEAN */ |
823 | radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0)); | 823 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
824 | radeon_ring_write(cp, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); | 824 | radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); |
825 | radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0)); | 825 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
826 | radeon_ring_write(cp, rdev->config.r100.hdp_cntl | | 826 | radeon_ring_write(ring, rdev->config.r100.hdp_cntl | |
827 | RADEON_HDP_READ_BUFFER_INVALIDATE); | 827 | RADEON_HDP_READ_BUFFER_INVALIDATE); |
828 | radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0)); | 828 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
829 | radeon_ring_write(cp, rdev->config.r100.hdp_cntl); | 829 | radeon_ring_write(ring, rdev->config.r100.hdp_cntl); |
830 | /* Emit fence sequence & fire IRQ */ | 830 | /* Emit fence sequence & fire IRQ */ |
831 | radeon_ring_write(cp, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); | 831 | radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); |
832 | radeon_ring_write(cp, fence->seq); | 832 | radeon_ring_write(ring, fence->seq); |
833 | radeon_ring_write(cp, PACKET0(RADEON_GEN_INT_STATUS, 0)); | 833 | radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)); |
834 | radeon_ring_write(cp, RADEON_SW_INT_FIRE); | 834 | radeon_ring_write(ring, RADEON_SW_INT_FIRE); |
835 | } | 835 | } |
836 | 836 | ||
837 | void r100_semaphore_ring_emit(struct radeon_device *rdev, | 837 | void r100_semaphore_ring_emit(struct radeon_device *rdev, |
838 | struct radeon_cp *cp, | 838 | struct radeon_ring *ring, |
839 | struct radeon_semaphore *semaphore, | 839 | struct radeon_semaphore *semaphore, |
840 | bool emit_wait) | 840 | bool emit_wait) |
841 | { | 841 | { |
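The fence path above always has the same shape: flush the 2D/3D caches, wait for IDLE & CLEAN, then write the fence sequence number into a scratch register and fire a software interrupt so the CPU can notice. A minimal standalone sketch of the CPU-visible half of that contract (names illustrative, not the driver's API):

#include <stdint.h>
#include <stdio.h>

static uint32_t scratch_reg;               /* stands in for the fence scratch register */

static void emit_fence(uint32_t seq)
{
    /* hardware side: flush caches, wait for IDLE & CLEAN, then... */
    scratch_reg = seq;                     /* PACKET0(scratch_reg, 0); fence->seq */
}

static int fence_signaled(uint32_t seq)
{
    /* wrap-safe comparison: valid as long as seq lags by < 2^31 */
    return (int32_t)(scratch_reg - seq) >= 0;
}

int main(void)
{
    emit_fence(42);
    printf("fence 42 signaled: %d\n", fence_signaled(42));
    printf("fence 43 signaled: %d\n", fence_signaled(43));
    return 0;
}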
@@ -849,7 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
849 | unsigned num_gpu_pages, | 849 | unsigned num_gpu_pages, |
850 | struct radeon_fence *fence) | 850 | struct radeon_fence *fence) |
851 | { | 851 | { |
852 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 852 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
853 | uint32_t cur_pages; | 853 | uint32_t cur_pages; |
854 | uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; | 854 | uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE; |
855 | uint32_t pitch; | 855 | uint32_t pitch; |
@@ -867,7 +867,7 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
867 | 867 | ||
868 | /* Ask for enough room for blit + flush + fence */ | 868 | /* Ask for enough room for blit + flush + fence */ |
869 | ndw = 64 + (10 * num_loops); | 869 | ndw = 64 + (10 * num_loops); |
870 | r = radeon_ring_lock(rdev, cp, ndw); | 870 | r = radeon_ring_lock(rdev, ring, ndw); |
871 | if (r) { | 871 | if (r) { |
872 | DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); | 872 | DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw); |
873 | return -EINVAL; | 873 | return -EINVAL; |
@@ -881,8 +881,8 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
881 | 881 | ||
882 | /* pages run in the Y direction (height); | 882 | /* pages run in the Y direction (height); |
883 | page width runs in the X direction (width) */ | 883 | page width runs in the X direction (width) */ |
884 | radeon_ring_write(cp, PACKET3(PACKET3_BITBLT_MULTI, 8)); | 884 | radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8)); |
885 | radeon_ring_write(cp, | 885 | radeon_ring_write(ring, |
886 | RADEON_GMC_SRC_PITCH_OFFSET_CNTL | | 886 | RADEON_GMC_SRC_PITCH_OFFSET_CNTL | |
887 | RADEON_GMC_DST_PITCH_OFFSET_CNTL | | 887 | RADEON_GMC_DST_PITCH_OFFSET_CNTL | |
888 | RADEON_GMC_SRC_CLIPPING | | 888 | RADEON_GMC_SRC_CLIPPING | |
@@ -894,26 +894,26 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
894 | RADEON_DP_SRC_SOURCE_MEMORY | | 894 | RADEON_DP_SRC_SOURCE_MEMORY | |
895 | RADEON_GMC_CLR_CMP_CNTL_DIS | | 895 | RADEON_GMC_CLR_CMP_CNTL_DIS | |
896 | RADEON_GMC_WR_MSK_DIS); | 896 | RADEON_GMC_WR_MSK_DIS); |
897 | radeon_ring_write(cp, (pitch << 22) | (src_offset >> 10)); | 897 | radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10)); |
898 | radeon_ring_write(cp, (pitch << 22) | (dst_offset >> 10)); | 898 | radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10)); |
899 | radeon_ring_write(cp, (0x1fff) | (0x1fff << 16)); | 899 | radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); |
900 | radeon_ring_write(cp, 0); | 900 | radeon_ring_write(ring, 0); |
901 | radeon_ring_write(cp, (0x1fff) | (0x1fff << 16)); | 901 | radeon_ring_write(ring, (0x1fff) | (0x1fff << 16)); |
902 | radeon_ring_write(cp, num_gpu_pages); | 902 | radeon_ring_write(ring, num_gpu_pages); |
903 | radeon_ring_write(cp, num_gpu_pages); | 903 | radeon_ring_write(ring, num_gpu_pages); |
904 | radeon_ring_write(cp, cur_pages | (stride_pixels << 16)); | 904 | radeon_ring_write(ring, cur_pages | (stride_pixels << 16)); |
905 | } | 905 | } |
906 | radeon_ring_write(cp, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); | 906 | radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); |
907 | radeon_ring_write(cp, RADEON_RB2D_DC_FLUSH_ALL); | 907 | radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL); |
908 | radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0)); | 908 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
909 | radeon_ring_write(cp, | 909 | radeon_ring_write(ring, |
910 | RADEON_WAIT_2D_IDLECLEAN | | 910 | RADEON_WAIT_2D_IDLECLEAN | |
911 | RADEON_WAIT_HOST_IDLECLEAN | | 911 | RADEON_WAIT_HOST_IDLECLEAN | |
912 | RADEON_WAIT_DMA_GUI_IDLE); | 912 | RADEON_WAIT_DMA_GUI_IDLE); |
913 | if (fence) { | 913 | if (fence) { |
914 | r = radeon_fence_emit(rdev, fence); | 914 | r = radeon_fence_emit(rdev, fence); |
915 | } | 915 | } |
916 | radeon_ring_unlock_commit(rdev, cp); | 916 | radeon_ring_unlock_commit(rdev, ring); |
917 | return r; | 917 | return r; |
918 | } | 918 | } |
919 | 919 | ||
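The dword budget above ("64 + (10 * num_loops)") reserves fixed headroom for the trailing flush and fence plus roughly ten dwords per BITBLT_MULTI packet. A sketch of that arithmetic, assuming a per-packet cap of 8191 pages (the cap itself is elided between the hunks, so treat that constant as illustrative):

#include <stdio.h>

int main(void)
{
    unsigned num_gpu_pages = 100000;             /* pages to move */
    unsigned pages_per_packet = 8191;            /* assumed per-blit cap */
    unsigned num_loops =
        (num_gpu_pages + pages_per_packet - 1) / pages_per_packet;
    /* 64 dwords of headroom for flush + fence, ~10 per blit packet */
    unsigned ndw = 64 + 10 * num_loops;
    printf("loops=%u, ring dwords requested=%u\n", num_loops, ndw);
    return 0;
}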
@@ -934,20 +934,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev) | |||
934 | 934 | ||
935 | void r100_ring_start(struct radeon_device *rdev) | 935 | void r100_ring_start(struct radeon_device *rdev) |
936 | { | 936 | { |
937 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 937 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
938 | int r; | 938 | int r; |
939 | 939 | ||
940 | r = radeon_ring_lock(rdev, cp, 2); | 940 | r = radeon_ring_lock(rdev, ring, 2); |
941 | if (r) { | 941 | if (r) { |
942 | return; | 942 | return; |
943 | } | 943 | } |
944 | radeon_ring_write(cp, PACKET0(RADEON_ISYNC_CNTL, 0)); | 944 | radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); |
945 | radeon_ring_write(cp, | 945 | radeon_ring_write(ring, |
946 | RADEON_ISYNC_ANY2D_IDLE3D | | 946 | RADEON_ISYNC_ANY2D_IDLE3D | |
947 | RADEON_ISYNC_ANY3D_IDLE2D | | 947 | RADEON_ISYNC_ANY3D_IDLE2D | |
948 | RADEON_ISYNC_WAIT_IDLEGUI | | 948 | RADEON_ISYNC_WAIT_IDLEGUI | |
949 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); | 949 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); |
950 | radeon_ring_unlock_commit(rdev, cp); | 950 | radeon_ring_unlock_commit(rdev, ring); |
951 | } | 951 | } |
952 | 952 | ||
953 | 953 | ||
@@ -1048,7 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev) | |||
1048 | 1048 | ||
1049 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | 1049 | int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) |
1050 | { | 1050 | { |
1051 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 1051 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
1052 | unsigned rb_bufsz; | 1052 | unsigned rb_bufsz; |
1053 | unsigned rb_blksz; | 1053 | unsigned rb_blksz; |
1054 | unsigned max_fetch; | 1054 | unsigned max_fetch; |
@@ -1074,7 +1074,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1074 | rb_bufsz = drm_order(ring_size / 8); | 1074 | rb_bufsz = drm_order(ring_size / 8); |
1075 | ring_size = (1 << (rb_bufsz + 1)) * 4; | 1075 | ring_size = (1 << (rb_bufsz + 1)) * 4; |
1076 | r100_cp_load_microcode(rdev); | 1076 | r100_cp_load_microcode(rdev); |
1077 | r = radeon_ring_init(rdev, cp, ring_size, RADEON_WB_CP_RPTR_OFFSET, | 1077 | r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET, |
1078 | RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR); | 1078 | RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR); |
1079 | if (r) { | 1079 | if (r) { |
1080 | return r; | 1080 | return r; |
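drm_order() is essentially ceil(log2(x)), so the two lines above round the requested ring size up to a power of two before the ring is allocated. A standalone sketch of that rounding:

#include <stdio.h>

/* ceil(log2(size)), the same contract as drm_order() */
static unsigned order(unsigned long size)
{
    unsigned o = 0;
    while ((1ul << o) < size)
        o++;
    return o;
}

int main(void)
{
    unsigned ring_size = 1000 * 1000;                 /* requested bytes */
    unsigned rb_bufsz = order(ring_size / 8);
    unsigned aligned = (1u << (rb_bufsz + 1)) * 4;    /* power-of-two bytes */
    printf("rb_bufsz=%u, aligned ring size=%u bytes\n", rb_bufsz, aligned);
    return 0;
}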
@@ -1084,7 +1084,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1084 | rb_blksz = 9; | 1084 | rb_blksz = 9; |
1085 | /* cp will read 128 bytes at a time (4 dwords) */ | 1085 | /* cp will read 128 bytes at a time (4 dwords) */ |
1086 | max_fetch = 1; | 1086 | max_fetch = 1; |
1087 | cp->align_mask = 16 - 1; | 1087 | ring->align_mask = 16 - 1; |
1088 | /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */ | 1088 | /* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */ |
1089 | pre_write_timer = 64; | 1089 | pre_write_timer = 64; |
1090 | /* Force CP_RB_WPTR write if written more than one time before the | 1090 | /* Force CP_RB_WPTR write if written more than one time before the |
@@ -1114,13 +1114,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1114 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); | 1114 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE); |
1115 | 1115 | ||
1116 | /* Set ring address */ | 1116 | /* Set ring address */ |
1117 | DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)cp->gpu_addr); | 1117 | DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr); |
1118 | WREG32(RADEON_CP_RB_BASE, cp->gpu_addr); | 1118 | WREG32(RADEON_CP_RB_BASE, ring->gpu_addr); |
1119 | /* Force read & write ptr to 0 */ | 1119 | /* Force read & write ptr to 0 */ |
1120 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); | 1120 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE); |
1121 | WREG32(RADEON_CP_RB_RPTR_WR, 0); | 1121 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
1122 | cp->wptr = 0; | 1122 | ring->wptr = 0; |
1123 | WREG32(RADEON_CP_RB_WPTR, cp->wptr); | 1123 | WREG32(RADEON_CP_RB_WPTR, ring->wptr); |
1124 | 1124 | ||
1125 | /* set the wb address whether it's enabled or not */ | 1125 | /* set the wb address whether it's enabled or not */ |
1126 | WREG32(R_00070C_CP_RB_RPTR_ADDR, | 1126 | WREG32(R_00070C_CP_RB_RPTR_ADDR, |
@@ -1136,7 +1136,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1136 | 1136 | ||
1137 | WREG32(RADEON_CP_RB_CNTL, tmp); | 1137 | WREG32(RADEON_CP_RB_CNTL, tmp); |
1138 | udelay(10); | 1138 | udelay(10); |
1139 | cp->rptr = RREG32(RADEON_CP_RB_RPTR); | 1139 | ring->rptr = RREG32(RADEON_CP_RB_RPTR); |
1140 | /* Set cp mode to bus mastering & enable cp */ | 1140 | /* Set cp mode to bus mastering & enable cp */ |
1141 | WREG32(RADEON_CP_CSQ_MODE, | 1141 | WREG32(RADEON_CP_CSQ_MODE, |
1142 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | | 1142 | REG_SET(RADEON_INDIRECT2_START, indirect2_start) | |
@@ -1145,12 +1145,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1145 | WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); | 1145 | WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D); |
1146 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); | 1146 | WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM); |
1147 | radeon_ring_start(rdev); | 1147 | radeon_ring_start(rdev); |
1148 | r = radeon_ring_test(rdev, cp); | 1148 | r = radeon_ring_test(rdev, ring); |
1149 | if (r) { | 1149 | if (r) { |
1150 | DRM_ERROR("radeon: cp isn't working (%d).\n", r); | 1150 | DRM_ERROR("radeon: cp isn't working (%d).\n", r); |
1151 | return r; | 1151 | return r; |
1152 | } | 1152 | } |
1153 | cp->ready = true; | 1153 | ring->ready = true; |
1154 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); | 1154 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
1155 | return 0; | 1155 | return 0; |
1156 | } | 1156 | } |
@@ -1162,7 +1162,7 @@ void r100_cp_fini(struct radeon_device *rdev) | |||
1162 | } | 1162 | } |
1163 | /* Disable ring */ | 1163 | /* Disable ring */ |
1164 | r100_cp_disable(rdev); | 1164 | r100_cp_disable(rdev); |
1165 | radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]); | 1165 | radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
1166 | DRM_INFO("radeon: cp finalized\n"); | 1166 | DRM_INFO("radeon: cp finalized\n"); |
1167 | } | 1167 | } |
1168 | 1168 | ||
@@ -1170,7 +1170,7 @@ void r100_cp_disable(struct radeon_device *rdev) | |||
1170 | { | 1170 | { |
1171 | /* Disable ring */ | 1171 | /* Disable ring */ |
1172 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); | 1172 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
1173 | rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 1173 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
1174 | WREG32(RADEON_CP_CSQ_MODE, 0); | 1174 | WREG32(RADEON_CP_CSQ_MODE, 0); |
1175 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 1175 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
1176 | WREG32(R_000770_SCRATCH_UMSK, 0); | 1176 | WREG32(R_000770_SCRATCH_UMSK, 0); |
@@ -2107,9 +2107,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev) | |||
2107 | return -1; | 2107 | return -1; |
2108 | } | 2108 | } |
2109 | 2109 | ||
2110 | void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp) | 2110 | void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring) |
2111 | { | 2111 | { |
2112 | lockup->last_cp_rptr = cp->rptr; | 2112 | lockup->last_cp_rptr = ring->rptr; |
2113 | lockup->last_jiffies = jiffies; | 2113 | lockup->last_jiffies = jiffies; |
2114 | } | 2114 | } |
2115 | 2115 | ||
@@ -2134,20 +2134,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp | |||
2134 | * false positive when the CP is just given nothing to do. | 2134 | * false positive when the CP is just given nothing to do. |
2135 | * | 2135 | * |
2136 | **/ | 2136 | **/ |
2137 | bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp) | 2137 | bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring) |
2138 | { | 2138 | { |
2139 | unsigned long cjiffies, elapsed; | 2139 | unsigned long cjiffies, elapsed; |
2140 | 2140 | ||
2141 | cjiffies = jiffies; | 2141 | cjiffies = jiffies; |
2142 | if (!time_after(cjiffies, lockup->last_jiffies)) { | 2142 | if (!time_after(cjiffies, lockup->last_jiffies)) { |
2143 | /* likely a wrap around */ | 2143 | /* likely a wrap around */ |
2144 | lockup->last_cp_rptr = cp->rptr; | 2144 | lockup->last_cp_rptr = ring->rptr; |
2145 | lockup->last_jiffies = jiffies; | 2145 | lockup->last_jiffies = jiffies; |
2146 | return false; | 2146 | return false; |
2147 | } | 2147 | } |
2148 | if (cp->rptr != lockup->last_cp_rptr) { | 2148 | if (ring->rptr != lockup->last_cp_rptr) { |
2149 | /* CP is still working, no lockup */ | 2149 | /* CP is still working, no lockup */ |
2150 | lockup->last_cp_rptr = cp->rptr; | 2150 | lockup->last_cp_rptr = ring->rptr; |
2151 | lockup->last_jiffies = jiffies; | 2151 | lockup->last_jiffies = jiffies; |
2152 | return false; | 2152 | return false; |
2153 | } | 2153 | } |
@@ -2160,26 +2160,26 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l | |||
2160 | return false; | 2160 | return false; |
2161 | } | 2161 | } |
2162 | 2162 | ||
2163 | bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp) | 2163 | bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
2164 | { | 2164 | { |
2165 | u32 rbbm_status; | 2165 | u32 rbbm_status; |
2166 | int r; | 2166 | int r; |
2167 | 2167 | ||
2168 | rbbm_status = RREG32(R_000E40_RBBM_STATUS); | 2168 | rbbm_status = RREG32(R_000E40_RBBM_STATUS); |
2169 | if (!G_000E40_GUI_ACTIVE(rbbm_status)) { | 2169 | if (!G_000E40_GUI_ACTIVE(rbbm_status)) { |
2170 | r100_gpu_lockup_update(&rdev->config.r100.lockup, cp); | 2170 | r100_gpu_lockup_update(&rdev->config.r100.lockup, ring); |
2171 | return false; | 2171 | return false; |
2172 | } | 2172 | } |
2173 | /* force CP activities */ | 2173 | /* force CP activities */ |
2174 | r = radeon_ring_lock(rdev, cp, 2); | 2174 | r = radeon_ring_lock(rdev, ring, 2); |
2175 | if (!r) { | 2175 | if (!r) { |
2176 | /* PACKET2 NOP */ | 2176 | /* PACKET2 NOP */ |
2177 | radeon_ring_write(cp, 0x80000000); | 2177 | radeon_ring_write(ring, 0x80000000); |
2178 | radeon_ring_write(cp, 0x80000000); | 2178 | radeon_ring_write(ring, 0x80000000); |
2179 | radeon_ring_unlock_commit(rdev, cp); | 2179 | radeon_ring_unlock_commit(rdev, ring); |
2180 | } | 2180 | } |
2181 | cp->rptr = RREG32(cp->rptr_reg); | 2181 | ring->rptr = RREG32(ring->rptr_reg); |
2182 | return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, cp); | 2182 | return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring); |
2183 | } | 2183 | } |
2184 | 2184 | ||
2185 | void r100_bm_disable(struct radeon_device *rdev) | 2185 | void r100_bm_disable(struct radeon_device *rdev) |
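The lockup check above is a simple progress heuristic: if the read pointer moved since the last sample, the CP is alive and the timestamp is refreshed; if it has not moved for too long, report a lockup. The driver additionally forces CP activity with PACKET2 NOPs and special-cases jiffies wrap-around; a stripped-down sketch of just the heuristic:

#include <stdio.h>

struct lockup_state {
    unsigned last_rptr;
    unsigned long last_jiffies;
};

/* returns 1 when rptr made no progress for longer than 'timeout' ticks */
static int is_lockup(struct lockup_state *s, unsigned rptr,
                     unsigned long now, unsigned long timeout)
{
    if (rptr != s->last_rptr) {
        s->last_rptr = rptr;     /* consumer moved: restart the clock */
        s->last_jiffies = now;
        return 0;
    }
    return (now - s->last_jiffies) > timeout;
}

int main(void)
{
    struct lockup_state s = { 0, 0 };
    printf("t=5,  rptr=0: %d\n", is_lockup(&s, 0, 5, 10));   /* stalled, in budget */
    printf("t=20, rptr=0: %d\n", is_lockup(&s, 0, 20, 10));  /* stalled too long */
    printf("t=21, rptr=4: %d\n", is_lockup(&s, 4, 21, 10));  /* progress again */
    return 0;
}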
@@ -2587,22 +2587,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data) | |||
2587 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 2587 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
2588 | struct drm_device *dev = node->minor->dev; | 2588 | struct drm_device *dev = node->minor->dev; |
2589 | struct radeon_device *rdev = dev->dev_private; | 2589 | struct radeon_device *rdev = dev->dev_private; |
2590 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 2590 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
2591 | uint32_t rdp, wdp; | 2591 | uint32_t rdp, wdp; |
2592 | unsigned count, i, j; | 2592 | unsigned count, i, j; |
2593 | 2593 | ||
2594 | radeon_ring_free_size(rdev, cp); | 2594 | radeon_ring_free_size(rdev, ring); |
2595 | rdp = RREG32(RADEON_CP_RB_RPTR); | 2595 | rdp = RREG32(RADEON_CP_RB_RPTR); |
2596 | wdp = RREG32(RADEON_CP_RB_WPTR); | 2596 | wdp = RREG32(RADEON_CP_RB_WPTR); |
2597 | count = (rdp + cp->ring_size - wdp) & cp->ptr_mask; | 2597 | count = (rdp + ring->ring_size - wdp) & ring->ptr_mask; |
2598 | seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); | 2598 | seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT)); |
2599 | seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); | 2599 | seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); |
2600 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); | 2600 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); |
2601 | seq_printf(m, "%u free dwords in ring\n", cp->ring_free_dw); | 2601 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
2602 | seq_printf(m, "%u dwords in ring\n", count); | 2602 | seq_printf(m, "%u dwords in ring\n", count); |
2603 | for (j = 0; j <= count; j++) { | 2603 | for (j = 0; j <= count; j++) { |
2604 | i = (rdp + j) & cp->ptr_mask; | 2604 | i = (rdp + j) & ring->ptr_mask; |
2605 | seq_printf(m, "r[%04d]=0x%08x\n", i, cp->ring[i]); | 2605 | seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); |
2606 | } | 2606 | } |
2607 | return 0; | 2607 | return 0; |
2608 | } | 2608 | } |
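Because the ring size is a power of two, the debugfs dump above can do all pointer arithmetic modulo the ring with a single mask; "(rdp + ring_size - wdp) & ptr_mask" is the wrap-safe distance between the two pointers. A sketch of the same arithmetic:

#include <stdio.h>

int main(void)
{
    unsigned ring_size = 1024;            /* dwords, power of two */
    unsigned ptr_mask = ring_size - 1;
    unsigned rdp = 1000, wdp = 24;        /* read/write pointers */
    /* wrap-safe modular distance walked by the dump loop */
    unsigned count = (rdp + ring_size - wdp) & ptr_mask;
    unsigned i, j;
    for (j = 0; j <= 2; j++) {            /* first few dump indices */
        i = (rdp + j) & ptr_mask;
        printf("r[%04u]\n", i);           /* walks forward from rdp, wraps via mask */
    }
    printf("count = %u dwords\n", count);
    return 0;
}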
@@ -3644,7 +3644,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track | |||
3644 | } | 3644 | } |
3645 | } | 3645 | } |
3646 | 3646 | ||
3647 | int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp) | 3647 | int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) |
3648 | { | 3648 | { |
3649 | uint32_t scratch; | 3649 | uint32_t scratch; |
3650 | uint32_t tmp = 0; | 3650 | uint32_t tmp = 0; |
@@ -3657,15 +3657,15 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp) | |||
3657 | return r; | 3657 | return r; |
3658 | } | 3658 | } |
3659 | WREG32(scratch, 0xCAFEDEAD); | 3659 | WREG32(scratch, 0xCAFEDEAD); |
3660 | r = radeon_ring_lock(rdev, cp, 2); | 3660 | r = radeon_ring_lock(rdev, ring, 2); |
3661 | if (r) { | 3661 | if (r) { |
3662 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | 3662 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); |
3663 | radeon_scratch_free(rdev, scratch); | 3663 | radeon_scratch_free(rdev, scratch); |
3664 | return r; | 3664 | return r; |
3665 | } | 3665 | } |
3666 | radeon_ring_write(cp, PACKET0(scratch, 0)); | 3666 | radeon_ring_write(ring, PACKET0(scratch, 0)); |
3667 | radeon_ring_write(cp, 0xDEADBEEF); | 3667 | radeon_ring_write(ring, 0xDEADBEEF); |
3668 | radeon_ring_unlock_commit(rdev, cp); | 3668 | radeon_ring_unlock_commit(rdev, ring); |
3669 | for (i = 0; i < rdev->usec_timeout; i++) { | 3669 | for (i = 0; i < rdev->usec_timeout; i++) { |
3670 | tmp = RREG32(scratch); | 3670 | tmp = RREG32(scratch); |
3671 | if (tmp == 0xDEADBEEF) { | 3671 | if (tmp == 0xDEADBEEF) { |
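The ring test above is a round trip through the command processor: seed a scratch register with 0xCAFEDEAD from the CPU, emit a register write of 0xDEADBEEF through the ring, then poll until the CP has actually executed it or a timeout expires. A standalone model of that handshake, with the CP reduced to a function call:

#include <stdint.h>
#include <stdio.h>

static uint32_t scratch = 0xCAFEDEAD;        /* scratch register stand-in */

static void cp_execute_write(uint32_t val)   /* models the CP consuming PACKET0 */
{
    scratch = val;
}

int main(void)
{
    int i, timeout = 100000;
    cp_execute_write(0xDEADBEEF);            /* radeon_ring_write + commit */
    for (i = 0; i < timeout; i++) {          /* CPU polls the scratch value */
        if (scratch == 0xDEADBEEF) {
            printf("ring test succeeded in %d iterations\n", i);
            return 0;
        }
    }
    printf("ring test failed (scratch=0x%08X)\n", scratch);
    return 1;
}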
@@ -3686,11 +3686,11 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp) | |||
3686 | 3686 | ||
3687 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | 3687 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
3688 | { | 3688 | { |
3689 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 3689 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
3690 | 3690 | ||
3691 | radeon_ring_write(cp, PACKET0(RADEON_CP_IB_BASE, 1)); | 3691 | radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1)); |
3692 | radeon_ring_write(cp, ib->gpu_addr); | 3692 | radeon_ring_write(ring, ib->gpu_addr); |
3693 | radeon_ring_write(cp, ib->length_dw); | 3693 | radeon_ring_write(ring, ib->length_dw); |
3694 | } | 3694 | } |
3695 | 3695 | ||
3696 | int r100_ib_test(struct radeon_device *rdev) | 3696 | int r100_ib_test(struct radeon_device *rdev) |
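Every radeon_ring_write() above starts a command with a PACKET0 or PACKET3 header dword: PACKET0 carries a register offset and a count of following data dwords, PACKET3 an opcode instead of a register. A sketch of the classic radeon header layout, as an assumption for illustration (the authoritative macros live in the driver's register headers):

#include <stdint.h>
#include <stdio.h>

/* assumed classic radeon CP header layout, for illustration only */
#define PACKET0(reg, n) ((((uint32_t)(n)) << 16) | (((uint32_t)(reg)) >> 2))
#define PACKET3(op, n)  (0xC0000000u | (((uint32_t)(op)) << 8) | (((uint32_t)(n)) << 16))

int main(void)
{
    /* write 1+1 dwords starting at an illustrative register 0x15e4 */
    printf("PACKET0(0x15e4, 1) = 0x%08X\n", PACKET0(0x15e4, 1));
    /* type-3 command with an illustrative opcode and 2+1 data dwords */
    printf("PACKET3(0x32, 2)   = 0x%08X\n", PACKET3(0x32, 2));
    return 0;
}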
@@ -3778,7 +3778,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) | |||
3778 | /* Shut down the CP; we shouldn't need to do that, but better safe than | 3778 | /* Shut down the CP; we shouldn't need to do that, but better safe than |
3779 | * sorry | 3779 | * sorry |
3780 | */ | 3780 | */ |
3781 | rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 3781 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
3782 | WREG32(R_000740_CP_CSQ_CNTL, 0); | 3782 | WREG32(R_000740_CP_CSQ_CNTL, 0); |
3783 | 3783 | ||
3784 | /* Save few CRTC registers */ | 3784 | /* Save few CRTC registers */ |
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index d59c727a8e0f..eba4cbfa78f6 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c | |||
@@ -87,7 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev, | |||
87 | unsigned num_gpu_pages, | 87 | unsigned num_gpu_pages, |
88 | struct radeon_fence *fence) | 88 | struct radeon_fence *fence) |
89 | { | 89 | { |
90 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 90 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
91 | uint32_t size; | 91 | uint32_t size; |
92 | uint32_t cur_size; | 92 | uint32_t cur_size; |
93 | int i, num_loops; | 93 | int i, num_loops; |
@@ -96,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev, | |||
96 | /* radeon pitch is /64 */ | 96 | /* radeon pitch is /64 */ |
97 | size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; | 97 | size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT; |
98 | num_loops = DIV_ROUND_UP(size, 0x1FFFFF); | 98 | num_loops = DIV_ROUND_UP(size, 0x1FFFFF); |
99 | r = radeon_ring_lock(rdev, cp, num_loops * 4 + 64); | 99 | r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64); |
100 | if (r) { | 100 | if (r) { |
101 | DRM_ERROR("radeon: moving bo (%d).\n", r); | 101 | DRM_ERROR("radeon: moving bo (%d).\n", r); |
102 | return r; | 102 | return r; |
103 | } | 103 | } |
104 | /* Must wait for 2D idle & clean before DMA or hangs might happen */ | 104 | /* Must wait for 2D idle & clean before DMA or hangs might happen */ |
105 | radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0)); | 105 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
106 | radeon_ring_write(cp, (1 << 16)); | 106 | radeon_ring_write(ring, (1 << 16)); |
107 | for (i = 0; i < num_loops; i++) { | 107 | for (i = 0; i < num_loops; i++) { |
108 | cur_size = size; | 108 | cur_size = size; |
109 | if (cur_size > 0x1FFFFF) { | 109 | if (cur_size > 0x1FFFFF) { |
110 | cur_size = 0x1FFFFF; | 110 | cur_size = 0x1FFFFF; |
111 | } | 111 | } |
112 | size -= cur_size; | 112 | size -= cur_size; |
113 | radeon_ring_write(cp, PACKET0(0x720, 2)); | 113 | radeon_ring_write(ring, PACKET0(0x720, 2)); |
114 | radeon_ring_write(cp, src_offset); | 114 | radeon_ring_write(ring, src_offset); |
115 | radeon_ring_write(cp, dst_offset); | 115 | radeon_ring_write(ring, dst_offset); |
116 | radeon_ring_write(cp, cur_size | (1 << 31) | (1 << 30)); | 116 | radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30)); |
117 | src_offset += cur_size; | 117 | src_offset += cur_size; |
118 | dst_offset += cur_size; | 118 | dst_offset += cur_size; |
119 | } | 119 | } |
120 | radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0)); | 120 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
121 | radeon_ring_write(cp, RADEON_WAIT_DMA_GUI_IDLE); | 121 | radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE); |
122 | if (fence) { | 122 | if (fence) { |
123 | r = radeon_fence_emit(rdev, fence); | 123 | r = radeon_fence_emit(rdev, fence); |
124 | } | 124 | } |
125 | radeon_ring_unlock_commit(rdev, cp); | 125 | radeon_ring_unlock_commit(rdev, ring); |
126 | return r; | 126 | return r; |
127 | } | 127 | } |
128 | 128 | ||
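r200_copy_dma above chops the copy into packets of at most 0x1FFFFF bytes, advancing both offsets after each chunk; hence "num_loops * 4 + 64" dwords of ring space, four per chunk plus headroom. The chunking loop in isolation:

#include <stdio.h>

int main(void)
{
    unsigned size = 10u * 1024 * 1024;       /* bytes to copy */
    unsigned src = 0x00000000;               /* illustrative GPU offsets */
    unsigned dst = 0x40000000;
    while (size) {
        /* each DMA packet moves at most 0x1FFFFF bytes */
        unsigned cur = size > 0x1FFFFF ? 0x1FFFFF : size;
        printf("DMA 0x%06X bytes: 0x%08X -> 0x%08X\n", cur, src, dst);
        src += cur;
        dst += cur;
        size -= cur;
    }
    return 0;
}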
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 66ff35f394ce..6a96b31b558f 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -175,40 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev) | |||
175 | void r300_fence_ring_emit(struct radeon_device *rdev, | 175 | void r300_fence_ring_emit(struct radeon_device *rdev, |
176 | struct radeon_fence *fence) | 176 | struct radeon_fence *fence) |
177 | { | 177 | { |
178 | struct radeon_cp *cp = &rdev->cp[fence->ring]; | 178 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
179 | 179 | ||
180 | /* Whoever calls radeon_fence_emit should call ring_lock and ask | 180 | /* Whoever calls radeon_fence_emit should call ring_lock and ask |
181 | * for enough space (today the callers are ib schedule and buffer move) */ | 181 | * for enough space (today the callers are ib schedule and buffer move) */ |
182 | /* Write SC register so SC & US assert idle */ | 182 | /* Write SC register so SC & US assert idle */ |
183 | radeon_ring_write(cp, PACKET0(R300_RE_SCISSORS_TL, 0)); | 183 | radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0)); |
184 | radeon_ring_write(cp, 0); | 184 | radeon_ring_write(ring, 0); |
185 | radeon_ring_write(cp, PACKET0(R300_RE_SCISSORS_BR, 0)); | 185 | radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0)); |
186 | radeon_ring_write(cp, 0); | 186 | radeon_ring_write(ring, 0); |
187 | /* Flush 3D cache */ | 187 | /* Flush 3D cache */ |
188 | radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 188 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
189 | radeon_ring_write(cp, R300_RB3D_DC_FLUSH); | 189 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH); |
190 | radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 190 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
191 | radeon_ring_write(cp, R300_ZC_FLUSH); | 191 | radeon_ring_write(ring, R300_ZC_FLUSH); |
192 | /* Wait until IDLE & CLEAN */ | 192 | /* Wait until IDLE & CLEAN */ |
193 | radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0)); | 193 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
194 | radeon_ring_write(cp, (RADEON_WAIT_3D_IDLECLEAN | | 194 | radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN | |
195 | RADEON_WAIT_2D_IDLECLEAN | | 195 | RADEON_WAIT_2D_IDLECLEAN | |
196 | RADEON_WAIT_DMA_GUI_IDLE)); | 196 | RADEON_WAIT_DMA_GUI_IDLE)); |
197 | radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0)); | 197 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
198 | radeon_ring_write(cp, rdev->config.r300.hdp_cntl | | 198 | radeon_ring_write(ring, rdev->config.r300.hdp_cntl | |
199 | RADEON_HDP_READ_BUFFER_INVALIDATE); | 199 | RADEON_HDP_READ_BUFFER_INVALIDATE); |
200 | radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0)); | 200 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
201 | radeon_ring_write(cp, rdev->config.r300.hdp_cntl); | 201 | radeon_ring_write(ring, rdev->config.r300.hdp_cntl); |
202 | /* Emit fence sequence & fire IRQ */ | 202 | /* Emit fence sequence & fire IRQ */ |
203 | radeon_ring_write(cp, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); | 203 | radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); |
204 | radeon_ring_write(cp, fence->seq); | 204 | radeon_ring_write(ring, fence->seq); |
205 | radeon_ring_write(cp, PACKET0(RADEON_GEN_INT_STATUS, 0)); | 205 | radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)); |
206 | radeon_ring_write(cp, RADEON_SW_INT_FIRE); | 206 | radeon_ring_write(ring, RADEON_SW_INT_FIRE); |
207 | } | 207 | } |
208 | 208 | ||
209 | void r300_ring_start(struct radeon_device *rdev) | 209 | void r300_ring_start(struct radeon_device *rdev) |
210 | { | 210 | { |
211 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 211 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
212 | unsigned gb_tile_config; | 212 | unsigned gb_tile_config; |
213 | int r; | 213 | int r; |
214 | 214 | ||
@@ -230,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev) | |||
230 | break; | 230 | break; |
231 | } | 231 | } |
232 | 232 | ||
233 | r = radeon_ring_lock(rdev, cp, 64); | 233 | r = radeon_ring_lock(rdev, ring, 64); |
234 | if (r) { | 234 | if (r) { |
235 | return; | 235 | return; |
236 | } | 236 | } |
237 | radeon_ring_write(cp, PACKET0(RADEON_ISYNC_CNTL, 0)); | 237 | radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); |
238 | radeon_ring_write(cp, | 238 | radeon_ring_write(ring, |
239 | RADEON_ISYNC_ANY2D_IDLE3D | | 239 | RADEON_ISYNC_ANY2D_IDLE3D | |
240 | RADEON_ISYNC_ANY3D_IDLE2D | | 240 | RADEON_ISYNC_ANY3D_IDLE2D | |
241 | RADEON_ISYNC_WAIT_IDLEGUI | | 241 | RADEON_ISYNC_WAIT_IDLEGUI | |
242 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); | 242 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); |
243 | radeon_ring_write(cp, PACKET0(R300_GB_TILE_CONFIG, 0)); | 243 | radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0)); |
244 | radeon_ring_write(cp, gb_tile_config); | 244 | radeon_ring_write(ring, gb_tile_config); |
245 | radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0)); | 245 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
246 | radeon_ring_write(cp, | 246 | radeon_ring_write(ring, |
247 | RADEON_WAIT_2D_IDLECLEAN | | 247 | RADEON_WAIT_2D_IDLECLEAN | |
248 | RADEON_WAIT_3D_IDLECLEAN); | 248 | RADEON_WAIT_3D_IDLECLEAN); |
249 | radeon_ring_write(cp, PACKET0(R300_DST_PIPE_CONFIG, 0)); | 249 | radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0)); |
250 | radeon_ring_write(cp, R300_PIPE_AUTO_CONFIG); | 250 | radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG); |
251 | radeon_ring_write(cp, PACKET0(R300_GB_SELECT, 0)); | 251 | radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0)); |
252 | radeon_ring_write(cp, 0); | 252 | radeon_ring_write(ring, 0); |
253 | radeon_ring_write(cp, PACKET0(R300_GB_ENABLE, 0)); | 253 | radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0)); |
254 | radeon_ring_write(cp, 0); | 254 | radeon_ring_write(ring, 0); |
255 | radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 255 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
256 | radeon_ring_write(cp, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); | 256 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); |
257 | radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 257 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
258 | radeon_ring_write(cp, R300_ZC_FLUSH | R300_ZC_FREE); | 258 | radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE); |
259 | radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0)); | 259 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
260 | radeon_ring_write(cp, | 260 | radeon_ring_write(ring, |
261 | RADEON_WAIT_2D_IDLECLEAN | | 261 | RADEON_WAIT_2D_IDLECLEAN | |
262 | RADEON_WAIT_3D_IDLECLEAN); | 262 | RADEON_WAIT_3D_IDLECLEAN); |
263 | radeon_ring_write(cp, PACKET0(R300_GB_AA_CONFIG, 0)); | 263 | radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0)); |
264 | radeon_ring_write(cp, 0); | 264 | radeon_ring_write(ring, 0); |
265 | radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 265 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
266 | radeon_ring_write(cp, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); | 266 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); |
267 | radeon_ring_write(cp, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); | 267 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
268 | radeon_ring_write(cp, R300_ZC_FLUSH | R300_ZC_FREE); | 268 | radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE); |
269 | radeon_ring_write(cp, PACKET0(R300_GB_MSPOS0, 0)); | 269 | radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0)); |
270 | radeon_ring_write(cp, | 270 | radeon_ring_write(ring, |
271 | ((6 << R300_MS_X0_SHIFT) | | 271 | ((6 << R300_MS_X0_SHIFT) | |
272 | (6 << R300_MS_Y0_SHIFT) | | 272 | (6 << R300_MS_Y0_SHIFT) | |
273 | (6 << R300_MS_X1_SHIFT) | | 273 | (6 << R300_MS_X1_SHIFT) | |
@@ -276,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev) | |||
276 | (6 << R300_MS_Y2_SHIFT) | | 276 | (6 << R300_MS_Y2_SHIFT) | |
277 | (6 << R300_MSBD0_Y_SHIFT) | | 277 | (6 << R300_MSBD0_Y_SHIFT) | |
278 | (6 << R300_MSBD0_X_SHIFT))); | 278 | (6 << R300_MSBD0_X_SHIFT))); |
279 | radeon_ring_write(cp, PACKET0(R300_GB_MSPOS1, 0)); | 279 | radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0)); |
280 | radeon_ring_write(cp, | 280 | radeon_ring_write(ring, |
281 | ((6 << R300_MS_X3_SHIFT) | | 281 | ((6 << R300_MS_X3_SHIFT) | |
282 | (6 << R300_MS_Y3_SHIFT) | | 282 | (6 << R300_MS_Y3_SHIFT) | |
283 | (6 << R300_MS_X4_SHIFT) | | 283 | (6 << R300_MS_X4_SHIFT) | |
@@ -285,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev) | |||
285 | (6 << R300_MS_X5_SHIFT) | | 285 | (6 << R300_MS_X5_SHIFT) | |
286 | (6 << R300_MS_Y5_SHIFT) | | 286 | (6 << R300_MS_Y5_SHIFT) | |
287 | (6 << R300_MSBD1_SHIFT))); | 287 | (6 << R300_MSBD1_SHIFT))); |
288 | radeon_ring_write(cp, PACKET0(R300_GA_ENHANCE, 0)); | 288 | radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0)); |
289 | radeon_ring_write(cp, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); | 289 | radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); |
290 | radeon_ring_write(cp, PACKET0(R300_GA_POLY_MODE, 0)); | 290 | radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0)); |
291 | radeon_ring_write(cp, | 291 | radeon_ring_write(ring, |
292 | R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); | 292 | R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); |
293 | radeon_ring_write(cp, PACKET0(R300_GA_ROUND_MODE, 0)); | 293 | radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0)); |
294 | radeon_ring_write(cp, | 294 | radeon_ring_write(ring, |
295 | R300_GEOMETRY_ROUND_NEAREST | | 295 | R300_GEOMETRY_ROUND_NEAREST | |
296 | R300_COLOR_ROUND_NEAREST); | 296 | R300_COLOR_ROUND_NEAREST); |
297 | radeon_ring_unlock_commit(rdev, cp); | 297 | radeon_ring_unlock_commit(rdev, ring); |
298 | } | 298 | } |
299 | 299 | ||
300 | void r300_errata(struct radeon_device *rdev) | 300 | void r300_errata(struct radeon_device *rdev) |
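The GB_MSPOS0/GB_MSPOS1 writes above pack several small sample-position fields into a single register dword by shifting each value to its field offset and OR-ing the results. The idiom in isolation (the shift constants here are illustrative, not R300's real field offsets):

#include <stdio.h>

/* place a small value at its field offset within a register dword */
#define FIELD(val, shift) (((unsigned)(val)) << (shift))

int main(void)
{
    /* four 4-bit fields, all set to 6, at illustrative offsets */
    unsigned reg = FIELD(6, 0) | FIELD(6, 4) | FIELD(6, 8) | FIELD(6, 12);
    printf("packed register = 0x%04X\n", reg);   /* 0x6666 */
    return 0;
}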
@@ -378,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev) | |||
378 | rdev->num_gb_pipes, rdev->num_z_pipes); | 378 | rdev->num_gb_pipes, rdev->num_z_pipes); |
379 | } | 379 | } |
380 | 380 | ||
381 | bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp) | 381 | bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
382 | { | 382 | { |
383 | u32 rbbm_status; | 383 | u32 rbbm_status; |
384 | int r; | 384 | int r; |
385 | 385 | ||
386 | rbbm_status = RREG32(R_000E40_RBBM_STATUS); | 386 | rbbm_status = RREG32(R_000E40_RBBM_STATUS); |
387 | if (!G_000E40_GUI_ACTIVE(rbbm_status)) { | 387 | if (!G_000E40_GUI_ACTIVE(rbbm_status)) { |
388 | r100_gpu_lockup_update(&rdev->config.r300.lockup, cp); | 388 | r100_gpu_lockup_update(&rdev->config.r300.lockup, ring); |
389 | return false; | 389 | return false; |
390 | } | 390 | } |
391 | /* force CP activities */ | 391 | /* force CP activities */ |
392 | r = radeon_ring_lock(rdev, cp, 2); | 392 | r = radeon_ring_lock(rdev, ring, 2); |
393 | if (!r) { | 393 | if (!r) { |
394 | /* PACKET2 NOP */ | 394 | /* PACKET2 NOP */ |
395 | radeon_ring_write(cp, 0x80000000); | 395 | radeon_ring_write(ring, 0x80000000); |
396 | radeon_ring_write(cp, 0x80000000); | 396 | radeon_ring_write(ring, 0x80000000); |
397 | radeon_ring_unlock_commit(rdev, cp); | 397 | radeon_ring_unlock_commit(rdev, ring); |
398 | } | 398 | } |
399 | cp->rptr = RREG32(RADEON_CP_RB_RPTR); | 399 | ring->rptr = RREG32(RADEON_CP_RB_RPTR); |
400 | return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, cp); | 400 | return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring); |
401 | } | 401 | } |
402 | 402 | ||
403 | int r300_asic_reset(struct radeon_device *rdev) | 403 | int r300_asic_reset(struct radeon_device *rdev) |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 62e860436999..1d3231f3a090 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -199,7 +199,7 @@ static void r420_clock_resume(struct radeon_device *rdev) | |||
199 | 199 | ||
200 | static void r420_cp_errata_init(struct radeon_device *rdev) | 200 | static void r420_cp_errata_init(struct radeon_device *rdev) |
201 | { | 201 | { |
202 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 202 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
203 | 203 | ||
204 | /* RV410 and R420 can lock up if CP DMA to host memory happens | 204 | /* RV410 and R420 can lock up if CP DMA to host memory happens |
205 | * while the 2D engine is busy. | 205 | * while the 2D engine is busy. |
@@ -208,24 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev) | |||
208 | * of the CP init, apparently. | 208 | * of the CP init, apparently. |
209 | */ | 209 | */ |
210 | radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); | 210 | radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch); |
211 | radeon_ring_lock(rdev, cp, 8); | 211 | radeon_ring_lock(rdev, ring, 8); |
212 | radeon_ring_write(cp, PACKET0(R300_CP_RESYNC_ADDR, 1)); | 212 | radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); |
213 | radeon_ring_write(cp, rdev->config.r300.resync_scratch); | 213 | radeon_ring_write(ring, rdev->config.r300.resync_scratch); |
214 | radeon_ring_write(cp, 0xDEADBEEF); | 214 | radeon_ring_write(ring, 0xDEADBEEF); |
215 | radeon_ring_unlock_commit(rdev, cp); | 215 | radeon_ring_unlock_commit(rdev, ring); |
216 | } | 216 | } |
217 | 217 | ||
218 | static void r420_cp_errata_fini(struct radeon_device *rdev) | 218 | static void r420_cp_errata_fini(struct radeon_device *rdev) |
219 | { | 219 | { |
220 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 220 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
221 | 221 | ||
222 | /* Catch the RESYNC we dispatched all the way back, | 222 | /* Catch the RESYNC we dispatched all the way back, |
223 | * at the very beginning of the CP init. | 223 | * at the very beginning of the CP init. |
224 | */ | 224 | */ |
225 | radeon_ring_lock(rdev, cp, 8); | 225 | radeon_ring_lock(rdev, ring, 8); |
226 | radeon_ring_write(cp, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); | 226 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
227 | radeon_ring_write(cp, R300_RB3D_DC_FINISH); | 227 | radeon_ring_write(ring, R300_RB3D_DC_FINISH); |
228 | radeon_ring_unlock_commit(rdev, cp); | 228 | radeon_ring_unlock_commit(rdev, ring); |
229 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); | 229 | radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); |
230 | } | 230 | } |
231 | 231 | ||
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index aaf8cd42943e..d26e7c9f047c 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) | |||
1344 | return 0; | 1344 | return 0; |
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp) | 1347 | bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) |
1348 | { | 1348 | { |
1349 | u32 srbm_status; | 1349 | u32 srbm_status; |
1350 | u32 grbm_status; | 1350 | u32 grbm_status; |
@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp) | |||
1361 | grbm_status = RREG32(R_008010_GRBM_STATUS); | 1361 | grbm_status = RREG32(R_008010_GRBM_STATUS); |
1362 | grbm_status2 = RREG32(R_008014_GRBM_STATUS2); | 1362 | grbm_status2 = RREG32(R_008014_GRBM_STATUS2); |
1363 | if (!G_008010_GUI_ACTIVE(grbm_status)) { | 1363 | if (!G_008010_GUI_ACTIVE(grbm_status)) { |
1364 | r100_gpu_lockup_update(lockup, cp); | 1364 | r100_gpu_lockup_update(lockup, ring); |
1365 | return false; | 1365 | return false; |
1366 | } | 1366 | } |
1367 | /* force CP activities */ | 1367 | /* force CP activities */ |
1368 | r = radeon_ring_lock(rdev, cp, 2); | 1368 | r = radeon_ring_lock(rdev, ring, 2); |
1369 | if (!r) { | 1369 | if (!r) { |
1370 | /* PACKET2 NOP */ | 1370 | /* PACKET2 NOP */ |
1371 | radeon_ring_write(cp, 0x80000000); | 1371 | radeon_ring_write(ring, 0x80000000); |
1372 | radeon_ring_write(cp, 0x80000000); | 1372 | radeon_ring_write(ring, 0x80000000); |
1373 | radeon_ring_unlock_commit(rdev, cp); | 1373 | radeon_ring_unlock_commit(rdev, ring); |
1374 | } | 1374 | } |
1375 | cp->rptr = RREG32(cp->rptr_reg); | 1375 | ring->rptr = RREG32(ring->rptr_reg); |
1376 | return r100_gpu_cp_is_lockup(rdev, lockup, cp); | 1376 | return r100_gpu_cp_is_lockup(rdev, lockup, ring); |
1377 | } | 1377 | } |
1378 | 1378 | ||
1379 | int r600_asic_reset(struct radeon_device *rdev) | 1379 | int r600_asic_reset(struct radeon_device *rdev) |
@@ -2144,28 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev) | |||
2144 | 2144 | ||
2145 | int r600_cp_start(struct radeon_device *rdev) | 2145 | int r600_cp_start(struct radeon_device *rdev) |
2146 | { | 2146 | { |
2147 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 2147 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
2148 | int r; | 2148 | int r; |
2149 | uint32_t cp_me; | 2149 | uint32_t cp_me; |
2150 | 2150 | ||
2151 | r = radeon_ring_lock(rdev, cp, 7); | 2151 | r = radeon_ring_lock(rdev, ring, 7); |
2152 | if (r) { | 2152 | if (r) { |
2153 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); | 2153 | DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); |
2154 | return r; | 2154 | return r; |
2155 | } | 2155 | } |
2156 | radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5)); | 2156 | radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5)); |
2157 | radeon_ring_write(cp, 0x1); | 2157 | radeon_ring_write(ring, 0x1); |
2158 | if (rdev->family >= CHIP_RV770) { | 2158 | if (rdev->family >= CHIP_RV770) { |
2159 | radeon_ring_write(cp, 0x0); | 2159 | radeon_ring_write(ring, 0x0); |
2160 | radeon_ring_write(cp, rdev->config.rv770.max_hw_contexts - 1); | 2160 | radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1); |
2161 | } else { | 2161 | } else { |
2162 | radeon_ring_write(cp, 0x3); | 2162 | radeon_ring_write(ring, 0x3); |
2163 | radeon_ring_write(cp, rdev->config.r600.max_hw_contexts - 1); | 2163 | radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1); |
2164 | } | 2164 | } |
2165 | radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); | 2165 | radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); |
2166 | radeon_ring_write(cp, 0); | 2166 | radeon_ring_write(ring, 0); |
2167 | radeon_ring_write(cp, 0); | 2167 | radeon_ring_write(ring, 0); |
2168 | radeon_ring_unlock_commit(rdev, cp); | 2168 | radeon_ring_unlock_commit(rdev, ring); |
2169 | 2169 | ||
2170 | cp_me = 0xff; | 2170 | cp_me = 0xff; |
2171 | WREG32(R_0086D8_CP_ME_CNTL, cp_me); | 2171 | WREG32(R_0086D8_CP_ME_CNTL, cp_me); |
@@ -2174,7 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev) | |||
2174 | 2174 | ||
2175 | int r600_cp_resume(struct radeon_device *rdev) | 2175 | int r600_cp_resume(struct radeon_device *rdev) |
2176 | { | 2176 | { |
2177 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 2177 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
2178 | u32 tmp; | 2178 | u32 tmp; |
2179 | u32 rb_bufsz; | 2179 | u32 rb_bufsz; |
2180 | int r; | 2180 | int r; |
@@ -2186,7 +2186,7 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2186 | WREG32(GRBM_SOFT_RESET, 0); | 2186 | WREG32(GRBM_SOFT_RESET, 0); |
2187 | 2187 | ||
2188 | /* Set ring buffer size */ | 2188 | /* Set ring buffer size */ |
2189 | rb_bufsz = drm_order(cp->ring_size / 8); | 2189 | rb_bufsz = drm_order(ring->ring_size / 8); |
2190 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; | 2190 | tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz; |
2191 | #ifdef __BIG_ENDIAN | 2191 | #ifdef __BIG_ENDIAN |
2192 | tmp |= BUF_SWAP_32BIT; | 2192 | tmp |= BUF_SWAP_32BIT; |
@@ -2200,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2200 | /* Initialize the ring buffer's read and write pointers */ | 2200 | /* Initialize the ring buffer's read and write pointers */ |
2201 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); | 2201 | WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); |
2202 | WREG32(CP_RB_RPTR_WR, 0); | 2202 | WREG32(CP_RB_RPTR_WR, 0); |
2203 | cp->wptr = 0; | 2203 | ring->wptr = 0; |
2204 | WREG32(CP_RB_WPTR, cp->wptr); | 2204 | WREG32(CP_RB_WPTR, ring->wptr); |
2205 | 2205 | ||
2206 | /* set the wb address whether it's enabled or not */ | 2206 | /* set the wb address whether it's enabled or not */ |
2207 | WREG32(CP_RB_RPTR_ADDR, | 2207 | WREG32(CP_RB_RPTR_ADDR, |
@@ -2219,36 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev) | |||
2219 | mdelay(1); | 2219 | mdelay(1); |
2220 | WREG32(CP_RB_CNTL, tmp); | 2220 | WREG32(CP_RB_CNTL, tmp); |
2221 | 2221 | ||
2222 | WREG32(CP_RB_BASE, cp->gpu_addr >> 8); | 2222 | WREG32(CP_RB_BASE, ring->gpu_addr >> 8); |
2223 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); | 2223 | WREG32(CP_DEBUG, (1 << 27) | (1 << 28)); |
2224 | 2224 | ||
2225 | cp->rptr = RREG32(CP_RB_RPTR); | 2225 | ring->rptr = RREG32(CP_RB_RPTR); |
2226 | 2226 | ||
2227 | r600_cp_start(rdev); | 2227 | r600_cp_start(rdev); |
2228 | cp->ready = true; | 2228 | ring->ready = true; |
2229 | r = radeon_ring_test(rdev, cp); | 2229 | r = radeon_ring_test(rdev, ring); |
2230 | if (r) { | 2230 | if (r) { |
2231 | cp->ready = false; | 2231 | ring->ready = false; |
2232 | return r; | 2232 | return r; |
2233 | } | 2233 | } |
2234 | return 0; | 2234 | return 0; |
2235 | } | 2235 | } |
2236 | 2236 | ||
2237 | void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size) | 2237 | void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) |
2238 | { | 2238 | { |
2239 | u32 rb_bufsz; | 2239 | u32 rb_bufsz; |
2240 | 2240 | ||
2241 | /* Align ring size */ | 2241 | /* Align ring size */ |
2242 | rb_bufsz = drm_order(ring_size / 8); | 2242 | rb_bufsz = drm_order(ring_size / 8); |
2243 | ring_size = (1 << (rb_bufsz + 1)) * 4; | 2243 | ring_size = (1 << (rb_bufsz + 1)) * 4; |
2244 | cp->ring_size = ring_size; | 2244 | ring->ring_size = ring_size; |
2245 | cp->align_mask = 16 - 1; | 2245 | ring->align_mask = 16 - 1; |
2246 | } | 2246 | } |
2247 | 2247 | ||
2248 | void r600_cp_fini(struct radeon_device *rdev) | 2248 | void r600_cp_fini(struct radeon_device *rdev) |
2249 | { | 2249 | { |
2250 | r600_cp_stop(rdev); | 2250 | r600_cp_stop(rdev); |
2251 | radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]); | 2251 | radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
2252 | } | 2252 | } |
2253 | 2253 | ||
2254 | 2254 | ||
@@ -2267,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev) | |||
2267 | } | 2267 | } |
2268 | } | 2268 | } |
2269 | 2269 | ||
2270 | int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp) | 2270 | int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) |
2271 | { | 2271 | { |
2272 | uint32_t scratch; | 2272 | uint32_t scratch; |
2273 | uint32_t tmp = 0; | 2273 | uint32_t tmp = 0; |
2274 | unsigned i, ridx = radeon_ring_index(rdev, cp); | 2274 | unsigned i, ridx = radeon_ring_index(rdev, ring); |
2275 | int r; | 2275 | int r; |
2276 | 2276 | ||
2277 | r = radeon_scratch_get(rdev, &scratch); | 2277 | r = radeon_scratch_get(rdev, &scratch); |
@@ -2280,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp) | |||
2280 | return r; | 2280 | return r; |
2281 | } | 2281 | } |
2282 | WREG32(scratch, 0xCAFEDEAD); | 2282 | WREG32(scratch, 0xCAFEDEAD); |
2283 | r = radeon_ring_lock(rdev, cp, 3); | 2283 | r = radeon_ring_lock(rdev, ring, 3); |
2284 | if (r) { | 2284 | if (r) { |
2285 | DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r); | 2285 | DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r); |
2286 | radeon_scratch_free(rdev, scratch); | 2286 | radeon_scratch_free(rdev, scratch); |
2287 | return r; | 2287 | return r; |
2288 | } | 2288 | } |
2289 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 2289 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
2290 | radeon_ring_write(cp, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | 2290 | radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); |
2291 | radeon_ring_write(cp, 0xDEADBEEF); | 2291 | radeon_ring_write(ring, 0xDEADBEEF); |
2292 | radeon_ring_unlock_commit(rdev, cp); | 2292 | radeon_ring_unlock_commit(rdev, ring); |
2293 | for (i = 0; i < rdev->usec_timeout; i++) { | 2293 | for (i = 0; i < rdev->usec_timeout; i++) { |
2294 | tmp = RREG32(scratch); | 2294 | tmp = RREG32(scratch); |
2295 | if (tmp == 0xDEADBEEF) | 2295 | if (tmp == 0xDEADBEEF) |
@@ -2310,62 +2310,62 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp) | |||
2310 | void r600_fence_ring_emit(struct radeon_device *rdev, | 2310 | void r600_fence_ring_emit(struct radeon_device *rdev, |
2311 | struct radeon_fence *fence) | 2311 | struct radeon_fence *fence) |
2312 | { | 2312 | { |
2313 | struct radeon_cp *cp = &rdev->cp[fence->ring]; | 2313 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
2314 | 2314 | ||
2315 | if (rdev->wb.use_event) { | 2315 | if (rdev->wb.use_event) { |
2316 | u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + | 2316 | u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET + |
2317 | (u64)(rdev->fence_drv[fence->ring].scratch_reg - rdev->scratch.reg_base); | 2317 | (u64)(rdev->fence_drv[fence->ring].scratch_reg - rdev->scratch.reg_base); |
2318 | /* flush read cache over gart */ | 2318 | /* flush read cache over gart */ |
2319 | radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3)); | 2319 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
2320 | radeon_ring_write(cp, PACKET3_TC_ACTION_ENA | | 2320 | radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | |
2321 | PACKET3_VC_ACTION_ENA | | 2321 | PACKET3_VC_ACTION_ENA | |
2322 | PACKET3_SH_ACTION_ENA); | 2322 | PACKET3_SH_ACTION_ENA); |
2323 | radeon_ring_write(cp, 0xFFFFFFFF); | 2323 | radeon_ring_write(ring, 0xFFFFFFFF); |
2324 | radeon_ring_write(cp, 0); | 2324 | radeon_ring_write(ring, 0); |
2325 | radeon_ring_write(cp, 10); /* poll interval */ | 2325 | radeon_ring_write(ring, 10); /* poll interval */ |
2326 | /* EVENT_WRITE_EOP - flush caches, send int */ | 2326 | /* EVENT_WRITE_EOP - flush caches, send int */ |
2327 | radeon_ring_write(cp, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); | 2327 | radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4)); |
2328 | radeon_ring_write(cp, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); | 2328 | radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5)); |
2329 | radeon_ring_write(cp, addr & 0xffffffff); | 2329 | radeon_ring_write(ring, addr & 0xffffffff); |
2330 | radeon_ring_write(cp, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); | 2330 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2)); |
2331 | radeon_ring_write(cp, fence->seq); | 2331 | radeon_ring_write(ring, fence->seq); |
2332 | radeon_ring_write(cp, 0); | 2332 | radeon_ring_write(ring, 0); |
2333 | } else { | 2333 | } else { |
2334 | /* flush read cache over gart */ | 2334 | /* flush read cache over gart */ |
2335 | radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3)); | 2335 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
2336 | radeon_ring_write(cp, PACKET3_TC_ACTION_ENA | | 2336 | radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | |
2337 | PACKET3_VC_ACTION_ENA | | 2337 | PACKET3_VC_ACTION_ENA | |
2338 | PACKET3_SH_ACTION_ENA); | 2338 | PACKET3_SH_ACTION_ENA); |
2339 | radeon_ring_write(cp, 0xFFFFFFFF); | 2339 | radeon_ring_write(ring, 0xFFFFFFFF); |
2340 | radeon_ring_write(cp, 0); | 2340 | radeon_ring_write(ring, 0); |
2341 | radeon_ring_write(cp, 10); /* poll interval */ | 2341 | radeon_ring_write(ring, 10); /* poll interval */ |
2342 | radeon_ring_write(cp, PACKET3(PACKET3_EVENT_WRITE, 0)); | 2342 | radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0)); |
2343 | radeon_ring_write(cp, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); | 2343 | radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0)); |
2344 | /* wait for 3D idle clean */ | 2344 | /* wait for 3D idle clean */ |
2345 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 2345 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
2346 | radeon_ring_write(cp, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | 2346 | radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
2347 | radeon_ring_write(cp, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); | 2347 | radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); |
2348 | /* Emit fence sequence & fire IRQ */ | 2348 | /* Emit fence sequence & fire IRQ */ |
2349 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 2349 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
2350 | radeon_ring_write(cp, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | 2350 | radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); |
2351 | radeon_ring_write(cp, fence->seq); | 2351 | radeon_ring_write(ring, fence->seq); |
2352 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | 2352 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ |
2353 | radeon_ring_write(cp, PACKET0(CP_INT_STATUS, 0)); | 2353 | radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0)); |
2354 | radeon_ring_write(cp, RB_INT_STAT); | 2354 | radeon_ring_write(ring, RB_INT_STAT); |
2355 | } | 2355 | } |
2356 | } | 2356 | } |
2357 | 2357 | ||
2358 | void r600_semaphore_ring_emit(struct radeon_device *rdev, | 2358 | void r600_semaphore_ring_emit(struct radeon_device *rdev, |
2359 | struct radeon_cp *cp, | 2359 | struct radeon_ring *ring, |
2360 | struct radeon_semaphore *semaphore, | 2360 | struct radeon_semaphore *semaphore, |
2361 | bool emit_wait) | 2361 | bool emit_wait) |
2362 | { | 2362 | { |
2363 | uint64_t addr = semaphore->gpu_addr; | 2363 | uint64_t addr = semaphore->gpu_addr; |
2364 | unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; | 2364 | unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL; |
2365 | 2365 | ||
2366 | radeon_ring_write(cp, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); | 2366 | radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1)); |
2367 | radeon_ring_write(cp, addr & 0xffffffff); | 2367 | radeon_ring_write(ring, addr & 0xffffffff); |
2368 | radeon_ring_write(cp, (upper_32_bits(addr) & 0xff) | sel); | 2368 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); |
2369 | } | 2369 | } |
2370 | 2370 | ||
2371 | int r600_copy_blit(struct radeon_device *rdev, | 2371 | int r600_copy_blit(struct radeon_device *rdev, |
@@ -2420,7 +2420,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg) | |||
2420 | 2420 | ||
2421 | int r600_startup(struct radeon_device *rdev) | 2421 | int r600_startup(struct radeon_device *rdev) |
2422 | { | 2422 | { |
2423 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 2423 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
2424 | int r; | 2424 | int r; |
2425 | 2425 | ||
2426 | /* enable pcie gen2 link */ | 2426 | /* enable pcie gen2 link */ |
@@ -2468,7 +2468,7 @@ int r600_startup(struct radeon_device *rdev) | |||
2468 | } | 2468 | } |
2469 | r600_irq_set(rdev); | 2469 | r600_irq_set(rdev); |
2470 | 2470 | ||
2471 | r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 2471 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
2472 | R600_CP_RB_RPTR, R600_CP_RB_WPTR); | 2472 | R600_CP_RB_RPTR, R600_CP_RB_WPTR); |
2473 | 2473 | ||
2474 | if (r) | 2474 | if (r) |
@@ -2534,7 +2534,7 @@ int r600_suspend(struct radeon_device *rdev) | |||
2534 | r600_audio_fini(rdev); | 2534 | r600_audio_fini(rdev); |

2535 | /* FIXME: we should wait for the ring to be empty */ | 2535 | /* FIXME: we should wait for the ring to be empty */ |
2536 | r600_cp_stop(rdev); | 2536 | r600_cp_stop(rdev); |
2537 | rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 2537 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
2538 | r600_irq_suspend(rdev); | 2538 | r600_irq_suspend(rdev); |
2539 | radeon_wb_disable(rdev); | 2539 | radeon_wb_disable(rdev); |
2540 | r600_pcie_gart_disable(rdev); | 2540 | r600_pcie_gart_disable(rdev); |
@@ -2609,8 +2609,8 @@ int r600_init(struct radeon_device *rdev) | |||
2609 | if (r) | 2609 | if (r) |
2610 | return r; | 2610 | return r; |
2611 | 2611 | ||
2612 | rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | 2612 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
2613 | r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | 2613 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
2614 | 2614 | ||
2615 | rdev->ih.ring_obj = NULL; | 2615 | rdev->ih.ring_obj = NULL; |
2616 | r600_ih_ring_init(rdev, 64 * 1024); | 2616 | r600_ih_ring_init(rdev, 64 * 1024); |
@@ -2677,17 +2677,17 @@ void r600_fini(struct radeon_device *rdev) | |||
2677 | */ | 2677 | */ |
2678 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) | 2678 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) |
2679 | { | 2679 | { |
2680 | struct radeon_cp *cp = &rdev->cp[ib->fence->ring]; | 2680 | struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; |
2681 | 2681 | ||
2682 | /* FIXME: implement */ | 2682 | /* FIXME: implement */ |
2683 | radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 2683 | radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
2684 | radeon_ring_write(cp, | 2684 | radeon_ring_write(ring, |
2685 | #ifdef __BIG_ENDIAN | 2685 | #ifdef __BIG_ENDIAN |
2686 | (2 << 0) | | 2686 | (2 << 0) | |
2687 | #endif | 2687 | #endif |
2688 | (ib->gpu_addr & 0xFFFFFFFC)); | 2688 | (ib->gpu_addr & 0xFFFFFFFC)); |
2689 | radeon_ring_write(cp, upper_32_bits(ib->gpu_addr) & 0xFF); | 2689 | radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); |
2690 | radeon_ring_write(cp, ib->length_dw); | 2690 | radeon_ring_write(ring, ib->length_dw); |
2691 | } | 2691 | } |
2692 | 2692 | ||
2693 | int r600_ib_test(struct radeon_device *rdev, int ring) | 2693 | int r600_ib_test(struct radeon_device *rdev, int ring) |
@@ -3518,22 +3518,22 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data) | |||
3518 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 3518 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
3519 | struct drm_device *dev = node->minor->dev; | 3519 | struct drm_device *dev = node->minor->dev; |
3520 | struct radeon_device *rdev = dev->dev_private; | 3520 | struct radeon_device *rdev = dev->dev_private; |
3521 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 3521 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
3522 | unsigned count, i, j; | 3522 | unsigned count, i, j; |
3523 | 3523 | ||
3524 | radeon_ring_free_size(rdev, cp); | 3524 | radeon_ring_free_size(rdev, ring); |
3525 | count = (cp->ring_size / 4) - cp->ring_free_dw; | 3525 | count = (ring->ring_size / 4) - ring->ring_free_dw; |
3526 | seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); | 3526 | seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); |
3527 | seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR)); | 3527 | seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR)); |
3528 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR)); | 3528 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR)); |
3529 | seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", cp->wptr); | 3529 | seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", ring->wptr); |
3530 | seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", cp->rptr); | 3530 | seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", ring->rptr); |
3531 | seq_printf(m, "%u free dwords in ring\n", cp->ring_free_dw); | 3531 | seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); |
3532 | seq_printf(m, "%u dwords in ring\n", count); | 3532 | seq_printf(m, "%u dwords in ring\n", count); |
3533 | i = cp->rptr; | 3533 | i = ring->rptr; |
3534 | for (j = 0; j <= count; j++) { | 3534 | for (j = 0; j <= count; j++) { |
3535 | seq_printf(m, "r[%04d]=0x%08x\n", i, cp->ring[i]); | 3535 | seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); |
3536 | i = (i + 1) & cp->ptr_mask; | 3536 | i = (i + 1) & ring->ptr_mask; |
3537 | } | 3537 | } |
3538 | return 0; | 3538 | return 0; |
3539 | } | 3539 | } |
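The dump loop above is the canonical way to walk the live span of a power-of-two ring: start at rptr, visit the in-flight dwords, wrap with ptr_mask. A self-contained version of the same walk on a toy 16-dword ring:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirror the debugfs walk: start at rptr, visit `count` dwords
     * (the driver's loop is inclusive), wrap with the power-of-two mask. */
    static void dump_ring(const uint32_t *ring, unsigned ptr_mask,
                          unsigned rptr, unsigned count)
    {
        unsigned i = rptr, j;

        for (j = 0; j <= count; j++) {
            printf("r[%04u]=0x%08x\n", i, ring[i]);
            i = (i + 1) & ptr_mask;
        }
    }

    int main(void)
    {
        uint32_t ring[16] = { 0 };      /* 16-dword toy ring, mask = 15 */

        ring[14] = 0xdeadbeef;
        ring[15] = 0xcafef00d;
        dump_ring(ring, 15, 14, 2);     /* visits 14, 15, wraps to 0 */
        return 0;
    }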
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 62dd1c281c76..02a757470291 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -50,7 +50,7 @@ static void | |||
50 | set_render_target(struct radeon_device *rdev, int format, | 50 | set_render_target(struct radeon_device *rdev, int format, |
51 | int w, int h, u64 gpu_addr) | 51 | int w, int h, u64 gpu_addr) |
52 | { | 52 | { |
53 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 53 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
54 | u32 cb_color_info; | 54 | u32 cb_color_info; |
55 | int pitch, slice; | 55 | int pitch, slice; |
56 | 56 | ||
@@ -64,38 +64,38 @@ set_render_target(struct radeon_device *rdev, int format, | |||
64 | pitch = (w / 8) - 1; | 64 | pitch = (w / 8) - 1; |
65 | slice = ((w * h) / 64) - 1; | 65 | slice = ((w * h) / 64) - 1; |
66 | 66 | ||
67 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 67 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
68 | radeon_ring_write(cp, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 68 | radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
69 | radeon_ring_write(cp, gpu_addr >> 8); | 69 | radeon_ring_write(ring, gpu_addr >> 8); |
70 | 70 | ||
71 | if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) { | 71 | if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) { |
72 | radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0)); | 72 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0)); |
73 | radeon_ring_write(cp, 2 << 0); | 73 | radeon_ring_write(ring, 2 << 0); |
74 | } | 74 | } |
75 | 75 | ||
76 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 76 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
77 | radeon_ring_write(cp, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 77 | radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
78 | radeon_ring_write(cp, (pitch << 0) | (slice << 10)); | 78 | radeon_ring_write(ring, (pitch << 0) | (slice << 10)); |
79 | 79 | ||
80 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 80 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
81 | radeon_ring_write(cp, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 81 | radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
82 | radeon_ring_write(cp, 0); | 82 | radeon_ring_write(ring, 0); |
83 | 83 | ||
84 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 84 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
85 | radeon_ring_write(cp, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 85 | radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
86 | radeon_ring_write(cp, cb_color_info); | 86 | radeon_ring_write(ring, cb_color_info); |
87 | 87 | ||
88 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 88 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
89 | radeon_ring_write(cp, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 89 | radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
90 | radeon_ring_write(cp, 0); | 90 | radeon_ring_write(ring, 0); |
91 | 91 | ||
92 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 92 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
93 | radeon_ring_write(cp, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 93 | radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
94 | radeon_ring_write(cp, 0); | 94 | radeon_ring_write(ring, 0); |
95 | 95 | ||
96 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 96 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
97 | radeon_ring_write(cp, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 97 | radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
98 | radeon_ring_write(cp, 0); | 98 | radeon_ring_write(ring, 0); |
99 | } | 99 | } |
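The CB_COLOR0_SIZE write above encodes the render target pitch in 8-pixel units minus one and the slice size in 64-pixel tiles minus one, packed into a single dword. A worked check for a 4096x4096 target; the field layout is taken from the usage here and the widths are not verified against the register spec:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int w = 4096, h = 4096;
        uint32_t pitch = (w / 8) - 1;        /* 511: 8-pixel units, minus one */
        uint32_t slice = ((w * h) / 64) - 1; /* 262143: 64-pixel tiles, -1    */

        printf("CB_COLOR0_SIZE = 0x%08x\n", (pitch << 0) | (slice << 10));
        return 0;
    }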
100 | 100 | ||
101 | /* emits 5dw */ | 101 | /* emits 5dw */ |
@@ -104,7 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev, | |||
104 | u32 sync_type, u32 size, | 104 | u32 sync_type, u32 size, |
105 | u64 mc_addr) | 105 | u64 mc_addr) |
106 | { | 106 | { |
107 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 107 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
108 | u32 cp_coher_size; | 108 | u32 cp_coher_size; |
109 | 109 | ||
110 | if (size == 0xffffffff) | 110 | if (size == 0xffffffff) |
@@ -112,18 +112,18 @@ cp_set_surface_sync(struct radeon_device *rdev, | |||
112 | else | 112 | else |
113 | cp_coher_size = ((size + 255) >> 8); | 113 | cp_coher_size = ((size + 255) >> 8); |
114 | 114 | ||
115 | radeon_ring_write(cp, PACKET3(PACKET3_SURFACE_SYNC, 3)); | 115 | radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); |
116 | radeon_ring_write(cp, sync_type); | 116 | radeon_ring_write(ring, sync_type); |
117 | radeon_ring_write(cp, cp_coher_size); | 117 | radeon_ring_write(ring, cp_coher_size); |
118 | radeon_ring_write(cp, mc_addr >> 8); | 118 | radeon_ring_write(ring, mc_addr >> 8); |
119 | radeon_ring_write(cp, 10); /* poll interval */ | 119 | radeon_ring_write(ring, 10); /* poll interval */ |
120 | } | 120 | } |
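CP_COHER_SIZE counts 256-byte units, rounded up; 0xffffffff is a sentinel meaning "sync the whole range". The true branch of the `if` is elided from this hunk, and the model below assumes it passes the sentinel through unchanged:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t coher_size(uint32_t size)
    {
        if (size == 0xffffffff)     /* sentinel: sync the whole range */
            return 0xffffffff;
        return (size + 255) >> 8;   /* round up to 256-byte units */
    }

    int main(void)
    {
        /* 1 -> 1, 256 -> 1, 257 -> 2 */
        printf("%u %u %u\n", coher_size(1), coher_size(256), coher_size(257));
        return 0;
    }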
121 | 121 | ||
122 | /* emits 21dw + 1 surface sync = 26dw */ | 122 | /* emits 21dw + 1 surface sync = 26dw */ |
123 | static void | 123 | static void |
124 | set_shaders(struct radeon_device *rdev) | 124 | set_shaders(struct radeon_device *rdev) |
125 | { | 125 | { |
126 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 126 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
127 | u64 gpu_addr; | 127 | u64 gpu_addr; |
128 | u32 sq_pgm_resources; | 128 | u32 sq_pgm_resources; |
129 | 129 | ||
@@ -132,35 +132,35 @@ set_shaders(struct radeon_device *rdev) | |||
132 | 132 | ||
133 | /* VS */ | 133 | /* VS */ |
134 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; | 134 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; |
135 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 135 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
136 | radeon_ring_write(cp, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 136 | radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
137 | radeon_ring_write(cp, gpu_addr >> 8); | 137 | radeon_ring_write(ring, gpu_addr >> 8); |
138 | 138 | ||
139 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 139 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
140 | radeon_ring_write(cp, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 140 | radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
141 | radeon_ring_write(cp, sq_pgm_resources); | 141 | radeon_ring_write(ring, sq_pgm_resources); |
142 | 142 | ||
143 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 143 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
144 | radeon_ring_write(cp, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 144 | radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
145 | radeon_ring_write(cp, 0); | 145 | radeon_ring_write(ring, 0); |
146 | 146 | ||
147 | /* PS */ | 147 | /* PS */ |
148 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; | 148 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; |
149 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 149 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
150 | radeon_ring_write(cp, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 150 | radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
151 | radeon_ring_write(cp, gpu_addr >> 8); | 151 | radeon_ring_write(ring, gpu_addr >> 8); |
152 | 152 | ||
153 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 153 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
154 | radeon_ring_write(cp, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 154 | radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
155 | radeon_ring_write(cp, sq_pgm_resources | (1 << 28)); | 155 | radeon_ring_write(ring, sq_pgm_resources | (1 << 28)); |
156 | 156 | ||
157 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 157 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
158 | radeon_ring_write(cp, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 158 | radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
159 | radeon_ring_write(cp, 2); | 159 | radeon_ring_write(ring, 2); |
160 | 160 | ||
161 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); | 161 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); |
162 | radeon_ring_write(cp, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 162 | radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
163 | radeon_ring_write(cp, 0); | 163 | radeon_ring_write(ring, 0); |
164 | 164 | ||
165 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; | 165 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; |
166 | cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); | 166 | cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); |
@@ -170,7 +170,7 @@ set_shaders(struct radeon_device *rdev) | |||
170 | static void | 170 | static void |
171 | set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | 171 | set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) |
172 | { | 172 | { |
173 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 173 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
174 | u32 sq_vtx_constant_word2; | 174 | u32 sq_vtx_constant_word2; |
175 | 175 | ||
176 | sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | | 176 | sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) | |
@@ -179,15 +179,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) | |||
179 | sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); | 179 | sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32); |
180 | #endif | 180 | #endif |
181 | 181 | ||
182 | radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 7)); | 182 | radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); |
183 | radeon_ring_write(cp, 0x460); | 183 | radeon_ring_write(ring, 0x460); |
184 | radeon_ring_write(cp, gpu_addr & 0xffffffff); | 184 | radeon_ring_write(ring, gpu_addr & 0xffffffff); |
185 | radeon_ring_write(cp, 48 - 1); | 185 | radeon_ring_write(ring, 48 - 1); |
186 | radeon_ring_write(cp, sq_vtx_constant_word2); | 186 | radeon_ring_write(ring, sq_vtx_constant_word2); |
187 | radeon_ring_write(cp, 1 << 0); | 187 | radeon_ring_write(ring, 1 << 0); |
188 | radeon_ring_write(cp, 0); | 188 | radeon_ring_write(ring, 0); |
189 | radeon_ring_write(cp, 0); | 189 | radeon_ring_write(ring, 0); |
190 | radeon_ring_write(cp, SQ_TEX_VTX_VALID_BUFFER << 30); | 190 | radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30); |
191 | 191 | ||
192 | if ((rdev->family == CHIP_RV610) || | 192 | if ((rdev->family == CHIP_RV610) || |
193 | (rdev->family == CHIP_RV620) || | 193 | (rdev->family == CHIP_RV620) || |
@@ -207,7 +207,7 @@ set_tex_resource(struct radeon_device *rdev, | |||
207 | int format, int w, int h, int pitch, | 207 | int format, int w, int h, int pitch, |
208 | u64 gpu_addr, u32 size) | 208 | u64 gpu_addr, u32 size) |
209 | { | 209 | { |
210 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 210 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
211 | uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; | 211 | uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; |
212 | 212 | ||
213 | if (h < 1) | 213 | if (h < 1) |
@@ -230,15 +230,15 @@ set_tex_resource(struct radeon_device *rdev, | |||
230 | cp_set_surface_sync(rdev, | 230 | cp_set_surface_sync(rdev, |
231 | PACKET3_TC_ACTION_ENA, size, gpu_addr); | 231 | PACKET3_TC_ACTION_ENA, size, gpu_addr); |
232 | 232 | ||
233 | radeon_ring_write(cp, PACKET3(PACKET3_SET_RESOURCE, 7)); | 233 | radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7)); |
234 | radeon_ring_write(cp, 0); | 234 | radeon_ring_write(ring, 0); |
235 | radeon_ring_write(cp, sq_tex_resource_word0); | 235 | radeon_ring_write(ring, sq_tex_resource_word0); |
236 | radeon_ring_write(cp, sq_tex_resource_word1); | 236 | radeon_ring_write(ring, sq_tex_resource_word1); |
237 | radeon_ring_write(cp, gpu_addr >> 8); | 237 | radeon_ring_write(ring, gpu_addr >> 8); |
238 | radeon_ring_write(cp, gpu_addr >> 8); | 238 | radeon_ring_write(ring, gpu_addr >> 8); |
239 | radeon_ring_write(cp, sq_tex_resource_word4); | 239 | radeon_ring_write(ring, sq_tex_resource_word4); |
240 | radeon_ring_write(cp, 0); | 240 | radeon_ring_write(ring, 0); |
241 | radeon_ring_write(cp, SQ_TEX_VTX_VALID_TEXTURE << 30); | 241 | radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30); |
242 | } | 242 | } |
243 | 243 | ||
244 | /* emits 12 */ | 244 | /* emits 12 */ |
@@ -246,45 +246,45 @@ static void | |||
246 | set_scissors(struct radeon_device *rdev, int x1, int y1, | 246 | set_scissors(struct radeon_device *rdev, int x1, int y1, |
247 | int x2, int y2) | 247 | int x2, int y2) |
248 | { | 248 | { |
249 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 249 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
250 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | 250 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); |
251 | radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 251 | radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
252 | radeon_ring_write(cp, (x1 << 0) | (y1 << 16)); | 252 | radeon_ring_write(ring, (x1 << 0) | (y1 << 16)); |
253 | radeon_ring_write(cp, (x2 << 0) | (y2 << 16)); | 253 | radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); |
254 | 254 | ||
255 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | 255 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); |
256 | radeon_ring_write(cp, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 256 | radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
257 | radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31)); | 257 | radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); |
258 | radeon_ring_write(cp, (x2 << 0) | (y2 << 16)); | 258 | radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); |
259 | 259 | ||
260 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); | 260 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); |
261 | radeon_ring_write(cp, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); | 261 | radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); |
262 | radeon_ring_write(cp, (x1 << 0) | (y1 << 16) | (1 << 31)); | 262 | radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31)); |
263 | radeon_ring_write(cp, (x2 << 0) | (y2 << 16)); | 263 | radeon_ring_write(ring, (x2 << 0) | (y2 << 16)); |
264 | } | 264 | } |
265 | 265 | ||
266 | /* emits 10 */ | 266 | /* emits 10 */ |
267 | static void | 267 | static void |
268 | draw_auto(struct radeon_device *rdev) | 268 | draw_auto(struct radeon_device *rdev) |
269 | { | 269 | { |
270 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 270 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
271 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 271 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
272 | radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | 272 | radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
273 | radeon_ring_write(cp, DI_PT_RECTLIST); | 273 | radeon_ring_write(ring, DI_PT_RECTLIST); |
274 | 274 | ||
275 | radeon_ring_write(cp, PACKET3(PACKET3_INDEX_TYPE, 0)); | 275 | radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0)); |
276 | radeon_ring_write(cp, | 276 | radeon_ring_write(ring, |
277 | #ifdef __BIG_ENDIAN | 277 | #ifdef __BIG_ENDIAN |
278 | (2 << 2) | | 278 | (2 << 2) | |
279 | #endif | 279 | #endif |
280 | DI_INDEX_SIZE_16_BIT); | 280 | DI_INDEX_SIZE_16_BIT); |
281 | 281 | ||
282 | radeon_ring_write(cp, PACKET3(PACKET3_NUM_INSTANCES, 0)); | 282 | radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0)); |
283 | radeon_ring_write(cp, 1); | 283 | radeon_ring_write(ring, 1); |
284 | 284 | ||
285 | radeon_ring_write(cp, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); | 285 | radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); |
286 | radeon_ring_write(cp, 3); | 286 | radeon_ring_write(ring, 3); |
287 | radeon_ring_write(cp, DI_SRC_SEL_AUTO_INDEX); | 287 | radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX); |
288 | 288 | ||
289 | } | 289 | } |
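The "emits N" comments on these helpers are load-bearing: r600_blit_prepare_copy sums them into the dword budget it locks before emitting, so each helper must write exactly as many dwords as it advertises. A toy harness that tallies draw_auto's stream against its documented count of 10; the payloads are zeroed since only the count matters here:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static unsigned emitted;    /* counts dwords instead of writing them */

    static void ring_write(uint32_t v) { (void)v; emitted++; }

    /* Same packet shape as draw_auto above, payloads zeroed. */
    static void draw_auto_model(void)
    {
        ring_write(0); ring_write(0); ring_write(0);  /* SET_CONFIG_REG + reg + val */
        ring_write(0); ring_write(0);                 /* INDEX_TYPE + mode          */
        ring_write(0); ring_write(0);                 /* NUM_INSTANCES + count      */
        ring_write(0); ring_write(0); ring_write(0);  /* DRAW_INDEX_AUTO + 2 args   */
    }

    int main(void)
    {
        draw_auto_model();
        assert(emitted == 10);  /* must match the "emits 10" comment */
        printf("draw_auto emits %u dwords\n", emitted);
        return 0;
    }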
290 | 290 | ||
@@ -292,7 +292,7 @@ draw_auto(struct radeon_device *rdev) | |||
292 | static void | 292 | static void |
293 | set_default_state(struct radeon_device *rdev) | 293 | set_default_state(struct radeon_device *rdev) |
294 | { | 294 | { |
295 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 295 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
296 | u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; | 296 | u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; |
297 | u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; | 297 | u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; |
298 | int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; | 298 | int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; |
@@ -448,24 +448,24 @@ set_default_state(struct radeon_device *rdev) | |||
448 | /* emit an IB pointing at default state */ | 448 | /* emit an IB pointing at default state */ |
449 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); | 449 | dwords = ALIGN(rdev->r600_blit.state_len, 0x10); |
450 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; | 450 | gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; |
451 | radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); | 451 | radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
452 | radeon_ring_write(cp, | 452 | radeon_ring_write(ring, |
453 | #ifdef __BIG_ENDIAN | 453 | #ifdef __BIG_ENDIAN |
454 | (2 << 0) | | 454 | (2 << 0) | |
455 | #endif | 455 | #endif |
456 | (gpu_addr & 0xFFFFFFFC)); | 456 | (gpu_addr & 0xFFFFFFFC)); |
457 | radeon_ring_write(cp, upper_32_bits(gpu_addr) & 0xFF); | 457 | radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF); |
458 | radeon_ring_write(cp, dwords); | 458 | radeon_ring_write(ring, dwords); |
459 | 459 | ||
460 | /* SQ config */ | 460 | /* SQ config */ |
461 | radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 6)); | 461 | radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6)); |
462 | radeon_ring_write(cp, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); | 462 | radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); |
463 | radeon_ring_write(cp, sq_config); | 463 | radeon_ring_write(ring, sq_config); |
464 | radeon_ring_write(cp, sq_gpr_resource_mgmt_1); | 464 | radeon_ring_write(ring, sq_gpr_resource_mgmt_1); |
465 | radeon_ring_write(cp, sq_gpr_resource_mgmt_2); | 465 | radeon_ring_write(ring, sq_gpr_resource_mgmt_2); |
466 | radeon_ring_write(cp, sq_thread_resource_mgmt); | 466 | radeon_ring_write(ring, sq_thread_resource_mgmt); |
467 | radeon_ring_write(cp, sq_stack_resource_mgmt_1); | 467 | radeon_ring_write(ring, sq_stack_resource_mgmt_1); |
468 | radeon_ring_write(cp, sq_stack_resource_mgmt_2); | 468 | radeon_ring_write(ring, sq_stack_resource_mgmt_2); |
469 | } | 469 | } |
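ALIGN(rdev->r600_blit.state_len, 0x10) rounds the default-state IB length up to a 16-dword boundary before it is handed to PACKET3_INDIRECT_BUFFER. The macro is the usual power-of-two round-up; ALIGN_UP below mirrors it under that assumption:

    #include <stdio.h>

    /* Kernel-style round-up; `a` must be a power of two. */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned len;

        /* 14 -> 16, 15 -> 16, 16 -> 16, 17 -> 32 */
        for (len = 14; len <= 17; len++)
            printf("state_len=%u -> dwords=%u\n", len, ALIGN_UP(len, 0x10u));
        return 0;
    }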
470 | 470 | ||
471 | static uint32_t i2f(uint32_t input) | 471 | static uint32_t i2f(uint32_t input) |
@@ -687,7 +687,7 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages, | |||
687 | 687 | ||
688 | int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages) | 688 | int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages) |
689 | { | 689 | { |
690 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 690 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
691 | int r; | 691 | int r; |
692 | int ring_size; | 692 | int ring_size; |
693 | int num_loops = 0; | 693 | int num_loops = 0; |
@@ -708,7 +708,7 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages) | |||
708 | /* calculate number of loops correctly */ | 708 | /* calculate number of loops correctly */ |
709 | ring_size = num_loops * dwords_per_loop; | 709 | ring_size = num_loops * dwords_per_loop; |
710 | ring_size += rdev->r600_blit.ring_size_common; | 710 | ring_size += rdev->r600_blit.ring_size_common; |
711 | r = radeon_ring_lock(rdev, cp, ring_size); | 711 | r = radeon_ring_lock(rdev, ring, ring_size); |
712 | if (r) | 712 | if (r) |
713 | return r; | 713 | return r; |
714 | 714 | ||
@@ -727,7 +727,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) | |||
727 | if (fence) | 727 | if (fence) |
728 | r = radeon_fence_emit(rdev, fence); | 728 | r = radeon_fence_emit(rdev, fence); |
729 | 729 | ||
730 | radeon_ring_unlock_commit(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]); | 730 | radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
731 | } | 731 | } |
732 | 732 | ||
733 | void r600_kms_blit_copy(struct radeon_device *rdev, | 733 | void r600_kms_blit_copy(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index ea0cdf9ad61c..014065af60dc 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -231,7 +231,7 @@ int radeon_fence_count_emitted(struct radeon_device *rdev, int ring); | |||
231 | /* | 231 | /* |
232 | * Semaphores. | 232 | * Semaphores. |
233 | */ | 233 | */ |
234 | struct radeon_cp; | 234 | struct radeon_ring; |
235 | 235 | ||
236 | struct radeon_semaphore_driver { | 236 | struct radeon_semaphore_driver { |
237 | rwlock_t lock; | 237 | rwlock_t lock; |
@@ -485,7 +485,7 @@ void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); | |||
485 | void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); | 485 | void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); |
486 | 486 | ||
487 | /* | 487 | /* |
488 | * CP & ring. | 488 | * CP & rings. |
489 | */ | 489 | */ |
490 | 490 | ||
491 | /* max number of rings */ | 491 | /* max number of rings */ |
@@ -522,7 +522,7 @@ struct radeon_ib_pool { | |||
522 | unsigned head_id; | 522 | unsigned head_id; |
523 | }; | 523 | }; |
524 | 524 | ||
525 | struct radeon_cp { | 525 | struct radeon_ring { |
526 | struct radeon_bo *ring_obj; | 526 | struct radeon_bo *ring_obj; |
527 | volatile uint32_t *ring; | 527 | volatile uint32_t *ring; |
528 | unsigned rptr; | 528 | unsigned rptr; |
@@ -600,17 +600,17 @@ void radeon_ib_pool_fini(struct radeon_device *rdev); | |||
600 | int radeon_ib_test(struct radeon_device *rdev); | 600 | int radeon_ib_test(struct radeon_device *rdev); |
601 | extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); | 601 | extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); |
602 | /* Ring access between begin & end cannot sleep */ | 602 | /* Ring access between begin & end cannot sleep */ |
603 | int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp); | 603 | int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); |
604 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp); | 604 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); |
605 | int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw); | 605 | int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); |
606 | int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw); | 606 | int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); |
607 | void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp); | 607 | void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); |
608 | void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp); | 608 | void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); |
609 | void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp); | 609 | void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); |
610 | int radeon_ring_test(struct radeon_device *rdev, struct radeon_cp *cp); | 610 | int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
611 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size, | 611 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, |
612 | unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg); | 612 | unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg); |
613 | void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp); | 613 | void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); |
614 | 614 | ||
615 | 615 | ||
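Taken together these prototypes form the emit pattern the rest of the patch converts (note the hunk only changes the type; the `cp` parameter name survives in the header): lock reserves space and takes the per-ring mutex, writes fill the reservation, unlock_commit pads and bumps the write pointer. A hedged usage sketch in driver context, not compilable standalone, with error paths trimmed and an illustrative filler dword:

    /* Usage sketch only (driver context assumed); the 0x80000000 filler
     * models a type-2 NOP packet and is illustrative. */
    static int emit_three_nops(struct radeon_device *rdev)
    {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        int r;

        r = radeon_ring_lock(rdev, ring, 3);    /* reserve 3 dwords */
        if (r)
            return r;
        radeon_ring_write(ring, 0x80000000);
        radeon_ring_write(ring, 0x80000000);
        radeon_ring_write(ring, 0x80000000);
        radeon_ring_unlock_commit(rdev, ring);  /* pad + bump wptr  */
        return 0;
    }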
616 | /* | 616 | /* |
@@ -915,8 +915,8 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number); | |||
915 | */ | 915 | */ |
916 | void radeon_test_moves(struct radeon_device *rdev); | 916 | void radeon_test_moves(struct radeon_device *rdev); |
917 | void radeon_test_ring_sync(struct radeon_device *rdev, | 917 | void radeon_test_ring_sync(struct radeon_device *rdev, |
918 | struct radeon_cp *cpA, | 918 | struct radeon_ring *cpA, |
919 | struct radeon_cp *cpB); | 919 | struct radeon_ring *cpB); |
920 | void radeon_test_syncing(struct radeon_device *rdev); | 920 | void radeon_test_syncing(struct radeon_device *rdev); |
921 | 921 | ||
922 | 922 | ||
@@ -943,7 +943,7 @@ struct radeon_asic { | |||
943 | int (*resume)(struct radeon_device *rdev); | 943 | int (*resume)(struct radeon_device *rdev); |
944 | int (*suspend)(struct radeon_device *rdev); | 944 | int (*suspend)(struct radeon_device *rdev); |
945 | void (*vga_set_state)(struct radeon_device *rdev, bool state); | 945 | void (*vga_set_state)(struct radeon_device *rdev, bool state); |
946 | bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_cp *cp); | 946 | bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); |
947 | int (*asic_reset)(struct radeon_device *rdev); | 947 | int (*asic_reset)(struct radeon_device *rdev); |
948 | void (*gart_tlb_flush)(struct radeon_device *rdev); | 948 | void (*gart_tlb_flush)(struct radeon_device *rdev); |
949 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); | 949 | int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); |
@@ -955,11 +955,11 @@ struct radeon_asic { | |||
955 | struct { | 955 | struct { |
956 | void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); | 956 | void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); |
957 | void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); | 957 | void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); |
958 | void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_cp *cp, | 958 | void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, |
959 | struct radeon_semaphore *semaphore, bool emit_wait); | 959 | struct radeon_semaphore *semaphore, bool emit_wait); |
960 | } ring[RADEON_NUM_RINGS]; | 960 | } ring[RADEON_NUM_RINGS]; |
961 | 961 | ||
962 | int (*ring_test)(struct radeon_device *rdev, struct radeon_cp *cp); | 962 | int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); |
963 | int (*irq_set)(struct radeon_device *rdev); | 963 | int (*irq_set)(struct radeon_device *rdev); |
964 | int (*irq_process)(struct radeon_device *rdev); | 964 | int (*irq_process)(struct radeon_device *rdev); |
965 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); | 965 | u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); |
@@ -1293,7 +1293,7 @@ struct radeon_device { | |||
1293 | rwlock_t fence_lock; | 1293 | rwlock_t fence_lock; |
1294 | struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; | 1294 | struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; |
1295 | struct radeon_semaphore_driver semaphore_drv; | 1295 | struct radeon_semaphore_driver semaphore_drv; |
1296 | struct radeon_cp cp[RADEON_NUM_RINGS]; | 1296 | struct radeon_ring ring[RADEON_NUM_RINGS]; |
1297 | struct radeon_ib_pool ib_pool; | 1297 | struct radeon_ib_pool ib_pool; |
1298 | struct radeon_irq irq; | 1298 | struct radeon_irq irq; |
1299 | struct radeon_asic *asic; | 1299 | struct radeon_asic *asic; |
@@ -1476,16 +1476,16 @@ void radeon_atombios_fini(struct radeon_device *rdev); | |||
1476 | * RING helpers. | 1476 | * RING helpers. |
1477 | */ | 1477 | */ |
1478 | #if DRM_DEBUG_CODE == 0 | 1478 | #if DRM_DEBUG_CODE == 0 |
1479 | static inline void radeon_ring_write(struct radeon_cp *cp, uint32_t v) | 1479 | static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v) |
1480 | { | 1480 | { |
1481 | cp->ring[cp->wptr++] = v; | 1481 | ring->ring[ring->wptr++] = v; |
1482 | cp->wptr &= cp->ptr_mask; | 1482 | ring->wptr &= ring->ptr_mask; |
1483 | cp->count_dw--; | 1483 | ring->count_dw--; |
1484 | cp->ring_free_dw--; | 1484 | ring->ring_free_dw--; |
1485 | } | 1485 | } |
1486 | #else | 1486 | #else |
1487 | /* With debugging this is just too big to inline */ | 1487 | /* With debugging this is just too big to inline */ |
1488 | void radeon_ring_write(struct radeon_cp *cp, uint32_t v); | 1488 | void radeon_ring_write(struct radeon_ring *ring, uint32_t v); |
1489 | #endif | 1489 | #endif |
1490 | 1490 | ||
1491 | /* | 1491 | /* |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 4f9ba6d0330f..a462fd9a2627 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -58,7 +58,7 @@ void r100_fini(struct radeon_device *rdev); | |||
58 | int r100_suspend(struct radeon_device *rdev); | 58 | int r100_suspend(struct radeon_device *rdev); |
59 | int r100_resume(struct radeon_device *rdev); | 59 | int r100_resume(struct radeon_device *rdev); |
60 | void r100_vga_set_state(struct radeon_device *rdev, bool state); | 60 | void r100_vga_set_state(struct radeon_device *rdev, bool state); |
61 | bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp); | 61 | bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); |
62 | int r100_asic_reset(struct radeon_device *rdev); | 62 | int r100_asic_reset(struct radeon_device *rdev); |
63 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); | 63 | u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
64 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); | 64 | void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
@@ -69,7 +69,7 @@ int r100_irq_process(struct radeon_device *rdev); | |||
69 | void r100_fence_ring_emit(struct radeon_device *rdev, | 69 | void r100_fence_ring_emit(struct radeon_device *rdev, |
70 | struct radeon_fence *fence); | 70 | struct radeon_fence *fence); |
71 | void r100_semaphore_ring_emit(struct radeon_device *rdev, | 71 | void r100_semaphore_ring_emit(struct radeon_device *rdev, |
72 | struct radeon_cp *cp, | 72 | struct radeon_ring *cp, |
73 | struct radeon_semaphore *semaphore, | 73 | struct radeon_semaphore *semaphore, |
74 | bool emit_wait); | 74 | bool emit_wait); |
75 | int r100_cs_parse(struct radeon_cs_parser *p); | 75 | int r100_cs_parse(struct radeon_cs_parser *p); |
@@ -86,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, | |||
86 | void r100_clear_surface_reg(struct radeon_device *rdev, int reg); | 86 | void r100_clear_surface_reg(struct radeon_device *rdev, int reg); |
87 | void r100_bandwidth_update(struct radeon_device *rdev); | 87 | void r100_bandwidth_update(struct radeon_device *rdev); |
88 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 88 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
89 | int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp); | 89 | int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
90 | void r100_hpd_init(struct radeon_device *rdev); | 90 | void r100_hpd_init(struct radeon_device *rdev); |
91 | void r100_hpd_fini(struct radeon_device *rdev); | 91 | void r100_hpd_fini(struct radeon_device *rdev); |
92 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | 92 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
@@ -104,10 +104,10 @@ void r100_pci_gart_disable(struct radeon_device *rdev); | |||
104 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); | 104 | int r100_debugfs_mc_info_init(struct radeon_device *rdev); |
105 | int r100_gui_wait_for_idle(struct radeon_device *rdev); | 105 | int r100_gui_wait_for_idle(struct radeon_device *rdev); |
106 | void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, | 106 | void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, |
107 | struct radeon_cp *cp); | 107 | struct radeon_ring *cp); |
108 | bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, | 108 | bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, |
109 | struct r100_gpu_lockup *lockup, | 109 | struct r100_gpu_lockup *lockup, |
110 | struct radeon_cp *cp); | 110 | struct radeon_ring *cp); |
111 | void r100_ib_fini(struct radeon_device *rdev); | 111 | void r100_ib_fini(struct radeon_device *rdev); |
112 | int r100_ib_init(struct radeon_device *rdev); | 112 | int r100_ib_init(struct radeon_device *rdev); |
113 | void r100_irq_disable(struct radeon_device *rdev); | 113 | void r100_irq_disable(struct radeon_device *rdev); |
@@ -157,7 +157,7 @@ extern int r300_init(struct radeon_device *rdev); | |||
157 | extern void r300_fini(struct radeon_device *rdev); | 157 | extern void r300_fini(struct radeon_device *rdev); |
158 | extern int r300_suspend(struct radeon_device *rdev); | 158 | extern int r300_suspend(struct radeon_device *rdev); |
159 | extern int r300_resume(struct radeon_device *rdev); | 159 | extern int r300_resume(struct radeon_device *rdev); |
160 | extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp); | 160 | extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); |
161 | extern int r300_asic_reset(struct radeon_device *rdev); | 161 | extern int r300_asic_reset(struct radeon_device *rdev); |
162 | extern void r300_ring_start(struct radeon_device *rdev); | 162 | extern void r300_ring_start(struct radeon_device *rdev); |
163 | extern void r300_fence_ring_emit(struct radeon_device *rdev, | 163 | extern void r300_fence_ring_emit(struct radeon_device *rdev, |
@@ -303,10 +303,10 @@ int r600_cs_parse(struct radeon_cs_parser *p); | |||
303 | void r600_fence_ring_emit(struct radeon_device *rdev, | 303 | void r600_fence_ring_emit(struct radeon_device *rdev, |
304 | struct radeon_fence *fence); | 304 | struct radeon_fence *fence); |
305 | void r600_semaphore_ring_emit(struct radeon_device *rdev, | 305 | void r600_semaphore_ring_emit(struct radeon_device *rdev, |
306 | struct radeon_cp *cp, | 306 | struct radeon_ring *cp, |
307 | struct radeon_semaphore *semaphore, | 307 | struct radeon_semaphore *semaphore, |
308 | bool emit_wait); | 308 | bool emit_wait); |
309 | bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp); | 309 | bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); |
310 | int r600_asic_reset(struct radeon_device *rdev); | 310 | int r600_asic_reset(struct radeon_device *rdev); |
311 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | 311 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
312 | uint32_t tiling_flags, uint32_t pitch, | 312 | uint32_t tiling_flags, uint32_t pitch, |
@@ -314,7 +314,7 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, | |||
314 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); | 314 | void r600_clear_surface_reg(struct radeon_device *rdev, int reg); |
315 | int r600_ib_test(struct radeon_device *rdev, int ring); | 315 | int r600_ib_test(struct radeon_device *rdev, int ring); |
316 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 316 | void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
317 | int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp); | 317 | int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
318 | int r600_copy_blit(struct radeon_device *rdev, | 318 | int r600_copy_blit(struct radeon_device *rdev, |
319 | uint64_t src_offset, uint64_t dst_offset, | 319 | uint64_t src_offset, uint64_t dst_offset, |
320 | unsigned num_gpu_pages, struct radeon_fence *fence); | 320 | unsigned num_gpu_pages, struct radeon_fence *fence); |
@@ -334,7 +334,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev); | |||
334 | bool r600_card_posted(struct radeon_device *rdev); | 334 | bool r600_card_posted(struct radeon_device *rdev); |
335 | void r600_cp_stop(struct radeon_device *rdev); | 335 | void r600_cp_stop(struct radeon_device *rdev); |
336 | int r600_cp_start(struct radeon_device *rdev); | 336 | int r600_cp_start(struct radeon_device *rdev); |
337 | void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size); | 337 | void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size); |
338 | int r600_cp_resume(struct radeon_device *rdev); | 338 | int r600_cp_resume(struct radeon_device *rdev); |
339 | void r600_cp_fini(struct radeon_device *rdev); | 339 | void r600_cp_fini(struct radeon_device *rdev); |
340 | int r600_count_pipe_bits(uint32_t val); | 340 | int r600_count_pipe_bits(uint32_t val); |
@@ -403,7 +403,7 @@ int evergreen_init(struct radeon_device *rdev); | |||
403 | void evergreen_fini(struct radeon_device *rdev); | 403 | void evergreen_fini(struct radeon_device *rdev); |
404 | int evergreen_suspend(struct radeon_device *rdev); | 404 | int evergreen_suspend(struct radeon_device *rdev); |
405 | int evergreen_resume(struct radeon_device *rdev); | 405 | int evergreen_resume(struct radeon_device *rdev); |
406 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp); | 406 | bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); |
407 | int evergreen_asic_reset(struct radeon_device *rdev); | 407 | int evergreen_asic_reset(struct radeon_device *rdev); |
408 | void evergreen_bandwidth_update(struct radeon_device *rdev); | 408 | void evergreen_bandwidth_update(struct radeon_device *rdev); |
409 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 409 | void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
@@ -434,7 +434,7 @@ int cayman_init(struct radeon_device *rdev); | |||
434 | void cayman_fini(struct radeon_device *rdev); | 434 | void cayman_fini(struct radeon_device *rdev); |
435 | int cayman_suspend(struct radeon_device *rdev); | 435 | int cayman_suspend(struct radeon_device *rdev); |
436 | int cayman_resume(struct radeon_device *rdev); | 436 | int cayman_resume(struct radeon_device *rdev); |
437 | bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp); | 437 | bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); |
438 | int cayman_asic_reset(struct radeon_device *rdev); | 438 | int cayman_asic_reset(struct radeon_device *rdev); |
439 | 439 | ||
440 | #endif | 440 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index fa36b5368d95..e81c333e0f97 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -719,7 +719,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
719 | radeon_mutex_init(&rdev->cs_mutex); | 719 | radeon_mutex_init(&rdev->cs_mutex); |
720 | mutex_init(&rdev->ib_pool.mutex); | 720 | mutex_init(&rdev->ib_pool.mutex); |
721 | for (i = 0; i < RADEON_NUM_RINGS; ++i) | 721 | for (i = 0; i < RADEON_NUM_RINGS; ++i) |
722 | mutex_init(&rdev->cp[i].mutex); | 722 | mutex_init(&rdev->ring[i].mutex); |
723 | mutex_init(&rdev->dc_hw_i2c_mutex); | 723 | mutex_init(&rdev->dc_hw_i2c_mutex); |
724 | if (rdev->family >= CHIP_R600) | 724 | if (rdev->family >= CHIP_R600) |
725 | spin_lock_init(&rdev->ih.lock); | 725 | spin_lock_init(&rdev->ih.lock); |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 9e7f8921714a..809e66e8a86e 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -84,7 +84,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) | |||
84 | return 0; | 84 | return 0; |
85 | } | 85 | } |
86 | fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq); | 86 | fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq); |
87 | if (!rdev->cp[fence->ring].ready) | 87 | if (!rdev->ring[fence->ring].ready) |
88 | /* FIXME: cp is not running; assume everything is done right | 88 | /* FIXME: cp is not running; assume everything is done right |
89 | * away | 89 | * away |
90 | */ | 90 | */ |
@@ -269,7 +269,7 @@ retry: | |||
269 | * if we are experiencing a lockup the value doesn't change | 269 | * if we are experiencing a lockup the value doesn't change |
270 | */ | 270 | */ |
271 | if (seq == rdev->fence_drv[fence->ring].last_seq && | 271 | if (seq == rdev->fence_drv[fence->ring].last_seq && |
272 | radeon_gpu_is_lockup(rdev, &rdev->cp[fence->ring])) { | 272 | radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) { |
273 | /* good news we believe it's a lockup */ | 273 | /* good news we believe it's a lockup */ |
274 | printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", | 274 | printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", |
275 | fence->seq, seq); | 275 | fence->seq, seq); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 1ce8fa71cf73..ae321975283c 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -163,7 +163,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
163 | args->vram_visible -= radeon_fbdev_total_size(rdev); | 163 | args->vram_visible -= radeon_fbdev_total_size(rdev); |
164 | args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024; | 164 | args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024; |
165 | for(i = 0; i < RADEON_NUM_RINGS; ++i) | 165 | for(i = 0; i < RADEON_NUM_RINGS; ++i) |
166 | args->gart_size -= rdev->cp[i].ring_size; | 166 | args->gart_size -= rdev->ring[i].ring_size; |
167 | return 0; | 167 | return 0; |
168 | } | 168 | } |
169 | 169 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index aa4a4a209a6a..095148e29a1f 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -253,8 +253,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
253 | mutex_lock(&rdev->ddev->struct_mutex); | 253 | mutex_lock(&rdev->ddev->struct_mutex); |
254 | mutex_lock(&rdev->vram_mutex); | 254 | mutex_lock(&rdev->vram_mutex); |
255 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 255 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
256 | if (rdev->cp[i].ring_obj) | 256 | if (rdev->ring[i].ring_obj) |
257 | mutex_lock(&rdev->cp[i].mutex); | 257 | mutex_lock(&rdev->ring[i].mutex); |
258 | } | 258 | } |
259 | 259 | ||
260 | /* gui idle int has issues on older chips it seems */ | 260 | /* gui idle int has issues on older chips it seems */ |
@@ -271,13 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
271 | radeon_irq_set(rdev); | 271 | radeon_irq_set(rdev); |
272 | } | 272 | } |
273 | } else { | 273 | } else { |
274 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 274 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
275 | if (cp->ready) { | 275 | if (ring->ready) { |
276 | struct radeon_fence *fence; | 276 | struct radeon_fence *fence; |
277 | radeon_ring_alloc(rdev, cp, 64); | 277 | radeon_ring_alloc(rdev, ring, 64); |
278 | radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, cp)); | 278 | radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring)); |
279 | radeon_fence_emit(rdev, fence); | 279 | radeon_fence_emit(rdev, fence); |
280 | radeon_ring_commit(rdev, cp); | 280 | radeon_ring_commit(rdev, ring); |
281 | radeon_fence_wait(fence, false); | 281 | radeon_fence_wait(fence, false); |
282 | radeon_fence_unref(&fence); | 282 | radeon_fence_unref(&fence); |
283 | } | 283 | } |
@@ -312,8 +312,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) | |||
312 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 312 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
313 | 313 | ||
314 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 314 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
315 | if (rdev->cp[i].ring_obj) | 315 | if (rdev->ring[i].ring_obj) |
316 | mutex_unlock(&rdev->cp[i].mutex); | 316 | mutex_unlock(&rdev->ring[i].mutex); |
317 | } | 317 | } |
318 | mutex_unlock(&rdev->vram_mutex); | 318 | mutex_unlock(&rdev->vram_mutex); |
319 | mutex_unlock(&rdev->ddev->struct_mutex); | 319 | mutex_unlock(&rdev->ddev->struct_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index a69cb049d877..52dd22f2596e 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -60,17 +60,17 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) | |||
60 | return idx_value; | 60 | return idx_value; |
61 | } | 61 | } |
62 | 62 | ||
63 | void radeon_ring_write(struct radeon_cp *cp, uint32_t v) | 63 | void radeon_ring_write(struct radeon_ring *ring, uint32_t v) |
64 | { | 64 | { |
65 | #if DRM_DEBUG_CODE | 65 | #if DRM_DEBUG_CODE |
66 | if (cp->count_dw <= 0) { | 66 | if (ring->count_dw <= 0) { |
67 | DRM_ERROR("radeon: writing more dwords to ring than expected!\n"); | 67 | DRM_ERROR("radeon: writing more dwords to ring than expected!\n"); |
68 | } | 68 | } |
69 | #endif | 69 | #endif |
70 | cp->ring[cp->wptr++] = v; | 70 | ring->ring[ring->wptr++] = v; |
71 | cp->wptr &= cp->ptr_mask; | 71 | ring->wptr &= ring->ptr_mask; |
72 | cp->count_dw--; | 72 | ring->count_dw--; |
73 | cp->ring_free_dw--; | 73 | ring->ring_free_dw--; |
74 | } | 74 | } |
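A self-contained model of this write path: the mask wraps wptr inside a power-of-two ring, count_dw guards the current reservation, and ring_free_dw tracks overall occupancy. Bookkeeping only, no hardware:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_ring {
        uint32_t ring[8];   /* dword count must be a power of two */
        unsigned wptr, ptr_mask, count_dw, ring_free_dw;
    };

    static void toy_write(struct toy_ring *r, uint32_t v)
    {
        if (r->count_dw == 0) {
            fprintf(stderr, "writing more dwords to ring than reserved!\n");
            return;
        }
        r->ring[r->wptr++] = v;
        r->wptr &= r->ptr_mask;     /* wrap at the ring boundary */
        r->count_dw--;
        r->ring_free_dw--;
    }

    int main(void)
    {
        struct toy_ring r = { .wptr = 6, .ptr_mask = 7,
                              .count_dw = 4, .ring_free_dw = 5 };

        toy_write(&r, 1); toy_write(&r, 2); toy_write(&r, 3);
        printf("wptr=%u\n", r.wptr);    /* 6 -> 7 -> 0 -> 1 */
        return 0;
    }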
75 | 75 | ||
76 | void radeon_ib_bogus_cleanup(struct radeon_device *rdev) | 76 | void radeon_ib_bogus_cleanup(struct radeon_device *rdev) |
@@ -178,17 +178,17 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) | |||
178 | 178 | ||
179 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | 179 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) |
180 | { | 180 | { |
181 | struct radeon_cp *cp = &rdev->cp[ib->fence->ring]; | 181 | struct radeon_ring *ring = &rdev->ring[ib->fence->ring]; |
182 | int r = 0; | 182 | int r = 0; |
183 | 183 | ||
184 | if (!ib->length_dw || !cp->ready) { | 184 | if (!ib->length_dw || !ring->ready) { |
185 | /* TODO: Nothing in the IB to report. */ | 185 | /* TODO: Nothing in the IB to report. */ |
186 | DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx); | 186 | DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx); |
187 | return -EINVAL; | 187 | return -EINVAL; |
188 | } | 188 | } |
189 | 189 | ||
190 | /* 64 dwords should be enough for fence too */ | 190 | /* 64 dwords should be enough for fence too */ |
191 | r = radeon_ring_lock(rdev, cp, 64); | 191 | r = radeon_ring_lock(rdev, ring, 64); |
192 | if (r) { | 192 | if (r) { |
193 | DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); | 193 | DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); |
194 | return r; | 194 | return r; |
@@ -199,7 +199,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | |||
199 | /* once scheduled, the IB is considered free and protected by the fence */ | 199 | /* once scheduled, the IB is considered free and protected by the fence */ |
200 | ib->free = true; | 200 | ib->free = true; |
201 | mutex_unlock(&rdev->ib_pool.mutex); | 201 | mutex_unlock(&rdev->ib_pool.mutex); |
202 | radeon_ring_unlock_commit(rdev, cp); | 202 | radeon_ring_unlock_commit(rdev, ring); |
203 | return 0; | 203 | return 0; |
204 | } | 204 | } |
205 | 205 | ||
@@ -284,150 +284,150 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) | |||
284 | /* | 284 | /* |
285 | * Ring. | 285 | * Ring. |
286 | */ | 286 | */ |
287 | int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp) | 287 | int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring) |
288 | { | 288 | { |
289 | /* r1xx-r5xx only has CP ring */ | 289 | /* r1xx-r5xx only has CP ring */ |
290 | if (rdev->family < CHIP_R600) | 290 | if (rdev->family < CHIP_R600) |
291 | return RADEON_RING_TYPE_GFX_INDEX; | 291 | return RADEON_RING_TYPE_GFX_INDEX; |
292 | 292 | ||
293 | if (rdev->family >= CHIP_CAYMAN) { | 293 | if (rdev->family >= CHIP_CAYMAN) { |
294 | if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX]) | 294 | if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]) |
295 | return CAYMAN_RING_TYPE_CP1_INDEX; | 295 | return CAYMAN_RING_TYPE_CP1_INDEX; |
296 | else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX]) | 296 | else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]) |
297 | return CAYMAN_RING_TYPE_CP2_INDEX; | 297 | return CAYMAN_RING_TYPE_CP2_INDEX; |
298 | } | 298 | } |
299 | return RADEON_RING_TYPE_GFX_INDEX; | 299 | return RADEON_RING_TYPE_GFX_INDEX; |
300 | } | 300 | } |
301 | 301 | ||
302 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp) | 302 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) |
303 | { | 303 | { |
304 | if (rdev->wb.enabled) | 304 | if (rdev->wb.enabled) |
305 | cp->rptr = le32_to_cpu(rdev->wb.wb[cp->rptr_offs/4]); | 305 | ring->rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); |
306 | else | 306 | else |
307 | cp->rptr = RREG32(cp->rptr_reg); | 307 | ring->rptr = RREG32(ring->rptr_reg); |
308 | /* This works because ring_size is a power of 2 */ | 308 | /* This works because ring_size is a power of 2 */ |
309 | cp->ring_free_dw = (cp->rptr + (cp->ring_size / 4)); | 309 | ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4)); |
310 | cp->ring_free_dw -= cp->wptr; | 310 | ring->ring_free_dw -= ring->wptr; |
311 | cp->ring_free_dw &= cp->ptr_mask; | 311 | ring->ring_free_dw &= ring->ptr_mask; |
312 | if (!cp->ring_free_dw) { | 312 | if (!ring->ring_free_dw) { |
313 | cp->ring_free_dw = cp->ring_size / 4; | 313 | ring->ring_free_dw = ring->ring_size / 4; |
314 | } | 314 | } |
315 | } | 315 | } |
316 | 316 | ||
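The free-space arithmetic above works only because ring_size is a power of two, so the wrap can be done with a mask instead of a modulo. A worked standalone example with illustrative sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned ring_dw  = 1024;         /* ring_size / 4 */
            unsigned ptr_mask = ring_dw - 1;  /* valid only for powers of 2 */
            unsigned rptr = 100, wptr = 900;

            unsigned free_dw = (rptr + ring_dw - wptr) & ptr_mask;
            if (!free_dw)
                    free_dw = ring_dw;        /* rptr == wptr means empty */
            printf("free dwords = %u\n", free_dw);  /* prints 224 */
            return 0;
    }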
317 | 317 | ||
318 | int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw) | 318 | int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw) |
319 | { | 319 | { |
320 | int r; | 320 | int r; |
321 | 321 | ||
322 | /* Align requested size with padding so unlock_commit can | 322 | /* Align requested size with padding so unlock_commit can |
323 | * pad safely */ | 323 | * pad safely */ |
324 | ndw = (ndw + cp->align_mask) & ~cp->align_mask; | 324 | ndw = (ndw + ring->align_mask) & ~ring->align_mask; |
325 | while (ndw > (cp->ring_free_dw - 1)) { | 325 | while (ndw > (ring->ring_free_dw - 1)) { |
326 | radeon_ring_free_size(rdev, cp); | 326 | radeon_ring_free_size(rdev, ring); |
327 | if (ndw < cp->ring_free_dw) { | 327 | if (ndw < ring->ring_free_dw) { |
328 | break; | 328 | break; |
329 | } | 329 | } |
330 | r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp)); | 330 | r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring)); |
331 | if (r) | 331 | if (r) |
332 | return r; | 332 | return r; |
333 | } | 333 | } |
334 | cp->count_dw = ndw; | 334 | ring->count_dw = ndw; |
335 | cp->wptr_old = cp->wptr; | 335 | ring->wptr_old = ring->wptr; |
336 | return 0; | 336 | return 0; |
337 | } | 337 | } |
338 | 338 | ||
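radeon_ring_alloc rounds the request up to the fetch alignment, then insists on ndw <= ring_free_dw - 1, leaving one dword permanently unused so a completely full ring can never look identical to an empty one. A small sketch of the round-up (the mask value is illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned align_mask = 15;         /* 16-dword fetch granularity */
            unsigned ndw = 70;

            /* same round-up as radeon_ring_alloc */
            ndw = (ndw + align_mask) & ~align_mask;
            printf("ndw = %u\n", ndw);        /* prints 80 */
            return 0;
    }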
339 | int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw) | 339 | int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw) |
340 | { | 340 | { |
341 | int r; | 341 | int r; |
342 | 342 | ||
343 | mutex_lock(&cp->mutex); | 343 | mutex_lock(&ring->mutex); |
344 | r = radeon_ring_alloc(rdev, cp, ndw); | 344 | r = radeon_ring_alloc(rdev, ring, ndw); |
345 | if (r) { | 345 | if (r) { |
346 | mutex_unlock(&cp->mutex); | 346 | mutex_unlock(&ring->mutex); |
347 | return r; | 347 | return r; |
348 | } | 348 | } |
349 | return 0; | 349 | return 0; |
350 | } | 350 | } |
351 | 351 | ||
352 | void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp) | 352 | void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) |
353 | { | 353 | { |
354 | unsigned count_dw_pad; | 354 | unsigned count_dw_pad; |
355 | unsigned i; | 355 | unsigned i; |
356 | 356 | ||
357 | /* We pad to match fetch size */ | 357 | /* We pad to match fetch size */ |
358 | count_dw_pad = (cp->align_mask + 1) - | 358 | count_dw_pad = (ring->align_mask + 1) - |
359 | (cp->wptr & cp->align_mask); | 359 | (ring->wptr & ring->align_mask); |
360 | for (i = 0; i < count_dw_pad; i++) { | 360 | for (i = 0; i < count_dw_pad; i++) { |
361 | radeon_ring_write(cp, 2 << 30); | 361 | radeon_ring_write(ring, 2 << 30); |
362 | } | 362 | } |
363 | DRM_MEMORYBARRIER(); | 363 | DRM_MEMORYBARRIER(); |
364 | WREG32(cp->wptr_reg, cp->wptr); | 364 | WREG32(ring->wptr_reg, ring->wptr); |
365 | (void)RREG32(cp->wptr_reg); | 365 | (void)RREG32(ring->wptr_reg); |
366 | } | 366 | } |
367 | 367 | ||
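The commit path pads the write pointer out to the next fetch boundary with 2 << 30 dwords, which, as far as the CP packet format goes, encodes a type-2 packet, i.e. filler the CP skips over. Note that it emits a full group even when wptr is already aligned. A standalone sketch of the pad computation (values illustrative):

    #include <stdio.h>

    int main(void)
    {
            unsigned align_mask = 15;
            unsigned wptr = 1029;

            /* pad up to the next fetch boundary; a full group is
             * written even if (wptr & align_mask) == 0 */
            unsigned pad = (align_mask + 1) - (wptr & align_mask);
            printf("pad = %u dwords, filler = 0x%08x\n", pad, 2u << 30);
            return 0;
    }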
368 | void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp) | 368 | void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) |
369 | { | 369 | { |
370 | radeon_ring_commit(rdev, cp); | 370 | radeon_ring_commit(rdev, ring); |
371 | mutex_unlock(&cp->mutex); | 371 | mutex_unlock(&ring->mutex); |
372 | } | 372 | } |
373 | 373 | ||
374 | void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp) | 374 | void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring) |
375 | { | 375 | { |
376 | cp->wptr = cp->wptr_old; | 376 | ring->wptr = ring->wptr_old; |
377 | mutex_unlock(&cp->mutex); | 377 | mutex_unlock(&ring->mutex); |
378 | } | 378 | } |
379 | 379 | ||
380 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size, | 380 | int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, |
381 | unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg) | 381 | unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg) |
382 | { | 382 | { |
383 | int r; | 383 | int r; |
384 | 384 | ||
385 | cp->ring_size = ring_size; | 385 | ring->ring_size = ring_size; |
386 | cp->rptr_offs = rptr_offs; | 386 | ring->rptr_offs = rptr_offs; |
387 | cp->rptr_reg = rptr_reg; | 387 | ring->rptr_reg = rptr_reg; |
388 | cp->wptr_reg = wptr_reg; | 388 | ring->wptr_reg = wptr_reg; |
389 | /* Allocate ring buffer */ | 389 | /* Allocate ring buffer */ |
390 | if (cp->ring_obj == NULL) { | 390 | if (ring->ring_obj == NULL) { |
391 | r = radeon_bo_create(rdev, cp->ring_size, PAGE_SIZE, true, | 391 | r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, |
392 | RADEON_GEM_DOMAIN_GTT, | 392 | RADEON_GEM_DOMAIN_GTT, |
393 | &cp->ring_obj); | 393 | &ring->ring_obj); |
394 | if (r) { | 394 | if (r) { |
395 | dev_err(rdev->dev, "(%d) ring create failed\n", r); | 395 | dev_err(rdev->dev, "(%d) ring create failed\n", r); |
396 | return r; | 396 | return r; |
397 | } | 397 | } |
398 | r = radeon_bo_reserve(cp->ring_obj, false); | 398 | r = radeon_bo_reserve(ring->ring_obj, false); |
399 | if (unlikely(r != 0)) | 399 | if (unlikely(r != 0)) |
400 | return r; | 400 | return r; |
401 | r = radeon_bo_pin(cp->ring_obj, RADEON_GEM_DOMAIN_GTT, | 401 | r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT, |
402 | &cp->gpu_addr); | 402 | &ring->gpu_addr); |
403 | if (r) { | 403 | if (r) { |
404 | radeon_bo_unreserve(cp->ring_obj); | 404 | radeon_bo_unreserve(ring->ring_obj); |
405 | dev_err(rdev->dev, "(%d) ring pin failed\n", r); | 405 | dev_err(rdev->dev, "(%d) ring pin failed\n", r); |
406 | return r; | 406 | return r; |
407 | } | 407 | } |
408 | r = radeon_bo_kmap(cp->ring_obj, | 408 | r = radeon_bo_kmap(ring->ring_obj, |
409 | (void **)&cp->ring); | 409 | (void **)&ring->ring); |
410 | radeon_bo_unreserve(cp->ring_obj); | 410 | radeon_bo_unreserve(ring->ring_obj); |
411 | if (r) { | 411 | if (r) { |
412 | dev_err(rdev->dev, "(%d) ring map failed\n", r); | 412 | dev_err(rdev->dev, "(%d) ring map failed\n", r); |
413 | return r; | 413 | return r; |
414 | } | 414 | } |
415 | } | 415 | } |
416 | cp->ptr_mask = (cp->ring_size / 4) - 1; | 416 | ring->ptr_mask = (ring->ring_size / 4) - 1; |
417 | cp->ring_free_dw = cp->ring_size / 4; | 417 | ring->ring_free_dw = ring->ring_size / 4; |
418 | return 0; | 418 | return 0; |
419 | } | 419 | } |
420 | 420 | ||
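radeon_ring_init derives ptr_mask as (ring_size / 4) - 1, which only behaves as a wrap mask when the dword count is a power of two; the 1 MB ring set up in rv770_init further down satisfies that. A quick standalone check (size taken from that call):

    #include <stdio.h>

    int main(void)
    {
            unsigned ring_size = 1024 * 1024;          /* bytes */
            unsigned ptr_mask  = (ring_size / 4) - 1;  /* power-of-2 only */
            printf("ptr_mask = 0x%x\n", ptr_mask);     /* prints 0x3ffff */
            return 0;
    }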
421 | void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp) | 421 | void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring) |
422 | { | 422 | { |
423 | int r; | 423 | int r; |
424 | struct radeon_bo *ring_obj; | 424 | struct radeon_bo *ring_obj; |
425 | 425 | ||
426 | mutex_lock(&cp->mutex); | 426 | mutex_lock(&ring->mutex); |
427 | ring_obj = cp->ring_obj; | 427 | ring_obj = ring->ring_obj; |
428 | cp->ring = NULL; | 428 | ring->ring = NULL; |
429 | cp->ring_obj = NULL; | 429 | ring->ring_obj = NULL; |
430 | mutex_unlock(&cp->mutex); | 430 | mutex_unlock(&ring->mutex); |
431 | 431 | ||
432 | if (ring_obj) { | 432 | if (ring_obj) { |
433 | r = radeon_bo_reserve(ring_obj, false); | 433 | r = radeon_bo_reserve(ring_obj, false); |
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 80c1a9e010f7..f8cf04499a59 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c | |||
@@ -121,13 +121,13 @@ int radeon_semaphore_create(struct radeon_device *rdev, | |||
121 | void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, | 121 | void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, |
122 | struct radeon_semaphore *semaphore) | 122 | struct radeon_semaphore *semaphore) |
123 | { | 123 | { |
124 | radeon_semaphore_ring_emit(rdev, ring, &rdev->cp[ring], semaphore, false); | 124 | radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false); |
125 | } | 125 | } |
126 | 126 | ||
127 | void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, | 127 | void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, |
128 | struct radeon_semaphore *semaphore) | 128 | struct radeon_semaphore *semaphore) |
129 | { | 129 | { |
130 | radeon_semaphore_ring_emit(rdev, ring, &rdev->cp[ring], semaphore, true); | 130 | radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true); |
131 | } | 131 | } |
132 | 132 | ||
133 | void radeon_semaphore_free(struct radeon_device *rdev, | 133 | void radeon_semaphore_free(struct radeon_device *rdev, |
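Both wrappers above funnel into radeon_semaphore_ring_emit and differ only in the final wait flag. A hedged sketch of that flag-parameterized shape (names illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    /* one emit helper, selected by a single wait/signal flag */
    static void emit(int ring, bool wait)
    {
            printf("ring %d: %s semaphore\n", ring,
                   wait ? "wait on" : "signal");
    }

    static void emit_signal(int ring) { emit(ring, false); }
    static void emit_wait(int ring)   { emit(ring, true); }

    int main(void)
    {
            emit_wait(0);
            emit_signal(1);
            return 0;
    }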
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 5f4d31ef3933..3ab4be9e63d4 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -44,7 +44,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
44 | */ | 44 | */ |
45 | n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024; | 45 | n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024; |
46 | for (i = 0; i < RADEON_NUM_RINGS; ++i) | 46 | for (i = 0; i < RADEON_NUM_RINGS; ++i) |
47 | n -= rdev->cp[i].ring_size; | 47 | n -= rdev->ring[i].ring_size; |
48 | if (rdev->wb.wb_obj) | 48 | if (rdev->wb.wb_obj) |
49 | n -= RADEON_GPU_PAGE_SIZE; | 49 | n -= RADEON_GPU_PAGE_SIZE; |
50 | if (rdev->ih.ring_obj) | 50 | if (rdev->ih.ring_obj) |
@@ -236,16 +236,16 @@ out_cleanup: | |||
236 | } | 236 | } |
237 | 237 | ||
238 | void radeon_test_ring_sync(struct radeon_device *rdev, | 238 | void radeon_test_ring_sync(struct radeon_device *rdev, |
239 | struct radeon_cp *cpA, | 239 | struct radeon_ring *ringA, |
240 | struct radeon_cp *cpB) | 240 | struct radeon_ring *ringB) |
241 | { | 241 | { |
242 | struct radeon_fence *fence = NULL; | 242 | struct radeon_fence *fence = NULL; |
243 | struct radeon_semaphore *semaphore = NULL; | 243 | struct radeon_semaphore *semaphore = NULL; |
244 | int ringA = radeon_ring_index(rdev, cpA); | 244 | int ridxA = radeon_ring_index(rdev, ringA); |
245 | int ringB = radeon_ring_index(rdev, cpB); | 245 | int ridxB = radeon_ring_index(rdev, ringB); |
246 | int r; | 246 | int r; |
247 | 247 | ||
248 | r = radeon_fence_create(rdev, &fence, ringA); | 248 | r = radeon_fence_create(rdev, &fence, ridxA); |
249 | if (r) { | 249 | if (r) { |
250 | DRM_ERROR("Failed to create sync fence\n"); | 250 | DRM_ERROR("Failed to create sync fence\n"); |
251 | goto out_cleanup; | 251 | goto out_cleanup; |
@@ -257,14 +257,14 @@ void radeon_test_ring_sync(struct radeon_device *rdev, | |||
257 | goto out_cleanup; | 257 | goto out_cleanup; |
258 | } | 258 | } |
259 | 259 | ||
260 | r = radeon_ring_lock(rdev, cpA, 64); | 260 | r = radeon_ring_lock(rdev, ringA, 64); |
261 | if (r) { | 261 | if (r) { |
262 | DRM_ERROR("Failed to lock ring %d\n", ringA); | 262 | DRM_ERROR("Failed to lock ring A %d\n", ridxA); |
263 | goto out_cleanup; | 263 | goto out_cleanup; |
264 | } | 264 | } |
265 | radeon_semaphore_emit_wait(rdev, ringA, semaphore); | 265 | radeon_semaphore_emit_wait(rdev, ridxA, semaphore); |
266 | radeon_fence_emit(rdev, fence); | 266 | radeon_fence_emit(rdev, fence); |
267 | radeon_ring_unlock_commit(rdev, cpA); | 267 | radeon_ring_unlock_commit(rdev, ringA); |
268 | 268 | ||
269 | mdelay(1000); | 269 | mdelay(1000); |
270 | 270 | ||
@@ -273,13 +273,13 @@ void radeon_test_ring_sync(struct radeon_device *rdev, | |||
273 | goto out_cleanup; | 273 | goto out_cleanup; |
274 | } | 274 | } |
275 | 275 | ||
276 | r = radeon_ring_lock(rdev, cpB, 64); | 276 | r = radeon_ring_lock(rdev, ringB, 64); |
277 | if (r) { | 277 | if (r) { |
278 | DRM_ERROR("Failed to lock ring %d\n", ringB); | 278 | DRM_ERROR("Failed to lock ring B %p\n", ringB); |
279 | goto out_cleanup; | 279 | goto out_cleanup; |
280 | } | 280 | } |
281 | radeon_semaphore_emit_signal(rdev, ringB, semaphore); | 281 | radeon_semaphore_emit_signal(rdev, ridxB, semaphore); |
282 | radeon_ring_unlock_commit(rdev, cpB); | 282 | radeon_ring_unlock_commit(rdev, ringB); |
283 | 283 | ||
284 | r = radeon_fence_wait(fence, false); | 284 | r = radeon_fence_wait(fence, false); |
285 | if (r) { | 285 | if (r) { |
@@ -287,7 +287,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev, | |||
287 | goto out_cleanup; | 287 | goto out_cleanup; |
288 | } | 288 | } |
289 | 289 | ||
290 | DRM_INFO("Syncing between rings %d and %d seems to work.\n", ringA, ringB); | 290 | DRM_INFO("Syncing between rings %d and %d seems to work.\n", ridxA, ridxB); |
291 | 291 | ||
292 | out_cleanup: | 292 | out_cleanup: |
293 | if (semaphore) | 293 | if (semaphore) |
@@ -305,20 +305,20 @@ void radeon_test_syncing(struct radeon_device *rdev) | |||
305 | int i, j; | 305 | int i, j; |
306 | 306 | ||
307 | for (i = 1; i < RADEON_NUM_RINGS; ++i) { | 307 | for (i = 1; i < RADEON_NUM_RINGS; ++i) { |
308 | struct radeon_cp *cpA = &rdev->cp[i]; | 308 | struct radeon_ring *ringA = &rdev->ring[i]; |
309 | if (!cpA->ready) | 309 | if (!ringA->ready) |
310 | continue; | 310 | continue; |
311 | 311 | ||
312 | for (j = 0; j < i; ++j) { | 312 | for (j = 0; j < i; ++j) { |
313 | struct radeon_cp *cpB = &rdev->cp[j]; | 313 | struct radeon_ring *ringB = &rdev->ring[j]; |
314 | if (!cpB->ready) | 314 | if (!ringB->ready) |
315 | continue; | 315 | continue; |
316 | 316 | ||
317 | DRM_INFO("Testing syncing between rings %d and %d\n", i, j); | 317 | DRM_INFO("Testing syncing between rings %d and %d\n", i, j); |
318 | radeon_test_ring_sync(rdev, cpA, cpB); | 318 | radeon_test_ring_sync(rdev, ringA, ringB); |
319 | 319 | ||
320 | DRM_INFO("Testing syncing between rings %d and %d\n", j, i); | 320 | DRM_INFO("Testing syncing between rings %d and %d\n", j, i); |
321 | radeon_test_ring_sync(rdev, cpB, cpA); | 321 | radeon_test_ring_sync(rdev, ringB, ringA); |
322 | } | 322 | } |
323 | } | 323 | } |
324 | } | 324 | } |
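The test walks every ordered pair of ready rings and syncs each direction once: one ring blocks on a semaphore wait until the other signals it. A standalone sketch of the pair enumeration (ring count illustrative):

    #include <stdio.h>

    #define NUM_RINGS 3

    int main(void)
    {
            /* every ordered pair (i, j), i != j, is exercised once in
             * each direction, matching the nested loops above */
            for (int i = 1; i < NUM_RINGS; ++i)
                    for (int j = 0; j < i; ++j)
                            printf("sync %d->%d, then %d->%d\n", i, j, j, i);
            return 0;
    }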
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 0be15bf38d3c..b0ebaf893aca 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -188,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, | |||
188 | rbo = container_of(bo, struct radeon_bo, tbo); | 188 | rbo = container_of(bo, struct radeon_bo, tbo); |
189 | switch (bo->mem.mem_type) { | 189 | switch (bo->mem.mem_type) { |
190 | case TTM_PL_VRAM: | 190 | case TTM_PL_VRAM: |
191 | if (rbo->rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready == false) | 191 | if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) |
192 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); | 192 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); |
193 | else | 193 | else |
194 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); | 194 | radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); |
@@ -255,7 +255,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, | |||
255 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); | 255 | DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); |
256 | return -EINVAL; | 256 | return -EINVAL; |
257 | } | 257 | } |
258 | if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready) { | 258 | if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready) { |
259 | DRM_ERROR("Trying to move memory with CP turned off.\n"); | 259 | DRM_ERROR("Trying to move memory with CP turned off.\n"); |
260 | return -EINVAL; | 260 | return -EINVAL; |
261 | } | 261 | } |
@@ -380,7 +380,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, | |||
380 | radeon_move_null(bo, new_mem); | 380 | radeon_move_null(bo, new_mem); |
381 | return 0; | 381 | return 0; |
382 | } | 382 | } |
383 | if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) { | 383 | if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) { |
384 | /* use memcpy */ | 384 | /* use memcpy */ |
385 | goto memcpy; | 385 | goto memcpy; |
386 | } | 386 | } |
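All three TTM hooks above gate on the GFX ring's ready flag: eviction placement, blit moves, and the generic move path each fall back to a CPU path when the ring (or the ASIC copy hook) is unavailable. A minimal sketch of that decision (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static const char *pick_move_path(bool gfx_ready, bool has_copy)
    {
            /* blit only when the GFX ring is up and a copy hook
             * exists; otherwise degrade to plain memcpy */
            return (gfx_ready && has_copy) ? "blit" : "memcpy";
    }

    int main(void)
    {
            printf("%s\n", pick_move_path(false, true));  /* memcpy */
            printf("%s\n", pick_move_path(true, true));   /* blit */
            return 0;
    }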
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 8a935987d022..beed57c7df96 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -55,45 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev) | |||
55 | 55 | ||
56 | void rv515_ring_start(struct radeon_device *rdev) | 56 | void rv515_ring_start(struct radeon_device *rdev) |
57 | { | 57 | { |
58 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 58 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
59 | int r; | 59 | int r; |
60 | 60 | ||
61 | r = radeon_ring_lock(rdev, cp, 64); | 61 | r = radeon_ring_lock(rdev, ring, 64); |
62 | if (r) { | 62 | if (r) { |
63 | return; | 63 | return; |
64 | } | 64 | } |
65 | radeon_ring_write(cp, PACKET0(ISYNC_CNTL, 0)); | 65 | radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0)); |
66 | radeon_ring_write(cp, | 66 | radeon_ring_write(ring, |
67 | ISYNC_ANY2D_IDLE3D | | 67 | ISYNC_ANY2D_IDLE3D | |
68 | ISYNC_ANY3D_IDLE2D | | 68 | ISYNC_ANY3D_IDLE2D | |
69 | ISYNC_WAIT_IDLEGUI | | 69 | ISYNC_WAIT_IDLEGUI | |
70 | ISYNC_CPSCRATCH_IDLEGUI); | 70 | ISYNC_CPSCRATCH_IDLEGUI); |
71 | radeon_ring_write(cp, PACKET0(WAIT_UNTIL, 0)); | 71 | radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0)); |
72 | radeon_ring_write(cp, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); | 72 | radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
73 | radeon_ring_write(cp, PACKET0(R300_DST_PIPE_CONFIG, 0)); | 73 | radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0)); |
74 | radeon_ring_write(cp, R300_PIPE_AUTO_CONFIG); | 74 | radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG); |
75 | radeon_ring_write(cp, PACKET0(GB_SELECT, 0)); | 75 | radeon_ring_write(ring, PACKET0(GB_SELECT, 0)); |
76 | radeon_ring_write(cp, 0); | 76 | radeon_ring_write(ring, 0); |
77 | radeon_ring_write(cp, PACKET0(GB_ENABLE, 0)); | 77 | radeon_ring_write(ring, PACKET0(GB_ENABLE, 0)); |
78 | radeon_ring_write(cp, 0); | 78 | radeon_ring_write(ring, 0); |
79 | radeon_ring_write(cp, PACKET0(R500_SU_REG_DEST, 0)); | 79 | radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0)); |
80 | radeon_ring_write(cp, (1 << rdev->num_gb_pipes) - 1); | 80 | radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1); |
81 | radeon_ring_write(cp, PACKET0(VAP_INDEX_OFFSET, 0)); | 81 | radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0)); |
82 | radeon_ring_write(cp, 0); | 82 | radeon_ring_write(ring, 0); |
83 | radeon_ring_write(cp, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); | 83 | radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
84 | radeon_ring_write(cp, RB3D_DC_FLUSH | RB3D_DC_FREE); | 84 | radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE); |
85 | radeon_ring_write(cp, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); | 85 | radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
86 | radeon_ring_write(cp, ZC_FLUSH | ZC_FREE); | 86 | radeon_ring_write(ring, ZC_FLUSH | ZC_FREE); |
87 | radeon_ring_write(cp, PACKET0(WAIT_UNTIL, 0)); | 87 | radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0)); |
88 | radeon_ring_write(cp, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); | 88 | radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
89 | radeon_ring_write(cp, PACKET0(GB_AA_CONFIG, 0)); | 89 | radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0)); |
90 | radeon_ring_write(cp, 0); | 90 | radeon_ring_write(ring, 0); |
91 | radeon_ring_write(cp, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); | 91 | radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
92 | radeon_ring_write(cp, RB3D_DC_FLUSH | RB3D_DC_FREE); | 92 | radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE); |
93 | radeon_ring_write(cp, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); | 93 | radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
94 | radeon_ring_write(cp, ZC_FLUSH | ZC_FREE); | 94 | radeon_ring_write(ring, ZC_FLUSH | ZC_FREE); |
95 | radeon_ring_write(cp, PACKET0(GB_MSPOS0, 0)); | 95 | radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0)); |
96 | radeon_ring_write(cp, | 96 | radeon_ring_write(ring, |
97 | ((6 << MS_X0_SHIFT) | | 97 | ((6 << MS_X0_SHIFT) | |
98 | (6 << MS_Y0_SHIFT) | | 98 | (6 << MS_Y0_SHIFT) | |
99 | (6 << MS_X1_SHIFT) | | 99 | (6 << MS_X1_SHIFT) | |
@@ -102,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev) | |||
102 | (6 << MS_Y2_SHIFT) | | 102 | (6 << MS_Y2_SHIFT) | |
103 | (6 << MSBD0_Y_SHIFT) | | 103 | (6 << MSBD0_Y_SHIFT) | |
104 | (6 << MSBD0_X_SHIFT))); | 104 | (6 << MSBD0_X_SHIFT))); |
105 | radeon_ring_write(cp, PACKET0(GB_MSPOS1, 0)); | 105 | radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0)); |
106 | radeon_ring_write(cp, | 106 | radeon_ring_write(ring, |
107 | ((6 << MS_X3_SHIFT) | | 107 | ((6 << MS_X3_SHIFT) | |
108 | (6 << MS_Y3_SHIFT) | | 108 | (6 << MS_Y3_SHIFT) | |
109 | (6 << MS_X4_SHIFT) | | 109 | (6 << MS_X4_SHIFT) | |
@@ -111,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev) | |||
111 | (6 << MS_X5_SHIFT) | | 111 | (6 << MS_X5_SHIFT) | |
112 | (6 << MS_Y5_SHIFT) | | 112 | (6 << MS_Y5_SHIFT) | |
113 | (6 << MSBD1_SHIFT))); | 113 | (6 << MSBD1_SHIFT))); |
114 | radeon_ring_write(cp, PACKET0(GA_ENHANCE, 0)); | 114 | radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0)); |
115 | radeon_ring_write(cp, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL); | 115 | radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL); |
116 | radeon_ring_write(cp, PACKET0(GA_POLY_MODE, 0)); | 116 | radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0)); |
117 | radeon_ring_write(cp, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE); | 117 | radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE); |
118 | radeon_ring_write(cp, PACKET0(GA_ROUND_MODE, 0)); | 118 | radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0)); |
119 | radeon_ring_write(cp, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); | 119 | radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); |
120 | radeon_ring_write(cp, PACKET0(0x20C8, 0)); | 120 | radeon_ring_write(ring, PACKET0(0x20C8, 0)); |
121 | radeon_ring_write(cp, 0); | 121 | radeon_ring_write(ring, 0); |
122 | radeon_ring_unlock_commit(rdev, cp); | 122 | radeon_ring_unlock_commit(rdev, ring); |
123 | } | 123 | } |
124 | 124 | ||
125 | int rv515_mc_wait_for_idle(struct radeon_device *rdev) | 125 | int rv515_mc_wait_for_idle(struct radeon_device *rdev) |
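rv515_ring_start is a straight sequence of PACKET0 register writes. As a hedged sketch of the type-0 header these encode, assuming the usual radeon CP layout (bits [31:30] = 0, [29:16] = data dwords - 1, [15:0] = register offset >> 2; the real PACKET0() macro lives in the radeon headers and this is only an illustration of it):

    #include <stdio.h>

    static unsigned packet0(unsigned reg, unsigned ndw_minus_1)
    {
            return (0u << 30) | (ndw_minus_1 << 16) | (reg >> 2);
    }

    int main(void)
    {
            /* mirrors the PACKET0(0x20C8, 0) write near the end of
             * rv515_ring_start above */
            printf("PACKET0(0x20C8, 0) ~ 0x%08x\n", packet0(0x20C8, 0));
            return 0;
    }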
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 9e4c0418f54d..f01603d522bb 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) | |||
357 | void r700_cp_fini(struct radeon_device *rdev) | 357 | void r700_cp_fini(struct radeon_device *rdev) |
358 | { | 358 | { |
359 | r700_cp_stop(rdev); | 359 | r700_cp_stop(rdev); |
360 | radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]); | 360 | radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]); |
361 | } | 361 | } |
362 | 362 | ||
363 | /* | 363 | /* |
@@ -1043,7 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
1043 | 1043 | ||
1044 | static int rv770_startup(struct radeon_device *rdev) | 1044 | static int rv770_startup(struct radeon_device *rdev) |
1045 | { | 1045 | { |
1046 | struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]; | 1046 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
1047 | int r; | 1047 | int r; |
1048 | 1048 | ||
1049 | /* enable pcie gen2 link */ | 1049 | /* enable pcie gen2 link */ |
@@ -1092,7 +1092,7 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1092 | } | 1092 | } |
1093 | r600_irq_set(rdev); | 1093 | r600_irq_set(rdev); |
1094 | 1094 | ||
1095 | r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET, | 1095 | r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, |
1096 | R600_CP_RB_RPTR, R600_CP_RB_WPTR); | 1096 | R600_CP_RB_RPTR, R600_CP_RB_WPTR); |
1097 | if (r) | 1097 | if (r) |
1098 | return r; | 1098 | return r; |
@@ -1144,7 +1144,7 @@ int rv770_suspend(struct radeon_device *rdev) | |||
1144 | r600_audio_fini(rdev); | 1144 | r600_audio_fini(rdev); |
1145 | /* FIXME: we should wait for ring to be empty */ | 1145 | /* FIXME: we should wait for ring to be empty */ |
1146 | r700_cp_stop(rdev); | 1146 | r700_cp_stop(rdev); |
1147 | rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 1147 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
1148 | r600_irq_suspend(rdev); | 1148 | r600_irq_suspend(rdev); |
1149 | radeon_wb_disable(rdev); | 1149 | radeon_wb_disable(rdev); |
1150 | rv770_pcie_gart_disable(rdev); | 1150 | rv770_pcie_gart_disable(rdev); |
@@ -1217,8 +1217,8 @@ int rv770_init(struct radeon_device *rdev) | |||
1217 | if (r) | 1217 | if (r) |
1218 | return r; | 1218 | return r; |
1219 | 1219 | ||
1220 | rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; | 1220 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; |
1221 | r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); | 1221 | r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); |
1222 | 1222 | ||
1223 | rdev->ih.ring_obj = NULL; | 1223 | rdev->ih.ring_obj = NULL; |
1224 | r600_ih_ring_init(rdev, 64 * 1024); | 1224 | r600_ih_ring_init(rdev, 64 * 1024); |
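rv770_init only records the 1 MB ring size and clears ring_obj; the buffer itself is created lazily by the NULL check in radeon_ring_init seen earlier, so a resume reuses the existing object instead of reallocating it. A hedged sketch of that lazy-create pattern (malloc stands in for radeon_bo_create):

    #include <stdio.h>
    #include <stdlib.h>

    struct ring { void *obj; unsigned size; };

    static int ring_init(struct ring *r)
    {
            if (r->obj == NULL) {             /* create on first call only */
                    r->obj = malloc(r->size);
                    if (!r->obj)
                            return -1;
            }
            return 0;                         /* resume path reuses r->obj */
    }

    int main(void)
    {
            struct ring r = { NULL, 1024 * 1024 };
            ring_init(&r);                    /* allocates */
            ring_init(&r);                    /* reuses    */
            printf("obj = %p\n", r.obj);
            free(r.obj);
            return 0;
    }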