path: root/drivers/gpu
author		Christian König <deathsimple@vodafone.de>	2011-10-13 07:19:22 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-12-20 14:50:10 -0500
commit		bf85279958da96cb4b11aac89b34f0424c3c120e (patch)
tree		1d0197a268a642ee02ebe1a22d8f5ac0e7798494 /drivers/gpu
parent		5596a9db156107b01ceb7db4d50cc091117da627 (diff)
drm/radeon: make cp variable an array
Replace the cp, cp1 and cp2 members with just an array of radeon_cp structs.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
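In short, the single cp member and the two Cayman-only cp1/cp2 members become one cp[RADEON_NUM_RINGS] array, and the new radeon_ring_index() helper maps a ring pointer back to its array index so common code can pass the right ring id to the fence functions. The standalone C sketch below only illustrates that idea; every name ending in _sketch is invented for this example and none of it is the real radeon.h layout:

	/* Standalone sketch of the cp-array idea, not kernel code. */
	#include <stdio.h>

	enum {
		RING_GFX_SKETCH,	/* stands in for RADEON_RING_TYPE_GFX_INDEX */
		RING_CP1_SKETCH,	/* stands in for CAYMAN_RING_TYPE_CP1_INDEX */
		RING_CP2_SKETCH,	/* stands in for CAYMAN_RING_TYPE_CP2_INDEX */
		NUM_RINGS_SKETCH	/* stands in for RADEON_NUM_RINGS */
	};

	struct cp_sketch {
		unsigned ring_size;
		int ready;
	};

	struct device_sketch {
		/* was three separate members: cp, cp1, cp2 */
		struct cp_sketch cp[NUM_RINGS_SKETCH];
	};

	/* Mirrors the shape of the new radeon_ring_index(): compare the ring
	 * pointer against the known array slots (family checks omitted). */
	static int ring_index_sketch(struct device_sketch *dev, struct cp_sketch *cp)
	{
		if (cp == &dev->cp[RING_CP1_SKETCH])
			return RING_CP1_SKETCH;
		if (cp == &dev->cp[RING_CP2_SKETCH])
			return RING_CP2_SKETCH;
		return RING_GFX_SKETCH;
	}

	int main(void)
	{
		struct device_sketch dev = { { { 0 } } };
		struct cp_sketch *cp1 = &dev.cp[RING_CP1_SKETCH];

		cp1->ready = 1;
		printf("ring index %d ready %d\n", ring_index_sketch(&dev, cp1), cp1->ready);
		return 0;
	}

In the real driver the index then selects the per-ring mutex, fence driver slot and ring buffer, as the hunks below show.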
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	| 14
-rw-r--r--	drivers/gpu/drm/radeon/evergreen_blit_kms.c	| 16
-rw-r--r--	drivers/gpu/drm/radeon/ni.c	| 30
-rw-r--r--	drivers/gpu/drm/radeon/r100.c	| 18
-rw-r--r--	drivers/gpu/drm/radeon/r200.c	| 2
-rw-r--r--	drivers/gpu/drm/radeon/r300.c	| 4
-rw-r--r--	drivers/gpu/drm/radeon/r420.c	| 4
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	| 30
-rw-r--r--	drivers/gpu/drm/radeon/r600_blit_kms.c	| 20
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	| 6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_device.c	| 3
-rw-r--r--	drivers/gpu/drm/radeon/radeon_fence.c	| 4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_gem.c	| 4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_pm.c	| 16
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ring.c	| 19
-rw-r--r--	drivers/gpu/drm/radeon/radeon_semaphore.c	| 4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_test.c	| 3
-rw-r--r--	drivers/gpu/drm/radeon/radeon_ttm.c	| 6
-rw-r--r--	drivers/gpu/drm/radeon/rv515.c	| 2
-rw-r--r--	drivers/gpu/drm/radeon/rv770.c	| 10
20 files changed, 119 insertions(+), 96 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index d1264a7154a..cb198aca9f5 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1311,7 +1311,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 
 	/* set to DX10/11 mode */
 	radeon_ring_write(cp, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -1362,7 +1362,7 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 
 static int evergreen_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 	uint32_t cp_me;
 
@@ -1428,7 +1428,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 
 int evergreen_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -3056,7 +3056,7 @@ restart_ih:
 
 static int evergreen_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -3168,7 +3168,7 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
@@ -3251,8 +3251,8 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 75d0a6f0a39..56f5d92cce2 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -49,7 +49,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
 		  int w, int h, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;
 
@@ -88,7 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		    u32 sync_type, u32 size,
 		    u64 mc_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 
 	if (size == 0xffffffff)
@@ -116,7 +116,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 static void
 set_shaders(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 
 	/* VS */
@@ -144,7 +144,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 
 	/* high addr, stride */
@@ -189,7 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
 		 u64 gpu_addr, u32 size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
 	u32 sq_tex_resource_word4, sq_tex_resource_word7;
 
@@ -230,7 +230,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
 	     int x2, int y2)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	/* workaround some hw bugs */
 	if (x2 == 0)
 		x1 = 1;
@@ -261,7 +261,7 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
 	radeon_ring_write(cp, DI_PT_RECTLIST);
@@ -286,7 +286,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
 	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
 	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cc9aaeb104f..2d809e62c4c 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1049,7 +1049,7 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
 
 static int cayman_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 
 	r = radeon_ring_lock(rdev, cp, 7);
@@ -1116,7 +1116,7 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
 	cayman_cp_enable(rdev, false);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 int cayman_cp_resume(struct radeon_device *rdev)
@@ -1147,7 +1147,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
-	cp = &rdev->cp;
+	cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1181,7 +1181,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring1 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp1;
+	cp = &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1207,7 +1207,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring2 - compute only */
 	/* Set ring buffer size */
-	cp = &rdev->cp2;
+	cp = &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX];
 	rb_bufsz = drm_order(cp->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
@@ -1233,15 +1233,15 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* start the rings */
 	cayman_cp_start(rdev);
-	rdev->cp.ready = true;
-	rdev->cp1.ready = true;
-	rdev->cp2.ready = true;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
+	rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
 	/* this only test cp0 */
-	r = radeon_ring_test(rdev, &rdev->cp);
+	r = radeon_ring_test(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 	if (r) {
-		rdev->cp.ready = false;
-		rdev->cp1.ready = false;
-		rdev->cp2.ready = false;
+		rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 		return r;
 	}
 
@@ -1343,7 +1343,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
 
 static int cayman_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -1438,7 +1438,7 @@ int cayman_suspend(struct radeon_device *rdev)
 {
 	/* FIXME: we should wait for ring to be empty */
 	cayman_cp_enable(rdev, false);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);
@@ -1455,7 +1455,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* This don't do much */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 6c328115e66..6ca20d7bf62 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -811,7 +811,7 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 void r100_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 
 	/* We have to make sure that caches are flushed before
 	 * CPU might read something from VRAM. */
@@ -849,7 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t cur_pages;
 	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
@@ -934,7 +934,7 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
 
 void r100_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	r = radeon_ring_lock(rdev, cp, 2);
@@ -1048,7 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
 
 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned rb_bufsz;
 	unsigned rb_blksz;
 	unsigned max_fetch;
@@ -1162,7 +1162,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 	}
 	/* Disable ring */
 	r100_cp_disable(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 	DRM_INFO("radeon: cp finalized\n");
 }
 
@@ -1170,7 +1170,7 @@ void r100_cp_disable(struct radeon_device *rdev)
 {
 	/* Disable ring */
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
 	WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -2587,7 +2587,7 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t rdp, wdp;
 	unsigned count, i, j;
 
@@ -3686,7 +3686,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 
 	radeon_ring_write(cp, PACKET0(RADEON_CP_IB_BASE, 1));
 	radeon_ring_write(cp, ib->gpu_addr);
@@ -3778,7 +3778,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
 	/* Shutdown CP we shouldn't need to do that but better be safe than
 	 * sorry
 	 */
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	WREG32(R_000740_CP_CSQ_CNTL, 0);
 
 	/* Save few CRTC registers */
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d84e633f72f..d59c727a8e0 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -87,7 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
 		  unsigned num_gpu_pages,
 		  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t size;
 	uint32_t cur_size;
 	int i, num_loops;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cbb62fc3f2e..66ff35f394c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -175,7 +175,7 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
 void r300_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 
 	/* Who ever call radeon_fence_emit should call ring_lock and ask
 	 * for enough space (today caller are ib schedule and buffer move) */
@@ -208,7 +208,7 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 
 void r300_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned gb_tile_config;
 	int r;
 
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 4c0af4955f0..62e86043699 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -199,7 +199,7 @@ static void r420_clock_resume(struct radeon_device *rdev)
 
 static void r420_cp_errata_init(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 
 	/* RV410 and R420 can lock up if CP DMA to host memory happens
 	 * while the 2D engine is busy.
@@ -217,7 +217,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
 
 static void r420_cp_errata_fini(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 
 	/* Catch the RESYNC we dispatched all the way back,
 	 * at the very beginning of the CP init.
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 59975317674..aaf8cd42943 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2144,7 +2144,7 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
 
 int r600_cp_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	uint32_t cp_me;
 
@@ -2174,7 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
 
 int r600_cp_resume(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -2248,7 +2248,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned r
 void r600_cp_fini(struct radeon_device *rdev)
 {
 	r600_cp_stop(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 
@@ -2271,7 +2271,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	uint32_t scratch;
 	uint32_t tmp = 0;
-	unsigned i;
+	unsigned i, ridx = radeon_ring_index(rdev, cp);
 	int r;
 
 	r = radeon_scratch_get(rdev, &scratch);
@@ -2282,7 +2282,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 	WREG32(scratch, 0xCAFEDEAD);
 	r = radeon_ring_lock(rdev, cp, 3);
 	if (r) {
-		DRM_ERROR("radeon: cp failed to lock ring %p (%d).\n", cp, r);
+		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
 		radeon_scratch_free(rdev, scratch);
 		return r;
 	}
@@ -2297,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 		DRM_UDELAY(1);
 	}
 	if (i < rdev->usec_timeout) {
-		DRM_INFO("ring test on %p succeeded in %d usecs\n", cp, i);
+		DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
 	} else {
-		DRM_ERROR("radeon: ring %p test failed (scratch(0x%04X)=0x%08X)\n",
-			  cp, scratch, tmp);
+		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+			  ridx, scratch, tmp);
 		r = -EINVAL;
 	}
 	radeon_scratch_free(rdev, scratch);
@@ -2310,7 +2310,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[fence->ring];
 
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
@@ -2420,7 +2420,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
 
 int r600_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -2534,7 +2534,7 @@ int r600_suspend(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
@@ -2609,8 +2609,8 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -2677,7 +2677,7 @@ void r600_fini(struct radeon_device *rdev)
  */
 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 
 	/* FIXME: implement */
 	radeon_ring_write(cp, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -3518,7 +3518,7 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	unsigned count, i, j;
 
 	radeon_ring_free_size(rdev, cp);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 39ae19d38c2..62dd1c281c7 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -50,7 +50,7 @@ static void
 set_render_target(struct radeon_device *rdev, int format,
 		  int w, int h, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cb_color_info;
 	int pitch, slice;
 
@@ -104,7 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 		    u32 sync_type, u32 size,
 		    u64 mc_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 cp_coher_size;
 
 	if (size == 0xffffffff)
@@ -123,7 +123,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
 static void
 set_shaders(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u64 gpu_addr;
 	u32 sq_pgm_resources;
 
@@ -170,7 +170,7 @@ set_shaders(struct radeon_device *rdev)
 static void
 set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_vtx_constant_word2;
 
 	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -207,7 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
 		 u64 gpu_addr, u32 size)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 
 	if (h < 1)
@@ -246,7 +246,7 @@ static void
 set_scissors(struct radeon_device *rdev, int x1, int y1,
 	     int x2, int y2)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
 	radeon_ring_write(cp, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
 	radeon_ring_write(cp, (x1 << 0) | (y1 << 16));
@@ -267,7 +267,7 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
 static void
 draw_auto(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	radeon_ring_write(cp, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(cp, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	radeon_ring_write(cp, DI_PT_RECTLIST);
@@ -292,7 +292,7 @@ draw_auto(struct radeon_device *rdev)
 static void
 set_default_state(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
 	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
 	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -687,7 +687,7 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
 
 int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 	int ring_size;
 	int num_loops = 0;
@@ -727,7 +727,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
 	if (fence)
 		r = radeon_fence_emit(rdev, fence);
 
-	radeon_ring_unlock_commit(rdev, &rdev->cp);
+	radeon_ring_unlock_commit(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 void r600_kms_blit_copy(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index bbe88ec3951..76c58e9e477 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -547,6 +547,7 @@ struct r600_ih {
 	struct radeon_bo *ring_obj;
 	volatile uint32_t *ring;
 	unsigned rptr;
+	unsigned rptr_offs;
 	unsigned wptr;
 	unsigned wptr_old;
 	unsigned ring_size;
@@ -598,6 +599,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev);
 int radeon_ib_test(struct radeon_device *rdev);
 extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
 /* Ring access between begin & end cannot sleep */
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp);
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp);
 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw);
@@ -1284,9 +1286,7 @@ struct radeon_device {
 	rwlock_t fence_lock;
 	struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
 	struct radeon_semaphore_driver semaphore_drv;
-	struct radeon_cp cp;
-	struct radeon_cp cp1;
-	struct radeon_cp cp2;
+	struct radeon_cp cp[RADEON_NUM_RINGS];
 	struct radeon_ib_pool ib_pool;
 	struct radeon_irq irq;
 	struct radeon_asic *asic;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 36296ad397a..023c156eddd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -718,7 +718,8 @@ int radeon_device_init(struct radeon_device *rdev,
 	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
-	mutex_init(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		mutex_init(&rdev->cp[i].mutex);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
 	if (rdev->family >= CHIP_R600)
 		spin_lock_init(&rdev->ih.lock);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 9ed0bb100bc..86f4eeaeba6 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -84,7 +84,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 		return 0;
 	}
 	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
-	if (!rdev->cp.ready)
+	if (!rdev->cp[fence->ring].ready)
 		/* FIXME: cp is not running assume everythings is done right
 		 * away
 		 */
@@ -269,7 +269,7 @@ retry:
 		 * if we experiencing a lockup the value doesn't change
 		 */
 		if (seq == rdev->fence_drv[fence->ring].last_seq &&
-		    radeon_gpu_is_lockup(rdev, &rdev->cp)) {
+		    radeon_gpu_is_lockup(rdev, &rdev->cp[fence->ring])) {
 			/* good news we believe it's a lockup */
 			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
 			       fence->seq, seq);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 136772ccfe7..1ce8fa71cf7 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -152,6 +152,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_info *args = data;
 	struct ttm_mem_type_manager *man;
+	unsigned i;
 
 	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
@@ -161,7 +162,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
 	args->vram_visible -= radeon_fbdev_total_size(rdev);
 	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
-	args->gart_size -= rdev->cp.ring_size;
+	for(i = 0; i < RADEON_NUM_RINGS; ++i)
+		args->gart_size -= rdev->cp[i].ring_size;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 73b6714d615..50b632ac823 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,8 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
 	mutex_lock(&rdev->ddev->struct_mutex);
 	mutex_lock(&rdev->vram_mutex);
-	if (rdev->cp.ring_obj)
-		mutex_lock(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (rdev->cp[i].ring_obj)
+			mutex_lock(&rdev->cp[i].mutex);
+	}
 
 	/* gui idle int has issues on older chips it seems */
 	if (rdev->family >= CHIP_R600) {
@@ -269,11 +271,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 			radeon_irq_set(rdev);
 		}
 	} else {
-		struct radeon_cp *cp = &rdev->cp;
+		struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 		if (cp->ready) {
 			struct radeon_fence *fence;
 			radeon_ring_alloc(rdev, cp, 64);
-			radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+			radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, cp));
 			radeon_fence_emit(rdev, fence);
 			radeon_ring_commit(rdev, cp);
 			radeon_fence_wait(fence, false);
@@ -309,8 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 
 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 
-	if (rdev->cp.ring_obj)
-		mutex_unlock(&rdev->cp.mutex);
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (rdev->cp[i].ring_obj)
+			mutex_unlock(&rdev->cp[i].mutex);
+	}
 	mutex_unlock(&rdev->vram_mutex);
 	mutex_unlock(&rdev->ddev->struct_mutex);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 209834dbf18..d96b13af5e9 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -178,7 +178,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
 	int r = 0;
 
 	if (!ib->length_dw || !cp->ready) {
@@ -284,6 +284,21 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 /*
  * Ring.
  */
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp)
+{
+	/* r1xx-r5xx only has CP ring */
+	if (rdev->family < CHIP_R600)
+		return RADEON_RING_TYPE_GFX_INDEX;
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX])
+			return CAYMAN_RING_TYPE_CP1_INDEX;
+		else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX])
+			return CAYMAN_RING_TYPE_CP2_INDEX;
+	}
+	return RADEON_RING_TYPE_GFX_INDEX;
+}
+
 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	if (rdev->wb.enabled)
@@ -312,7 +327,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned
 		if (ndw < cp->ring_free_dw) {
 			break;
 		}
-		r = radeon_fence_wait_next(rdev, RADEON_RING_TYPE_GFX_INDEX);
+		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp));
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 064694a6782..bf4789eed0b 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -121,13 +121,13 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
 				  struct radeon_semaphore *semaphore)
 {
-	radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, false);
+	radeon_semaphore_ring_emit(rdev, &rdev->cp[ring], semaphore, false);
 }
 
 void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
 				struct radeon_semaphore *semaphore)
 {
-	radeon_semaphore_ring_emit(rdev, &rdev->cp, semaphore, true);
+	radeon_semaphore_ring_emit(rdev, &rdev->cp[ring], semaphore, true);
 }
 
 void radeon_semaphore_free(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index ee6c160ffae..160e7df7755 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -43,7 +43,8 @@ void radeon_test_moves(struct radeon_device *rdev)
 	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
 	 */
 	n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
-	n -= rdev->cp.ring_size;
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		n -= rdev->cp[i].ring_size;
 	if (rdev->wb.wb_obj)
 		n -= RADEON_GPU_PAGE_SIZE;
 	if (rdev->ih.ring_obj)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 112ecaa6362..0be15bf38d3 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -188,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->rdev->cp.ready == false)
+		if (rbo->rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready == false)
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 		else
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -255,7 +255,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
 		return -EINVAL;
 	}
-	if (!rdev->cp.ready) {
+	if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready) {
 		DRM_ERROR("Trying to move memory with CP turned off.\n");
 		return -EINVAL;
 	}
@@ -380,7 +380,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 		radeon_move_null(bo, new_mem);
 		return 0;
 	}
-	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+	if (!rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
 		/* use memcpy */
 		goto memcpy;
 	}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8fe13ba8143..8a935987d02 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -55,7 +55,7 @@ void rv515_debugfs(struct radeon_device *rdev)
 
 void rv515_ring_start(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	r = radeon_ring_lock(rdev, cp, 64);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a2c60598d0f..9e4c0418f54 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 void r700_cp_fini(struct radeon_device *rdev)
 {
 	r700_cp_stop(rdev);
-	radeon_ring_fini(rdev, &rdev->cp);
+	radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 /*
@@ -1043,7 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
 
 static int rv770_startup(struct radeon_device *rdev)
 {
-	struct radeon_cp *cp = &rdev->cp;
+	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -1144,7 +1144,7 @@ int rv770_suspend(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
-	rdev->cp.ready = false;
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
@@ -1217,8 +1217,8 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, &rdev->cp, 1024 * 1024);
+	rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);