Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--  drivers/gpu/drm/radeon/r100.c  149
 1 file changed, 79 insertions(+), 70 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 2f18163e5e32..271cee7f817c 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -811,30 +811,33 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 void r100_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
 {
+	struct radeon_cp *cp = &rdev->cp;
+
 	/* We have to make sure that caches are flushed before
 	 * CPU might read something from VRAM. */
-	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
-	radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
-	radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
-	radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
+	radeon_ring_write(cp, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(cp, RADEON_RB3D_DC_FLUSH_ALL);
+	radeon_ring_write(cp, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+	radeon_ring_write(cp, RADEON_RB3D_ZC_FLUSH_ALL);
 	/* Wait until IDLE & CLEAN */
-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-	radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+	radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(cp, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+	radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(cp, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
-	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
+	radeon_ring_write(cp, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(cp, rdev->config.r100.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
-	radeon_ring_write(rdev, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
-	radeon_ring_write(rdev, fence->seq);
-	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
-	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+	radeon_ring_write(cp, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+	radeon_ring_write(cp, fence->seq);
+	radeon_ring_write(cp, PACKET0(RADEON_GEN_INT_STATUS, 0));
+	radeon_ring_write(cp, RADEON_SW_INT_FIRE);
 }
 
 void r100_semaphore_ring_emit(struct radeon_device *rdev,
+			      struct radeon_cp *cp,
			      struct radeon_semaphore *semaphore,
-			      unsigned ring, bool emit_wait)
+			      bool emit_wait)
 {
 	/* Unused on older asics, since we don't have semaphores or multiple rings */
 	BUG();
@@ -846,6 +849,7 @@ int r100_copy_blit(struct radeon_device *rdev,
		   unsigned num_gpu_pages,
		   struct radeon_fence *fence)
 {
+	struct radeon_cp *cp = &rdev->cp;
 	uint32_t cur_pages;
 	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
@@ -863,7 +867,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 
 	/* Ask for enough room for blit + flush + fence */
 	ndw = 64 + (10 * num_loops);
-	r = radeon_ring_lock(rdev, ndw);
+	r = radeon_ring_lock(rdev, cp, ndw);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
 		return -EINVAL;
@@ -877,8 +881,8 @@ int r100_copy_blit(struct radeon_device *rdev,
 
 		/* pages are in Y direction - height
 		   page width in X direction - width */
-		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
-		radeon_ring_write(rdev,
+		radeon_ring_write(cp, PACKET3(PACKET3_BITBLT_MULTI, 8));
+		radeon_ring_write(cp,
				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
				  RADEON_GMC_SRC_CLIPPING |
@@ -890,26 +894,26 @@ int r100_copy_blit(struct radeon_device *rdev,
				  RADEON_DP_SRC_SOURCE_MEMORY |
				  RADEON_GMC_CLR_CMP_CNTL_DIS |
				  RADEON_GMC_WR_MSK_DIS);
-		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
-		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
-		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-		radeon_ring_write(rdev, 0);
-		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-		radeon_ring_write(rdev, num_gpu_pages);
-		radeon_ring_write(rdev, num_gpu_pages);
-		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
+		radeon_ring_write(cp, (pitch << 22) | (src_offset >> 10));
+		radeon_ring_write(cp, (pitch << 22) | (dst_offset >> 10));
+		radeon_ring_write(cp, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(cp, 0);
+		radeon_ring_write(cp, (0x1fff) | (0x1fff << 16));
+		radeon_ring_write(cp, num_gpu_pages);
+		radeon_ring_write(cp, num_gpu_pages);
+		radeon_ring_write(cp, cur_pages | (stride_pixels << 16));
 	}
-	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
-	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
-	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
-	radeon_ring_write(rdev,
+	radeon_ring_write(cp, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(cp, RADEON_RB2D_DC_FLUSH_ALL);
+	radeon_ring_write(cp, PACKET0(RADEON_WAIT_UNTIL, 0));
+	radeon_ring_write(cp,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_HOST_IDLECLEAN |
			  RADEON_WAIT_DMA_GUI_IDLE);
 	if (fence) {
 		r = radeon_fence_emit(rdev, fence);
 	}
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_unlock_commit(rdev, cp);
 	return r;
 }
 
@@ -930,19 +934,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
 
 void r100_ring_start(struct radeon_device *rdev)
 {
+	struct radeon_cp *cp = &rdev->cp;
 	int r;
 
-	r = radeon_ring_lock(rdev, 2);
+	r = radeon_ring_lock(rdev, cp, 2);
 	if (r) {
 		return;
 	}
-	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
-	radeon_ring_write(rdev,
+	radeon_ring_write(cp, PACKET0(RADEON_ISYNC_CNTL, 0));
+	radeon_ring_write(cp,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_unlock_commit(rdev, cp);
 }
 
 
@@ -1043,6 +1048,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
 
 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 {
+	struct radeon_cp *cp = &rdev->cp;
 	unsigned rb_bufsz;
 	unsigned rb_blksz;
 	unsigned max_fetch;
@@ -1068,7 +1074,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	rb_bufsz = drm_order(ring_size / 8);
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
 	r100_cp_load_microcode(rdev);
-	r = radeon_ring_init(rdev, ring_size);
+	r = radeon_ring_init(rdev, cp, ring_size);
 	if (r) {
 		return r;
 	}
@@ -1077,7 +1083,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	rb_blksz = 9;
 	/* cp will read 128bytes at a time (4 dwords) */
 	max_fetch = 1;
-	rdev->cp.align_mask = 16 - 1;
+	cp->align_mask = 16 - 1;
 	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
 	pre_write_timer = 64;
 	/* Force CP_RB_WPTR write if written more than one time before the
@@ -1107,13 +1113,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
 
 	/* Set ring address */
-	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
-	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
+	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)cp->gpu_addr);
+	WREG32(RADEON_CP_RB_BASE, cp->gpu_addr);
 	/* Force read & write ptr to 0 */
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
-	rdev->cp.wptr = 0;
-	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+	cp->wptr = 0;
+	WREG32(RADEON_CP_RB_WPTR, cp->wptr);
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1129,7 +1135,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 
 	WREG32(RADEON_CP_RB_CNTL, tmp);
 	udelay(10);
-	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+	cp->rptr = RREG32(RADEON_CP_RB_RPTR);
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1138,12 +1144,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
 	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
 	radeon_ring_start(rdev);
-	r = radeon_ring_test(rdev);
+	r = radeon_ring_test(rdev, cp);
 	if (r) {
 		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
 		return r;
 	}
-	rdev->cp.ready = true;
+	cp->ready = true;
 	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 	return 0;
 }
@@ -1155,7 +1161,7 @@ void r100_cp_fini(struct radeon_device *rdev)
 	}
 	/* Disable ring */
 	r100_cp_disable(rdev);
-	radeon_ring_fini(rdev);
+	radeon_ring_fini(rdev, &rdev->cp);
 	DRM_INFO("radeon: cp finalized\n");
 }
 
@@ -1173,9 +1179,9 @@ void r100_cp_disable(struct radeon_device *rdev)
 	}
 }
 
-void r100_cp_commit(struct radeon_device *rdev)
+void r100_cp_commit(struct radeon_device *rdev, struct radeon_cp *cp)
 {
-	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+	WREG32(RADEON_CP_RB_WPTR, cp->wptr);
 	(void)RREG32(RADEON_CP_RB_WPTR);
 }
 
@@ -2160,26 +2166,26 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
 	return false;
 }
 
-bool r100_gpu_is_lockup(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	u32 rbbm_status;
 	int r;
 
 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-		r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+		r100_gpu_lockup_update(&rdev->config.r100.lockup, cp);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, 2);
+	r = radeon_ring_lock(rdev, cp, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(rdev, 0x80000000);
-		radeon_ring_write(rdev, 0x80000000);
-		radeon_ring_unlock_commit(rdev);
+		radeon_ring_write(cp, 0x80000000);
+		radeon_ring_write(cp, 0x80000000);
+		radeon_ring_unlock_commit(rdev, cp);
 	}
-	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
+	cp->rptr = RREG32(RADEON_CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, cp);
 }
 
 void r100_bm_disable(struct radeon_device *rdev)
@@ -2587,21 +2593,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_cp *cp = &rdev->cp;
 	uint32_t rdp, wdp;
 	unsigned count, i, j;
 
-	radeon_ring_free_size(rdev);
+	radeon_ring_free_size(rdev, cp);
 	rdp = RREG32(RADEON_CP_RB_RPTR);
 	wdp = RREG32(RADEON_CP_RB_WPTR);
-	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+	count = (rdp + cp->ring_size - wdp) & cp->ptr_mask;
 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
-	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+	seq_printf(m, "%u free dwords in ring\n", cp->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
 	for (j = 0; j <= count; j++) {
-		i = (rdp + j) & rdev->cp.ptr_mask;
-		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+		i = (rdp + j) & cp->ptr_mask;
+		seq_printf(m, "r[%04d]=0x%08x\n", i, cp->ring[i]);
 	}
 	return 0;
 }
@@ -3643,7 +3650,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
 	}
 }
 
-int r100_ring_test(struct radeon_device *rdev)
+int r100_ring_test(struct radeon_device *rdev, struct radeon_cp *cp)
 {
 	uint32_t scratch;
 	uint32_t tmp = 0;
@@ -3656,15 +3663,15 @@ int r100_ring_test(struct radeon_device *rdev)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ring_lock(rdev, 2);
+	r = radeon_ring_lock(rdev, cp, 2);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		radeon_scratch_free(rdev, scratch);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET0(scratch, 0));
-	radeon_ring_write(rdev, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_write(cp, PACKET0(scratch, 0));
+	radeon_ring_write(cp, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, cp);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF) {
@@ -3685,9 +3692,11 @@ int r100_ring_test(struct radeon_device *rdev)
 
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
-	radeon_ring_write(rdev, ib->gpu_addr);
-	radeon_ring_write(rdev, ib->length_dw);
+	struct radeon_cp *cp = &rdev->cp;
+
+	radeon_ring_write(cp, PACKET0(RADEON_CP_IB_BASE, 1));
+	radeon_ring_write(cp, ib->gpu_addr);
+	radeon_ring_write(cp, ib->length_dw);
 }
 
 int r100_ib_test(struct radeon_device *rdev)
@@ -3704,7 +3713,7 @@ int r100_ib_test(struct radeon_device *rdev)
 		return r;
 	}
 	WREG32(scratch, 0xCAFEDEAD);
-	r = radeon_ib_get(rdev, &ib);
+	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib);
 	if (r) {
 		return r;
 	}
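
The hunks above move the r100 code from implicitly using rdev->cp to passing an explicit struct radeon_cp * through the ring helpers. As a reference, here is a minimal, hypothetical caller sketch (not part of this patch) showing the lock/write/commit pattern with the new signatures as they appear in the diff; the helper name r100_emit_two_nops is made up for illustration, and the NOP emission mirrors the pattern used in r100_gpu_is_lockup above.

/* Hypothetical sketch, not part of this patch: emit two PACKET2 NOPs on
 * the GFX CP using the per-ring helpers as they appear in the hunks above.
 */
static int r100_emit_two_nops(struct radeon_device *rdev)
{
	struct radeon_cp *cp = &rdev->cp;	/* still the single GFX ring */
	int r;

	/* reserve room for 2 dwords and take the ring lock */
	r = radeon_ring_lock(rdev, cp, 2);
	if (r)
		return r;
	radeon_ring_write(cp, 0x80000000);	/* PACKET2 NOP */
	radeon_ring_write(cp, 0x80000000);	/* PACKET2 NOP */
	/* copy the new wptr to the hardware and drop the lock */
	radeon_ring_unlock_commit(rdev, cp);
	return 0;
}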