Diffstat (limited to 'drivers/gpu/drm/radeon/ni.c')
-rw-r--r--	drivers/gpu/drm/radeon/ni.c	395
1 file changed, 302 insertions(+), 93 deletions(-)

diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 0e5799857465..321137295400 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -934,7 +934,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
 
 int cayman_pcie_gart_enable(struct radeon_device *rdev)
 {
-	int r;
+	int i, r;
 
 	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -945,9 +945,12 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
 		return r;
 	radeon_gart_restore(rdev);
 	/* Setup TLB control */
-	WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+	WREG32(MC_VM_MX_L1_TLB_CNTL,
+	       (0xA << 7) |
+	       ENABLE_L1_TLB |
 	       ENABLE_L1_FRAGMENT_PROCESSING |
 	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+	       ENABLE_ADVANCED_DRIVER_MODEL |
 	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
@@ -967,9 +970,26 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(VM_CONTEXT0_CNTL2, 0);
 	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
 				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
-	/* disable context1-7 */
+
+	WREG32(0x15D4, 0);
+	WREG32(0x15D8, 0);
+	WREG32(0x15DC, 0);
+
+	/* empty context1-7 */
+	for (i = 1; i < 8; i++) {
+		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+			rdev->gart.table_addr >> 12);
+	}
+
+	/* enable context1-7 */
+	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+	       (u32)(rdev->dummy_page.addr >> 12));
 	WREG32(VM_CONTEXT1_CNTL2, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
 	cayman_pcie_gart_tlb_flush(rdev);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
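
The added block points VM contexts 1-7 at the same GART page table as context 0 and sends their protection faults to the dummy page, so a stray translation hits a known-safe page instead of random memory. The (i << 2) stride relies on the per-context START/END/BASE registers being laid out as consecutive 32-bit slots. A minimal stand-alone sketch of that addressing; the 0x155C base offset is quoted from memory of the evergreen/cayman register headers and should be treated as an assumption:

    #include <stdio.h>

    #define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C /* assumed offset */

    int main(void)
    {
    	/* each of the eight VM contexts gets its own register, 4 bytes apart */
    	for (int i = 0; i < 8; i++)
    		printf("context %d: START reg at 0x%04X\n",
    		       i, (unsigned)(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2)));
    	return 0;
    }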
@@ -1006,9 +1026,69 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
 	radeon_gart_fini(rdev);
 }
 
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+			      int ring, u32 cp_int_cntl)
+{
+	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+	WREG32(CP_INT_CNTL, cp_int_cntl);
+}
+
 /*
  * CP.
  */
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+			    struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+	/* flush read cache over gart for this vmid */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+	/* EVENT_WRITE_EOP - flush caches, send int */
+	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+	radeon_ring_write(ring, addr & 0xffffffff);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+	radeon_ring_write(ring, fence->seq);
+	radeon_ring_write(ring, 0);
+}
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
+	/* set to DX10/11 mode */
+	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(ring, 1);
+	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+
+	/* flush read cache over gart for this vmid */
+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+	radeon_ring_write(ring, ib->vm_id);
+	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+	radeon_ring_write(ring, 0xFFFFFFFF);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 10); /* poll interval */
+}
+
 static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
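
cayman_fence_ring_emit() splits the 64-bit fence address across two dwords of the EVENT_WRITE_EOP packet: the low 32 bits go into one dword, and the next dword carries the surviving high address bits OR'd with the data and interrupt selectors. A stand-alone sketch of that packing; the DATA_SEL(x) = x << 29 and INT_SEL(x) = x << 24 encodings are quoted from memory of the r600-family headers, not from this diff:

    #include <stdint.h>
    #include <stdio.h>

    #define DATA_SEL(x) ((uint32_t)(x) << 29) /* 1 = write 32-bit fence value (assumed) */
    #define INT_SEL(x)  ((uint32_t)(x) << 24) /* 2 = interrupt on write confirm (assumed) */

    int main(void)
    {
    	uint64_t addr = 0x0000008000001000ULL; /* hypothetical fence GPU address */
    	uint32_t dw_lo = (uint32_t)(addr & 0xffffffff);
    	/* only 8 high bits survive: Cayman GPU addresses fit in 40 bits */
    	uint32_t dw_hi = ((uint32_t)(addr >> 32) & 0xff) | DATA_SEL(1) | INT_SEL(2);

    	printf("EOP addr dwords: 0x%08X 0x%08X\n", (unsigned)dw_lo, (unsigned)dw_hi);
    	return 0;
    }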
@@ -1049,63 +1129,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
 
 static int cayman_cp_start(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r, i;
 
-	r = radeon_ring_lock(rdev, 7);
+	r = radeon_ring_lock(rdev, ring, 7);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
-	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
-	radeon_ring_write(rdev, 0x1);
-	radeon_ring_write(rdev, 0x0);
-	radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
-	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
-	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, 0);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+	radeon_ring_write(ring, 0x1);
+	radeon_ring_write(ring, 0x0);
+	radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	cayman_cp_enable(rdev, true);
 
-	r = radeon_ring_lock(rdev, cayman_default_size + 19);
+	r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
 	}
 
 	/* setup clear context state */
-	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
 	for (i = 0; i < cayman_default_size; i++)
-		radeon_ring_write(rdev, cayman_default_state[i]);
+		radeon_ring_write(ring, cayman_default_state[i]);
 
-	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
-	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
 	/* set clear context state */
-	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
-	radeon_ring_write(rdev, 0);
+	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+	radeon_ring_write(ring, 0);
 
 	/* SQ_VTX_BASE_VTX_LOC */
-	radeon_ring_write(rdev, 0xc0026f00);
-	radeon_ring_write(rdev, 0x00000000);
-	radeon_ring_write(rdev, 0x00000000);
-	radeon_ring_write(rdev, 0x00000000);
+	radeon_ring_write(ring, 0xc0026f00);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
+	radeon_ring_write(ring, 0x00000000);
 
 	/* Clear consts */
-	radeon_ring_write(rdev, 0xc0036f00);
-	radeon_ring_write(rdev, 0x00000bc4);
-	radeon_ring_write(rdev, 0xffffffff);
-	radeon_ring_write(rdev, 0xffffffff);
-	radeon_ring_write(rdev, 0xffffffff);
+	radeon_ring_write(ring, 0xc0036f00);
+	radeon_ring_write(ring, 0x00000bc4);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
+	radeon_ring_write(ring, 0xffffffff);
 
-	radeon_ring_write(rdev, 0xc0026900);
-	radeon_ring_write(rdev, 0x00000316);
-	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
-	radeon_ring_write(rdev, 0x00000010); /* */
+	radeon_ring_write(ring, 0xc0026900);
+	radeon_ring_write(ring, 0x00000316);
+	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(ring, 0x00000010); /* */
 
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_unlock_commit(rdev, ring);
 
 	/* XXX init other rings */
 
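
The bulk of this hunk is a mechanical rename: ring state moves out of the dedicated rdev->cp field into an rdev->ring[] array, so radeon_ring_lock(), radeon_ring_write() and radeon_ring_unlock_commit() now take the ring they operate on instead of assuming ring 0. A toy model of that shape; the struct layout and helper are invented for illustration, only the indexing scheme mirrors the driver:

    #include <stdio.h>

    /* stand-ins for RADEON_RING_TYPE_GFX_INDEX and the two Cayman compute rings */
    enum { GFX_INDEX, CP1_INDEX, CP2_INDEX, NUM_RINGS };

    struct ring { unsigned wptr; const char *name; };

    static void ring_write(struct ring *r, unsigned dw)
    {
    	r->wptr++; /* a real implementation would store dw and wrap the pointer */
    	(void)dw;
    }

    int main(void)
    {
    	struct ring rings[NUM_RINGS] = {
    		{ 0, "gfx" }, { 0, "cp1" }, { 0, "cp2" },
    	};

    	/* the same helper now works on any ring, not just ring 0 */
    	for (int i = 0; i < NUM_RINGS; i++)
    		ring_write(&rings[i], 0x80000000); /* PACKET2 NOP */

    	for (int i = 0; i < NUM_RINGS; i++)
    		printf("%s wptr=%u\n", rings[i].name, rings[i].wptr);
    	return 0;
    }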
@@ -1115,11 +1196,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
 static void cayman_cp_fini(struct radeon_device *rdev)
 {
 	cayman_cp_enable(rdev, false);
-	radeon_ring_fini(rdev);
+	radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 }
 
 int cayman_cp_resume(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring;
 	u32 tmp;
 	u32 rb_bufsz;
 	int r;
@@ -1136,7 +1218,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(GRBM_SOFT_RESET, 0);
 	RREG32(GRBM_SOFT_RESET);
 
-	WREG32(CP_SEM_WAIT_TIMER, 0x4);
+	WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
 	/* Set the write pointer delay */
 	WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1145,7 +1227,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* ring 0 - compute and gfx */
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
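
drm_order() is a ceiling log2, and the division by 8 expresses the ring size in the 8-byte units that the RB_BUFSZ field appears to expect (as read from this code, not from hardware docs): the 1 MiB ring that r600_ring_init() creates later in this diff yields rb_bufsz = 17, since 2^17 * 8 = 1 MiB. A stand-alone sketch with drm_order() reimplemented locally:

    #include <stdio.h>

    static unsigned order(unsigned long size) /* ceil(log2(size)), like drm_order() */
    {
    	unsigned o = 0;
    	unsigned long tmp;

    	for (tmp = size >> 1; tmp; tmp >>= 1)
    		o++;
    	if (size & (size - 1)) /* not a power of two: round up */
    		o++;
    	return o;
    }

    int main(void)
    {
    	unsigned long ring_size = 1024 * 1024; /* the size passed to r600_ring_init() */
    	printf("rb_bufsz = %u\n", order(ring_size / 8)); /* prints 17 */
    	return 0;
    }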
@@ -1154,8 +1237,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-	rdev->cp.wptr = 0;
-	WREG32(CP_RB0_WPTR, rdev->cp.wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB0_WPTR, ring->wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1172,13 +1255,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB0_CNTL, tmp);
 
-	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
 
-	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+	ring->rptr = RREG32(CP_RB0_RPTR);
 
 	/* ring1 - compute only */
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
@@ -1187,8 +1271,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-	rdev->cp1.wptr = 0;
-	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB1_WPTR, ring->wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1197,13 +1281,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB1_CNTL, tmp);
 
-	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
 
-	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+	ring->rptr = RREG32(CP_RB1_RPTR);
 
 	/* ring2 - compute only */
 	/* Set ring buffer size */
-	rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+	rb_bufsz = drm_order(ring->ring_size / 8);
 	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
 #ifdef __BIG_ENDIAN
 	tmp |= BUF_SWAP_32BIT;
@@ -1212,8 +1297,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-	rdev->cp2.wptr = 0;
-	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
+	ring->wptr = 0;
+	WREG32(CP_RB2_WPTR, ring->wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1222,28 +1307,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	mdelay(1);
 	WREG32(CP_RB2_CNTL, tmp);
 
-	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
 
-	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+	ring->rptr = RREG32(CP_RB2_RPTR);
 
 	/* start the rings */
 	cayman_cp_start(rdev);
-	rdev->cp.ready = true;
-	rdev->cp1.ready = true;
-	rdev->cp2.ready = true;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 	/* this only test cp0 */
-	r = radeon_ring_test(rdev);
+	r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
 	if (r) {
-		rdev->cp.ready = false;
-		rdev->cp1.ready = false;
-		rdev->cp2.ready = false;
+		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
 		return r;
 	}
 
 	return 0;
 }
 
-bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	u32 srbm_status;
 	u32 grbm_status;
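
A semantic change hides in the ready flags: the two compute rings used to be marked ready alongside the gfx ring, but since only CP0 is actually ring-tested ("this only test cp0"), they now stay false until something validates them. Submission paths elsewhere in the driver gate on that flag; schematically, with an invented helper that is not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { bool ready; const char *name; };

    static int submit(struct ring *r)
    {
    	if (!r->ready) {
    		fprintf(stderr, "%s: ring not ready, rejecting work\n", r->name);
    		return -22; /* -EINVAL */
    	}
    	printf("%s: work queued\n", r->name);
    	return 0;
    }

    int main(void)
    {
    	struct ring gfx = { true, "gfx" }, cp1 = { false, "cp1" };
    	submit(&gfx); /* tested ring accepts work */
    	submit(&cp1); /* untested compute ring is refused */
    	return 0;
    }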
@@ -1256,20 +1341,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
 	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
 	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
 	if (!(grbm_status & GUI_ACTIVE)) {
-		r100_gpu_lockup_update(lockup, &rdev->cp);
+		r100_gpu_lockup_update(lockup, ring);
 		return false;
 	}
 	/* force CP activities */
-	r = radeon_ring_lock(rdev, 2);
+	r = radeon_ring_lock(rdev, ring, 2);
 	if (!r) {
 		/* PACKET2 NOP */
-		radeon_ring_write(rdev, 0x80000000);
-		radeon_ring_write(rdev, 0x80000000);
-		radeon_ring_unlock_commit(rdev);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_write(ring, 0x80000000);
+		radeon_ring_unlock_commit(rdev, ring);
 	}
 	/* XXX deal with CP0,1,2 */
-	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+	ring->rptr = RREG32(ring->rptr_reg);
+	return r100_gpu_cp_is_lockup(rdev, lockup, ring);
 }
 
 static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1289,6 +1374,15 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
 		RREG32(GRBM_STATUS_SE1));
 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
 		RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(0x14F8));
+	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(0x14D8));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+		 RREG32(0x14FC));
+	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+		 RREG32(0x14DC));
+
 	evergreen_mc_stop(rdev, &save);
 	if (evergreen_mc_wait_for_idle(rdev)) {
 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -1319,6 +1413,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
 	(void)RREG32(GRBM_SOFT_RESET);
 	/* Wait a little for things to settle down */
 	udelay(50);
+
 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
 		RREG32(GRBM_STATUS));
 	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
@@ -1338,6 +1433,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
 
 static int cayman_startup(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* enable pcie gen2 link */
@@ -1378,6 +1474,24 @@ static int cayman_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
+	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
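
The three fence start-up blocks above differ only in the ring index, so a loop over the indices would be an equivalent shape. A stand-alone sketch with the driver call stubbed out so the control flow is runnable in isolation:

    #include <stdio.h>

    enum { RADEON_RING_TYPE_GFX_INDEX, CAYMAN_RING_TYPE_CP1_INDEX,
           CAYMAN_RING_TYPE_CP2_INDEX };

    /* stub standing in for radeon_fence_driver_start_ring() */
    static int start_ring_stub(int ring)
    {
    	printf("starting fences on ring %d\n", ring);
    	return 0; /* pretend success */
    }

    int main(void)
    {
    	static const int rings[] = {
    		RADEON_RING_TYPE_GFX_INDEX,
    		CAYMAN_RING_TYPE_CP1_INDEX,
    		CAYMAN_RING_TYPE_CP2_INDEX,
    	};

    	for (unsigned i = 0; i < sizeof(rings) / sizeof(rings[0]); i++) {
    		int r = start_ring_stub(rings[i]);
    		if (r)
    			return r; /* bail on the first failure, as the diff does */
    	}
    	return 0;
    }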
@@ -1387,7 +1501,9 @@ static int cayman_startup(struct radeon_device *rdev)
 	}
 	evergreen_irq_set(rdev);
 
-	r = radeon_ring_init(rdev, rdev->cp.ring_size);
+	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+			     CP_RB0_RPTR, CP_RB0_WPTR,
+			     0, 0xfffff, RADEON_CP_PACKET2);
 	if (r)
 		return r;
 	r = cayman_cp_load_microcode(rdev);
@@ -1397,6 +1513,21 @@ static int cayman_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_ib_pool_start(rdev);
+	if (r)
+		return r;
+
+	r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+		rdev->accel_working = false;
+		return r;
+	}
+
+	r = radeon_vm_manager_start(rdev);
+	if (r)
+		return r;
+
 	return 0;
 }
 
@@ -1411,32 +1542,26 @@ int cayman_resume(struct radeon_device *rdev)
 	/* post card */
 	atom_asic_init(rdev->mode_info.atom_context);
 
+	rdev->accel_working = true;
 	r = cayman_startup(rdev);
 	if (r) {
 		DRM_ERROR("cayman startup failed on resume\n");
 		return r;
 	}
-
-	r = r600_ib_test(rdev);
-	if (r) {
-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
-		return r;
-	}
-
 	return r;
-
 }
 
 int cayman_suspend(struct radeon_device *rdev)
 {
 	/* FIXME: we should wait for ring to be empty */
+	radeon_ib_pool_suspend(rdev);
+	radeon_vm_manager_suspend(rdev);
+	r600_blit_suspend(rdev);
 	cayman_cp_enable(rdev, false);
-	rdev->cp.ready = false;
+	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	cayman_pcie_gart_disable(rdev);
-	r600_blit_suspend(rdev);
-
 	return 0;
 }
 
@@ -1448,6 +1573,7 @@ int cayman_suspend(struct radeon_device *rdev)
  */
 int cayman_init(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	int r;
 
 	/* This don't do much */
@@ -1500,8 +1626,8 @@ int cayman_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	rdev->cp.ring_obj = NULL;
-	r600_ring_init(rdev, 1024 * 1024);
+	ring->ring_obj = NULL;
+	r600_ring_init(rdev, ring, 1024 * 1024);
 
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
@@ -1510,29 +1636,29 @@ int cayman_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		rdev->accel_working = false;
+	}
+	r = radeon_vm_manager_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+	}
+
 	r = cayman_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		cayman_cp_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
+		r100_ib_fini(rdev);
+		radeon_vm_manager_fini(rdev);
 		radeon_irq_kms_fini(rdev);
 		cayman_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
-	if (rdev->accel_working) {
-		r = radeon_ib_pool_init(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-			rdev->accel_working = false;
-		}
-		r = r600_ib_test(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-			rdev->accel_working = false;
-		}
-	}
 
 	/* Don't start up if the MC ucode is missing.
 	 * The default clocks and voltages before the MC ucode
@@ -1552,11 +1678,13 @@ void cayman_fini(struct radeon_device *rdev)
 	cayman_cp_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
-	radeon_ib_pool_fini(rdev);
+	radeon_vm_manager_fini(rdev);
+	r100_ib_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
+	radeon_semaphore_driver_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
@@ -1564,3 +1692,84 @@ void cayman_fini(struct radeon_device *rdev)
 	rdev->bios = NULL;
 }
 
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+	/* number of VMs */
+	rdev->vm_manager.nvm = 8;
+	/* base offset of vram pages */
+	rdev->vm_manager.vram_base_offset = 0;
+	return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+{
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+	/* bits 0-7 are the VM contexts0-7 */
+	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
+	return 0;
+}
+
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
+	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+	/* bits 0-7 are the VM contexts0-7 */
+	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	if (vm->id == -1)
+		return;
+
+	/* flush hdp cache */
+	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+	/* bits 0-7 are the VM contexts0-7 */
+	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+#define R600_PTE_VALID     (1 << 0)
+#define R600_PTE_SYSTEM    (1 << 1)
+#define R600_PTE_SNOOPED   (1 << 2)
+#define R600_PTE_READABLE  (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+			      struct radeon_vm *vm,
+			      uint32_t flags)
+{
+	uint32_t r600_flags = 0;
+
+	r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+	r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+	if (flags & RADEON_VM_PAGE_SYSTEM) {
+		r600_flags |= R600_PTE_SYSTEM;
+		r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+	}
+	return r600_flags;
+}
+
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+			unsigned pfn, uint64_t addr, uint32_t flags)
+{
+	void __iomem *ptr = (void *)vm->pt;
+
+	addr = addr & 0xFFFFFFFFFFFFF000ULL;
+	addr |= flags;
+	writeq(addr, ptr + (pfn * 8));
+}
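
cayman_vm_page_flags() translates the generic RADEON_VM_PAGE_* flags into R600_PTE_* bits, and cayman_vm_set_page() packs those bits into the low bits of a 4 KiB-aligned 64-bit entry before writing it into the page table. A stand-alone sketch of that encoding; the flag values are copied from the #defines above and the address is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define R600_PTE_VALID     (1 << 0)
    #define R600_PTE_SYSTEM    (1 << 1)
    #define R600_PTE_SNOOPED   (1 << 2)
    #define R600_PTE_READABLE  (1 << 5)
    #define R600_PTE_WRITEABLE (1 << 6)

    int main(void)
    {
    	uint64_t addr = 0x123456789ABCULL; /* hypothetical system page address */
    	uint32_t flags = R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED |
    			 R600_PTE_READABLE | R600_PTE_WRITEABLE;

    	/* mask to 4 KiB alignment, then OR the flags into the low bits */
    	uint64_t pte = (addr & 0xFFFFFFFFFFFFF000ULL) | flags;

    	printf("pte = 0x%016llX\n", (unsigned long long)pte);
    	return 0;
    }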