aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/r600.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-12-11 00:56:47 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-12-11 00:56:47 -0500
commit3ef884b4c04e857c283cc77ca70ad8f638d94b0e (patch)
treec8c5b872e836e6ffe8bd08ab3477f9e8260575ed /drivers/gpu/drm/radeon/r600.c
parent4e5df8069b0e4e36c6b528b3be7da298e6f454cd (diff)
parent4361e52ad0372e6fd2240a2207b49a4de1f45ca9 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (189 commits) drm/radeon/kms: fix warning about cur_placement being uninitialised. drm/ttm: Print debug information on memory manager when eviction fails drm: Add memory manager debug function drm/radeon/kms: restore surface registers on resume. drm/radeon/kms/r600/r700: fallback gracefully on ucode failure drm/ttm: Initialize eviction placement in case the driver callback doesn't drm/radeon/kms: cleanup structure and module if initialization fails drm/radeon/kms: actualy set the eviction placements we choose drm/radeon/kms: Fix NULL ptr dereference drm/radeon/kms/avivo: add support for new pll selection algo drm/radeon/kms/avivo: fix some bugs in the display bandwidth setup drm/radeon/kms: fix return value from fence function. drm/radeon: Remove tests for -ERESTART from the TTM code. drm/ttm: Have the TTM code return -ERESTARTSYS instead of -ERESTART. drm/radeon/kms: Convert radeon to new TTM validation API (V2) drm/ttm: Rework validation & memory space allocation (V3) drm: Add search/get functions to get a block in a specific range drm/radeon/kms: fix avivo tiling regression since radeon object rework drm/i915: Remove a debugging printk from hangcheck drm/radeon/kms: make sure i2c id matches ...
Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--drivers/gpu/drm/radeon/r600.c1147
1 files changed, 1084 insertions, 63 deletions
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6740ed24358f..36656bd110bf 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
38 38
39#define PFP_UCODE_SIZE 576 39#define PFP_UCODE_SIZE 576
40#define PM4_UCODE_SIZE 1792 40#define PM4_UCODE_SIZE 1792
41#define RLC_UCODE_SIZE 768
41#define R700_PFP_UCODE_SIZE 848 42#define R700_PFP_UCODE_SIZE 848
42#define R700_PM4_UCODE_SIZE 1360 43#define R700_PM4_UCODE_SIZE 1360
44#define R700_RLC_UCODE_SIZE 1024
43 45
44/* Firmware Names */ 46/* Firmware Names */
45MODULE_FIRMWARE("radeon/R600_pfp.bin"); 47MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
62MODULE_FIRMWARE("radeon/RV730_me.bin"); 64MODULE_FIRMWARE("radeon/RV730_me.bin");
63MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 65MODULE_FIRMWARE("radeon/RV710_pfp.bin");
64MODULE_FIRMWARE("radeon/RV710_me.bin"); 66MODULE_FIRMWARE("radeon/RV710_me.bin");
67MODULE_FIRMWARE("radeon/R600_rlc.bin");
68MODULE_FIRMWARE("radeon/R700_rlc.bin");
65 69
66int r600_debugfs_mc_info_init(struct radeon_device *rdev); 70int r600_debugfs_mc_info_init(struct radeon_device *rdev);
67 71
@@ -70,6 +74,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
70void r600_gpu_init(struct radeon_device *rdev); 74void r600_gpu_init(struct radeon_device *rdev);
71void r600_fini(struct radeon_device *rdev); 75void r600_fini(struct radeon_device *rdev);
72 76
77/* hpd for digital panel detect/disconnect */
78bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
79{
80 bool connected = false;
81
82 if (ASIC_IS_DCE3(rdev)) {
83 switch (hpd) {
84 case RADEON_HPD_1:
85 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
86 connected = true;
87 break;
88 case RADEON_HPD_2:
89 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
90 connected = true;
91 break;
92 case RADEON_HPD_3:
93 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
94 connected = true;
95 break;
96 case RADEON_HPD_4:
97 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
98 connected = true;
99 break;
100 /* DCE 3.2 */
101 case RADEON_HPD_5:
102 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
103 connected = true;
104 break;
105 case RADEON_HPD_6:
106 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
107 connected = true;
108 break;
109 default:
110 break;
111 }
112 } else {
113 switch (hpd) {
114 case RADEON_HPD_1:
115 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
116 connected = true;
117 break;
118 case RADEON_HPD_2:
119 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
120 connected = true;
121 break;
122 case RADEON_HPD_3:
123 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
124 connected = true;
125 break;
126 default:
127 break;
128 }
129 }
130 return connected;
131}
132
133void r600_hpd_set_polarity(struct radeon_device *rdev,
134 enum radeon_hpd_id hpd)
135{
136 u32 tmp;
137 bool connected = r600_hpd_sense(rdev, hpd);
138
139 if (ASIC_IS_DCE3(rdev)) {
140 switch (hpd) {
141 case RADEON_HPD_1:
142 tmp = RREG32(DC_HPD1_INT_CONTROL);
143 if (connected)
144 tmp &= ~DC_HPDx_INT_POLARITY;
145 else
146 tmp |= DC_HPDx_INT_POLARITY;
147 WREG32(DC_HPD1_INT_CONTROL, tmp);
148 break;
149 case RADEON_HPD_2:
150 tmp = RREG32(DC_HPD2_INT_CONTROL);
151 if (connected)
152 tmp &= ~DC_HPDx_INT_POLARITY;
153 else
154 tmp |= DC_HPDx_INT_POLARITY;
155 WREG32(DC_HPD2_INT_CONTROL, tmp);
156 break;
157 case RADEON_HPD_3:
158 tmp = RREG32(DC_HPD3_INT_CONTROL);
159 if (connected)
160 tmp &= ~DC_HPDx_INT_POLARITY;
161 else
162 tmp |= DC_HPDx_INT_POLARITY;
163 WREG32(DC_HPD3_INT_CONTROL, tmp);
164 break;
165 case RADEON_HPD_4:
166 tmp = RREG32(DC_HPD4_INT_CONTROL);
167 if (connected)
168 tmp &= ~DC_HPDx_INT_POLARITY;
169 else
170 tmp |= DC_HPDx_INT_POLARITY;
171 WREG32(DC_HPD4_INT_CONTROL, tmp);
172 break;
173 case RADEON_HPD_5:
174 tmp = RREG32(DC_HPD5_INT_CONTROL);
175 if (connected)
176 tmp &= ~DC_HPDx_INT_POLARITY;
177 else
178 tmp |= DC_HPDx_INT_POLARITY;
179 WREG32(DC_HPD5_INT_CONTROL, tmp);
180 break;
181 /* DCE 3.2 */
182 case RADEON_HPD_6:
183 tmp = RREG32(DC_HPD6_INT_CONTROL);
184 if (connected)
185 tmp &= ~DC_HPDx_INT_POLARITY;
186 else
187 tmp |= DC_HPDx_INT_POLARITY;
188 WREG32(DC_HPD6_INT_CONTROL, tmp);
189 break;
190 default:
191 break;
192 }
193 } else {
194 switch (hpd) {
195 case RADEON_HPD_1:
196 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
197 if (connected)
198 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
199 else
200 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
201 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
202 break;
203 case RADEON_HPD_2:
204 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
205 if (connected)
206 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
207 else
208 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
209 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
210 break;
211 case RADEON_HPD_3:
212 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
213 if (connected)
214 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
215 else
216 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
217 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
218 break;
219 default:
220 break;
221 }
222 }
223}
224
225void r600_hpd_init(struct radeon_device *rdev)
226{
227 struct drm_device *dev = rdev->ddev;
228 struct drm_connector *connector;
229
230 if (ASIC_IS_DCE3(rdev)) {
231 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
232 if (ASIC_IS_DCE32(rdev))
233 tmp |= DC_HPDx_EN;
234
235 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
236 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
237 switch (radeon_connector->hpd.hpd) {
238 case RADEON_HPD_1:
239 WREG32(DC_HPD1_CONTROL, tmp);
240 rdev->irq.hpd[0] = true;
241 break;
242 case RADEON_HPD_2:
243 WREG32(DC_HPD2_CONTROL, tmp);
244 rdev->irq.hpd[1] = true;
245 break;
246 case RADEON_HPD_3:
247 WREG32(DC_HPD3_CONTROL, tmp);
248 rdev->irq.hpd[2] = true;
249 break;
250 case RADEON_HPD_4:
251 WREG32(DC_HPD4_CONTROL, tmp);
252 rdev->irq.hpd[3] = true;
253 break;
254 /* DCE 3.2 */
255 case RADEON_HPD_5:
256 WREG32(DC_HPD5_CONTROL, tmp);
257 rdev->irq.hpd[4] = true;
258 break;
259 case RADEON_HPD_6:
260 WREG32(DC_HPD6_CONTROL, tmp);
261 rdev->irq.hpd[5] = true;
262 break;
263 default:
264 break;
265 }
266 }
267 } else {
268 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
269 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
270 switch (radeon_connector->hpd.hpd) {
271 case RADEON_HPD_1:
272 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
273 rdev->irq.hpd[0] = true;
274 break;
275 case RADEON_HPD_2:
276 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
277 rdev->irq.hpd[1] = true;
278 break;
279 case RADEON_HPD_3:
280 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
281 rdev->irq.hpd[2] = true;
282 break;
283 default:
284 break;
285 }
286 }
287 }
288 r600_irq_set(rdev);
289}
290
291void r600_hpd_fini(struct radeon_device *rdev)
292{
293 struct drm_device *dev = rdev->ddev;
294 struct drm_connector *connector;
295
296 if (ASIC_IS_DCE3(rdev)) {
297 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
298 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
299 switch (radeon_connector->hpd.hpd) {
300 case RADEON_HPD_1:
301 WREG32(DC_HPD1_CONTROL, 0);
302 rdev->irq.hpd[0] = false;
303 break;
304 case RADEON_HPD_2:
305 WREG32(DC_HPD2_CONTROL, 0);
306 rdev->irq.hpd[1] = false;
307 break;
308 case RADEON_HPD_3:
309 WREG32(DC_HPD3_CONTROL, 0);
310 rdev->irq.hpd[2] = false;
311 break;
312 case RADEON_HPD_4:
313 WREG32(DC_HPD4_CONTROL, 0);
314 rdev->irq.hpd[3] = false;
315 break;
316 /* DCE 3.2 */
317 case RADEON_HPD_5:
318 WREG32(DC_HPD5_CONTROL, 0);
319 rdev->irq.hpd[4] = false;
320 break;
321 case RADEON_HPD_6:
322 WREG32(DC_HPD6_CONTROL, 0);
323 rdev->irq.hpd[5] = false;
324 break;
325 default:
326 break;
327 }
328 }
329 } else {
330 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
331 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
332 switch (radeon_connector->hpd.hpd) {
333 case RADEON_HPD_1:
334 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
335 rdev->irq.hpd[0] = false;
336 break;
337 case RADEON_HPD_2:
338 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
339 rdev->irq.hpd[1] = false;
340 break;
341 case RADEON_HPD_3:
342 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
343 rdev->irq.hpd[2] = false;
344 break;
345 default:
346 break;
347 }
348 }
349 }
350}
351
73/* 352/*
74 * R600 PCIE GART 353 * R600 PCIE GART
75 */ 354 */
@@ -180,7 +459,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
180void r600_pcie_gart_disable(struct radeon_device *rdev) 459void r600_pcie_gart_disable(struct radeon_device *rdev)
181{ 460{
182 u32 tmp; 461 u32 tmp;
183 int i; 462 int i, r;
184 463
185 /* Disable all tables */ 464 /* Disable all tables */
186 for (i = 0; i < 7; i++) 465 for (i = 0; i < 7; i++)
@@ -208,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
208 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); 487 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
209 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 488 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
210 if (rdev->gart.table.vram.robj) { 489 if (rdev->gart.table.vram.robj) {
211 radeon_object_kunmap(rdev->gart.table.vram.robj); 490 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
212 radeon_object_unpin(rdev->gart.table.vram.robj); 491 if (likely(r == 0)) {
492 radeon_bo_kunmap(rdev->gart.table.vram.robj);
493 radeon_bo_unpin(rdev->gart.table.vram.robj);
494 radeon_bo_unreserve(rdev->gart.table.vram.robj);
495 }
213 } 496 }
214} 497}
215 498
@@ -1101,6 +1384,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1101 (void)RREG32(PCIE_PORT_DATA); 1384 (void)RREG32(PCIE_PORT_DATA);
1102} 1385}
1103 1386
1387void r600_hdp_flush(struct radeon_device *rdev)
1388{
1389 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1390}
1104 1391
1105/* 1392/*
1106 * CP & Ring 1393 * CP & Ring
@@ -1110,11 +1397,12 @@ void r600_cp_stop(struct radeon_device *rdev)
1110 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1397 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1111} 1398}
1112 1399
1113int r600_cp_init_microcode(struct radeon_device *rdev) 1400int r600_init_microcode(struct radeon_device *rdev)
1114{ 1401{
1115 struct platform_device *pdev; 1402 struct platform_device *pdev;
1116 const char *chip_name; 1403 const char *chip_name;
1117 size_t pfp_req_size, me_req_size; 1404 const char *rlc_chip_name;
1405 size_t pfp_req_size, me_req_size, rlc_req_size;
1118 char fw_name[30]; 1406 char fw_name[30];
1119 int err; 1407 int err;
1120 1408
@@ -1128,30 +1416,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
1128 } 1416 }
1129 1417
1130 switch (rdev->family) { 1418 switch (rdev->family) {
1131 case CHIP_R600: chip_name = "R600"; break; 1419 case CHIP_R600:
1132 case CHIP_RV610: chip_name = "RV610"; break; 1420 chip_name = "R600";
1133 case CHIP_RV630: chip_name = "RV630"; break; 1421 rlc_chip_name = "R600";
1134 case CHIP_RV620: chip_name = "RV620"; break; 1422 break;
1135 case CHIP_RV635: chip_name = "RV635"; break; 1423 case CHIP_RV610:
1136 case CHIP_RV670: chip_name = "RV670"; break; 1424 chip_name = "RV610";
1425 rlc_chip_name = "R600";
1426 break;
1427 case CHIP_RV630:
1428 chip_name = "RV630";
1429 rlc_chip_name = "R600";
1430 break;
1431 case CHIP_RV620:
1432 chip_name = "RV620";
1433 rlc_chip_name = "R600";
1434 break;
1435 case CHIP_RV635:
1436 chip_name = "RV635";
1437 rlc_chip_name = "R600";
1438 break;
1439 case CHIP_RV670:
1440 chip_name = "RV670";
1441 rlc_chip_name = "R600";
1442 break;
1137 case CHIP_RS780: 1443 case CHIP_RS780:
1138 case CHIP_RS880: chip_name = "RS780"; break; 1444 case CHIP_RS880:
1139 case CHIP_RV770: chip_name = "RV770"; break; 1445 chip_name = "RS780";
1446 rlc_chip_name = "R600";
1447 break;
1448 case CHIP_RV770:
1449 chip_name = "RV770";
1450 rlc_chip_name = "R700";
1451 break;
1140 case CHIP_RV730: 1452 case CHIP_RV730:
1141 case CHIP_RV740: chip_name = "RV730"; break; 1453 case CHIP_RV740:
1142 case CHIP_RV710: chip_name = "RV710"; break; 1454 chip_name = "RV730";
1455 rlc_chip_name = "R700";
1456 break;
1457 case CHIP_RV710:
1458 chip_name = "RV710";
1459 rlc_chip_name = "R700";
1460 break;
1143 default: BUG(); 1461 default: BUG();
1144 } 1462 }
1145 1463
1146 if (rdev->family >= CHIP_RV770) { 1464 if (rdev->family >= CHIP_RV770) {
1147 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 1465 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1148 me_req_size = R700_PM4_UCODE_SIZE * 4; 1466 me_req_size = R700_PM4_UCODE_SIZE * 4;
1467 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1149 } else { 1468 } else {
1150 pfp_req_size = PFP_UCODE_SIZE * 4; 1469 pfp_req_size = PFP_UCODE_SIZE * 4;
1151 me_req_size = PM4_UCODE_SIZE * 12; 1470 me_req_size = PM4_UCODE_SIZE * 12;
1471 rlc_req_size = RLC_UCODE_SIZE * 4;
1152 } 1472 }
1153 1473
1154 DRM_INFO("Loading %s CP Microcode\n", chip_name); 1474 DRM_INFO("Loading %s Microcode\n", chip_name);
1155 1475
1156 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 1476 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1157 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 1477 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1175,6 +1495,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
1175 rdev->me_fw->size, fw_name); 1495 rdev->me_fw->size, fw_name);
1176 err = -EINVAL; 1496 err = -EINVAL;
1177 } 1497 }
1498
1499 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1500 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1501 if (err)
1502 goto out;
1503 if (rdev->rlc_fw->size != rlc_req_size) {
1504 printk(KERN_ERR
1505 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1506 rdev->rlc_fw->size, fw_name);
1507 err = -EINVAL;
1508 }
1509
1178out: 1510out:
1179 platform_device_unregister(pdev); 1511 platform_device_unregister(pdev);
1180 1512
@@ -1187,6 +1519,8 @@ out:
1187 rdev->pfp_fw = NULL; 1519 rdev->pfp_fw = NULL;
1188 release_firmware(rdev->me_fw); 1520 release_firmware(rdev->me_fw);
1189 rdev->me_fw = NULL; 1521 rdev->me_fw = NULL;
1522 release_firmware(rdev->rlc_fw);
1523 rdev->rlc_fw = NULL;
1190 } 1524 }
1191 return err; 1525 return err;
1192} 1526}
@@ -1381,10 +1715,16 @@ int r600_ring_test(struct radeon_device *rdev)
1381 1715
1382void r600_wb_disable(struct radeon_device *rdev) 1716void r600_wb_disable(struct radeon_device *rdev)
1383{ 1717{
1718 int r;
1719
1384 WREG32(SCRATCH_UMSK, 0); 1720 WREG32(SCRATCH_UMSK, 0);
1385 if (rdev->wb.wb_obj) { 1721 if (rdev->wb.wb_obj) {
1386 radeon_object_kunmap(rdev->wb.wb_obj); 1722 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1387 radeon_object_unpin(rdev->wb.wb_obj); 1723 if (unlikely(r != 0))
1724 return;
1725 radeon_bo_kunmap(rdev->wb.wb_obj);
1726 radeon_bo_unpin(rdev->wb.wb_obj);
1727 radeon_bo_unreserve(rdev->wb.wb_obj);
1388 } 1728 }
1389} 1729}
1390 1730
@@ -1392,7 +1732,7 @@ void r600_wb_fini(struct radeon_device *rdev)
1392{ 1732{
1393 r600_wb_disable(rdev); 1733 r600_wb_disable(rdev);
1394 if (rdev->wb.wb_obj) { 1734 if (rdev->wb.wb_obj) {
1395 radeon_object_unref(&rdev->wb.wb_obj); 1735 radeon_bo_unref(&rdev->wb.wb_obj);
1396 rdev->wb.wb = NULL; 1736 rdev->wb.wb = NULL;
1397 rdev->wb.wb_obj = NULL; 1737 rdev->wb.wb_obj = NULL;
1398 } 1738 }
@@ -1403,22 +1743,29 @@ int r600_wb_enable(struct radeon_device *rdev)
1403 int r; 1743 int r;
1404 1744
1405 if (rdev->wb.wb_obj == NULL) { 1745 if (rdev->wb.wb_obj == NULL) {
1406 r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, 1746 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
1407 RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); 1747 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
1408 if (r) { 1748 if (r) {
1409 dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); 1749 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
1410 return r; 1750 return r;
1411 } 1751 }
1412 r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 1752 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1753 if (unlikely(r != 0)) {
1754 r600_wb_fini(rdev);
1755 return r;
1756 }
1757 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
1413 &rdev->wb.gpu_addr); 1758 &rdev->wb.gpu_addr);
1414 if (r) { 1759 if (r) {
1415 dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); 1760 radeon_bo_unreserve(rdev->wb.wb_obj);
1761 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
1416 r600_wb_fini(rdev); 1762 r600_wb_fini(rdev);
1417 return r; 1763 return r;
1418 } 1764 }
1419 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 1765 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1766 radeon_bo_unreserve(rdev->wb.wb_obj);
1420 if (r) { 1767 if (r) {
1421 dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); 1768 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
1422 r600_wb_fini(rdev); 1769 r600_wb_fini(rdev);
1423 return r; 1770 return r;
1424 } 1771 }
@@ -1433,10 +1780,14 @@ int r600_wb_enable(struct radeon_device *rdev)
1433void r600_fence_ring_emit(struct radeon_device *rdev, 1780void r600_fence_ring_emit(struct radeon_device *rdev,
1434 struct radeon_fence *fence) 1781 struct radeon_fence *fence)
1435{ 1782{
1783 /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
1436 /* Emit fence sequence & fire IRQ */ 1784 /* Emit fence sequence & fire IRQ */
1437 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1785 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1438 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1786 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1439 radeon_ring_write(rdev, fence->seq); 1787 radeon_ring_write(rdev, fence->seq);
1788 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1789 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1790 radeon_ring_write(rdev, RB_INT_STAT);
1440} 1791}
1441 1792
1442int r600_copy_dma(struct radeon_device *rdev, 1793int r600_copy_dma(struct radeon_device *rdev,
@@ -1459,18 +1810,6 @@ int r600_copy_blit(struct radeon_device *rdev,
1459 return 0; 1810 return 0;
1460} 1811}
1461 1812
1462int r600_irq_process(struct radeon_device *rdev)
1463{
1464 /* FIXME: implement */
1465 return 0;
1466}
1467
1468int r600_irq_set(struct radeon_device *rdev)
1469{
1470 /* FIXME: implement */
1471 return 0;
1472}
1473
1474int r600_set_surface_reg(struct radeon_device *rdev, int reg, 1813int r600_set_surface_reg(struct radeon_device *rdev, int reg,
1475 uint32_t tiling_flags, uint32_t pitch, 1814 uint32_t tiling_flags, uint32_t pitch,
1476 uint32_t offset, uint32_t obj_size) 1815 uint32_t offset, uint32_t obj_size)
@@ -1506,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
1506{ 1845{
1507 int r; 1846 int r;
1508 1847
1848 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1849 r = r600_init_microcode(rdev);
1850 if (r) {
1851 DRM_ERROR("Failed to load firmware!\n");
1852 return r;
1853 }
1854 }
1855
1509 r600_mc_program(rdev); 1856 r600_mc_program(rdev);
1510 if (rdev->flags & RADEON_IS_AGP) { 1857 if (rdev->flags & RADEON_IS_AGP) {
1511 r600_agp_enable(rdev); 1858 r600_agp_enable(rdev);
@@ -1516,13 +1863,26 @@ int r600_startup(struct radeon_device *rdev)
1516 } 1863 }
1517 r600_gpu_init(rdev); 1864 r600_gpu_init(rdev);
1518 1865
1519 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 1866 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1520 &rdev->r600_blit.shader_gpu_addr); 1867 if (unlikely(r != 0))
1868 return r;
1869 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1870 &rdev->r600_blit.shader_gpu_addr);
1871 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1521 if (r) { 1872 if (r) {
1522 DRM_ERROR("failed to pin blit object %d\n", r); 1873 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1523 return r; 1874 return r;
1524 } 1875 }
1525 1876
1877 /* Enable IRQ */
1878 r = r600_irq_init(rdev);
1879 if (r) {
1880 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1881 radeon_irq_kms_fini(rdev);
1882 return r;
1883 }
1884 r600_irq_set(rdev);
1885
1526 r = radeon_ring_init(rdev, rdev->cp.ring_size); 1886 r = radeon_ring_init(rdev, rdev->cp.ring_size);
1527 if (r) 1887 if (r)
1528 return r; 1888 return r;
@@ -1583,13 +1943,19 @@ int r600_resume(struct radeon_device *rdev)
1583 1943
1584int r600_suspend(struct radeon_device *rdev) 1944int r600_suspend(struct radeon_device *rdev)
1585{ 1945{
1946 int r;
1947
1586 /* FIXME: we should wait for ring to be empty */ 1948 /* FIXME: we should wait for ring to be empty */
1587 r600_cp_stop(rdev); 1949 r600_cp_stop(rdev);
1588 rdev->cp.ready = false; 1950 rdev->cp.ready = false;
1589 r600_wb_disable(rdev); 1951 r600_wb_disable(rdev);
1590 r600_pcie_gart_disable(rdev); 1952 r600_pcie_gart_disable(rdev);
1591 /* unpin shaders bo */ 1953 /* unpin shaders bo */
1592 radeon_object_unpin(rdev->r600_blit.shader_obj); 1954 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1955 if (unlikely(r != 0))
1956 return r;
1957 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1958 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1593 return 0; 1959 return 0;
1594} 1960}
1595 1961
@@ -1627,7 +1993,11 @@ int r600_init(struct radeon_device *rdev)
1627 if (r) 1993 if (r)
1628 return r; 1994 return r;
1629 /* Post card if necessary */ 1995 /* Post card if necessary */
1630 if (!r600_card_posted(rdev) && rdev->bios) { 1996 if (!r600_card_posted(rdev)) {
1997 if (!rdev->bios) {
1998 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1999 return -EINVAL;
2000 }
1631 DRM_INFO("GPU not posted. posting now...\n"); 2001 DRM_INFO("GPU not posted. posting now...\n");
1632 atom_asic_init(rdev->mode_info.atom_context); 2002 atom_asic_init(rdev->mode_info.atom_context);
1633 } 2003 }
@@ -1650,31 +2020,31 @@ int r600_init(struct radeon_device *rdev)
1650 if (r) 2020 if (r)
1651 return r; 2021 return r;
1652 /* Memory manager */ 2022 /* Memory manager */
1653 r = radeon_object_init(rdev); 2023 r = radeon_bo_init(rdev);
1654 if (r) 2024 if (r)
1655 return r; 2025 return r;
2026
2027 r = radeon_irq_kms_init(rdev);
2028 if (r)
2029 return r;
2030
1656 rdev->cp.ring_obj = NULL; 2031 rdev->cp.ring_obj = NULL;
1657 r600_ring_init(rdev, 1024 * 1024); 2032 r600_ring_init(rdev, 1024 * 1024);
1658 2033
1659 if (!rdev->me_fw || !rdev->pfp_fw) { 2034 rdev->ih.ring_obj = NULL;
1660 r = r600_cp_init_microcode(rdev); 2035 r600_ih_ring_init(rdev, 64 * 1024);
1661 if (r) {
1662 DRM_ERROR("Failed to load firmware!\n");
1663 return r;
1664 }
1665 }
1666 2036
1667 r = r600_pcie_gart_init(rdev); 2037 r = r600_pcie_gart_init(rdev);
1668 if (r) 2038 if (r)
1669 return r; 2039 return r;
1670 2040
1671 rdev->accel_working = true;
1672 r = r600_blit_init(rdev); 2041 r = r600_blit_init(rdev);
1673 if (r) { 2042 if (r) {
1674 DRM_ERROR("radeon: failled blitter (%d).\n", r); 2043 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1675 return r; 2044 return r;
1676 } 2045 }
1677 2046
2047 rdev->accel_working = true;
1678 r = r600_startup(rdev); 2048 r = r600_startup(rdev);
1679 if (r) { 2049 if (r) {
1680 r600_suspend(rdev); 2050 r600_suspend(rdev);
@@ -1686,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
1686 if (rdev->accel_working) { 2056 if (rdev->accel_working) {
1687 r = radeon_ib_pool_init(rdev); 2057 r = radeon_ib_pool_init(rdev);
1688 if (r) { 2058 if (r) {
1689 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 2059 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1690 rdev->accel_working = false; 2060 rdev->accel_working = false;
1691 } 2061 }
1692 r = r600_ib_test(rdev); 2062 r = r600_ib_test(rdev);
1693 if (r) { 2063 if (r) {
1694 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 2064 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1695 rdev->accel_working = false; 2065 rdev->accel_working = false;
1696 } 2066 }
1697 } 2067 }
@@ -1704,6 +2074,8 @@ void r600_fini(struct radeon_device *rdev)
1704 r600_suspend(rdev); 2074 r600_suspend(rdev);
1705 2075
1706 r600_blit_fini(rdev); 2076 r600_blit_fini(rdev);
2077 r600_irq_fini(rdev);
2078 radeon_irq_kms_fini(rdev);
1707 radeon_ring_fini(rdev); 2079 radeon_ring_fini(rdev);
1708 r600_wb_fini(rdev); 2080 r600_wb_fini(rdev);
1709 r600_pcie_gart_fini(rdev); 2081 r600_pcie_gart_fini(rdev);
@@ -1712,7 +2084,7 @@ void r600_fini(struct radeon_device *rdev)
1712 radeon_clocks_fini(rdev); 2084 radeon_clocks_fini(rdev);
1713 if (rdev->flags & RADEON_IS_AGP) 2085 if (rdev->flags & RADEON_IS_AGP)
1714 radeon_agp_fini(rdev); 2086 radeon_agp_fini(rdev);
1715 radeon_object_fini(rdev); 2087 radeon_bo_fini(rdev);
1716 radeon_atombios_fini(rdev); 2088 radeon_atombios_fini(rdev);
1717 kfree(rdev->bios); 2089 kfree(rdev->bios);
1718 rdev->bios = NULL; 2090 rdev->bios = NULL;
@@ -1798,8 +2170,657 @@ int r600_ib_test(struct radeon_device *rdev)
1798 return r; 2170 return r;
1799} 2171}
1800 2172
2173/*
2174 * Interrupts
2175 *
2176 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
2177 * the same as the CP ring buffer, but in reverse. Rather than the CPU
2178 * writing to the ring and the GPU consuming, the GPU writes to the ring
2179 * and host consumes. As the host irq handler processes interrupts, it
2180 * increments the rptr. When the rptr catches up with the wptr, all the
2181 * current interrupts have been processed.
2182 */
2183
2184void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2185{
2186 u32 rb_bufsz;
2187
2188 /* Align ring size */
2189 rb_bufsz = drm_order(ring_size / 4);
2190 ring_size = (1 << rb_bufsz) * 4;
2191 rdev->ih.ring_size = ring_size;
2192 rdev->ih.align_mask = 4 - 1;
2193}
2194
2195static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2196{
2197 int r;
2198
2199 rdev->ih.ring_size = ring_size;
2200 /* Allocate ring buffer */
2201 if (rdev->ih.ring_obj == NULL) {
2202 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2203 true,
2204 RADEON_GEM_DOMAIN_GTT,
2205 &rdev->ih.ring_obj);
2206 if (r) {
2207 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2208 return r;
2209 }
2210 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2211 if (unlikely(r != 0))
2212 return r;
2213 r = radeon_bo_pin(rdev->ih.ring_obj,
2214 RADEON_GEM_DOMAIN_GTT,
2215 &rdev->ih.gpu_addr);
2216 if (r) {
2217 radeon_bo_unreserve(rdev->ih.ring_obj);
2218 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2219 return r;
2220 }
2221 r = radeon_bo_kmap(rdev->ih.ring_obj,
2222 (void **)&rdev->ih.ring);
2223 radeon_bo_unreserve(rdev->ih.ring_obj);
2224 if (r) {
2225 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2226 return r;
2227 }
2228 }
2229 rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
2230 rdev->ih.rptr = 0;
2231
2232 return 0;
2233}
2234
/* Tear down the IH ring buffer object: unmap, unpin, and drop the last
 * reference.  Safe to call when the ring was never allocated. */
static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		/* if the reserve fails, skip kunmap/unpin rather than touch
		 * a BO whose lock we do not hold; the unref below still runs */
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
2250
/* Halt the RLC microengine.  On r7xx an SRBM soft reset of the RLC is
 * required before it can be halted; the RREG32 read-backs post the writes. */
static void r600_rlc_stop(struct radeon_device *rdev)
{

	if (rdev->family >= CHIP_RV770) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	/* clear RLC_ENABLE to stop the engine */
	WREG32(RLC_CNTL, 0);
}
2265
/* (Re-)enable the RLC microengine; counterpart of r600_rlc_stop(). */
static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
2270
2271static int r600_rlc_init(struct radeon_device *rdev)
2272{
2273 u32 i;
2274 const __be32 *fw_data;
2275
2276 if (!rdev->rlc_fw)
2277 return -EINVAL;
2278
2279 r600_rlc_stop(rdev);
2280
2281 WREG32(RLC_HB_BASE, 0);
2282 WREG32(RLC_HB_CNTL, 0);
2283 WREG32(RLC_HB_RPTR, 0);
2284 WREG32(RLC_HB_WPTR, 0);
2285 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2286 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2287 WREG32(RLC_MC_CNTL, 0);
2288 WREG32(RLC_UCODE_CNTL, 0);
2289
2290 fw_data = (const __be32 *)rdev->rlc_fw->data;
2291 if (rdev->family >= CHIP_RV770) {
2292 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2293 WREG32(RLC_UCODE_ADDR, i);
2294 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2295 }
2296 } else {
2297 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2298 WREG32(RLC_UCODE_ADDR, i);
2299 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2300 }
2301 }
2302 WREG32(RLC_UCODE_ADDR, 0);
2303
2304 r600_rlc_start(rdev);
2305
2306 return 0;
2307}
2308
/* Master-enable the IH: turn on the interrupt controller and its ring
 * buffer.  Which sources actually fire is programmed via r600_irq_set(). */
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}
2320
/* Master-disable the IH: stop the ring buffer and the controller, and
 * reset both the hardware and software ring pointers to zero. */
static void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}
2337
2338static void r600_disable_interrupt_state(struct radeon_device *rdev)
2339{
2340 u32 tmp;
2341
2342 WREG32(CP_INT_CNTL, 0);
2343 WREG32(GRBM_INT_CNTL, 0);
2344 WREG32(DxMODE_INT_MASK, 0);
2345 if (ASIC_IS_DCE3(rdev)) {
2346 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2347 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2348 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2349 WREG32(DC_HPD1_INT_CONTROL, tmp);
2350 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2351 WREG32(DC_HPD2_INT_CONTROL, tmp);
2352 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2353 WREG32(DC_HPD3_INT_CONTROL, tmp);
2354 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2355 WREG32(DC_HPD4_INT_CONTROL, tmp);
2356 if (ASIC_IS_DCE32(rdev)) {
2357 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2358 WREG32(DC_HPD5_INT_CONTROL, 0);
2359 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2360 WREG32(DC_HPD6_INT_CONTROL, 0);
2361 }
2362 } else {
2363 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2364 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2365 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2366 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0);
2367 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2368 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0);
2369 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2370 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0);
2371 }
2372}
2373
/* Bring up the interrupt handler: allocate the IH ring, load the RLC
 * microcode, program the IH ring registers, then enable the controller
 * with all individual sources masked (callers enable sources via
 * r600_irq_set()).  Returns 0 on success or a negative error code. */
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		/* unwind the ring allocation on RLC failure */
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	/* ring size is encoded as a power-of-two dword count */
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	/* byte-swap IH ring entries written by the GPU on BE hosts */
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
2443
/* Shut down the interrupt handler: disable the IH, stop the RLC engine,
 * then free the IH ring buffer.  Reverse of r600_irq_init(). */
void r600_irq_fini(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	r600_rlc_stop(rdev);
	r600_ih_ring_fini(rdev);
}
2450
/* Program the individual interrupt source enables (CP software interrupt,
 * per-crtc vblank, per-connector hot-plug) from the rdev->irq request
 * flags.  HPD registers are read-modify-written so only the enable bit
 * changes.  No-op (returns 0) while the IH itself is disabled. */
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;

	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled)
		return 0;

	/* start from the current register values with the enable bit cleared;
	 * DCE3 and older parts use different HPD register sets */
	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	/* set the enable bits for each requested source */
	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}

	/* write everything back */
	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}
2532
2533static inline void r600_irq_ack(struct radeon_device *rdev,
2534 u32 *disp_int,
2535 u32 *disp_int_cont,
2536 u32 *disp_int_cont2)
2537{
2538 u32 tmp;
2539
2540 if (ASIC_IS_DCE3(rdev)) {
2541 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
2542 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
2543 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
2544 } else {
2545 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
2546 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2547 *disp_int_cont2 = 0;
2548 }
2549
2550 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
2551 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2552 if (*disp_int & LB_D1_VLINE_INTERRUPT)
2553 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2554 if (*disp_int & LB_D2_VBLANK_INTERRUPT)
2555 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2556 if (*disp_int & LB_D2_VLINE_INTERRUPT)
2557 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2558 if (*disp_int & DC_HPD1_INTERRUPT) {
2559 if (ASIC_IS_DCE3(rdev)) {
2560 tmp = RREG32(DC_HPD1_INT_CONTROL);
2561 tmp |= DC_HPDx_INT_ACK;
2562 WREG32(DC_HPD1_INT_CONTROL, tmp);
2563 } else {
2564 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
2565 tmp |= DC_HPDx_INT_ACK;
2566 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2567 }
2568 }
2569 if (*disp_int & DC_HPD2_INTERRUPT) {
2570 if (ASIC_IS_DCE3(rdev)) {
2571 tmp = RREG32(DC_HPD2_INT_CONTROL);
2572 tmp |= DC_HPDx_INT_ACK;
2573 WREG32(DC_HPD2_INT_CONTROL, tmp);
2574 } else {
2575 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
2576 tmp |= DC_HPDx_INT_ACK;
2577 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2578 }
2579 }
2580 if (*disp_int_cont & DC_HPD3_INTERRUPT) {
2581 if (ASIC_IS_DCE3(rdev)) {
2582 tmp = RREG32(DC_HPD3_INT_CONTROL);
2583 tmp |= DC_HPDx_INT_ACK;
2584 WREG32(DC_HPD3_INT_CONTROL, tmp);
2585 } else {
2586 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
2587 tmp |= DC_HPDx_INT_ACK;
2588 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2589 }
2590 }
2591 if (*disp_int_cont & DC_HPD4_INTERRUPT) {
2592 tmp = RREG32(DC_HPD4_INT_CONTROL);
2593 tmp |= DC_HPDx_INT_ACK;
2594 WREG32(DC_HPD4_INT_CONTROL, tmp);
2595 }
2596 if (ASIC_IS_DCE32(rdev)) {
2597 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
2598 tmp = RREG32(DC_HPD5_INT_CONTROL);
2599 tmp |= DC_HPDx_INT_ACK;
2600 WREG32(DC_HPD5_INT_CONTROL, tmp);
2601 }
2602 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
2603 tmp = RREG32(DC_HPD5_INT_CONTROL);
2604 tmp |= DC_HPDx_INT_ACK;
2605 WREG32(DC_HPD6_INT_CONTROL, tmp);
2606 }
2607 }
2608}
2609
/* Fully quiesce interrupts: master-disable the IH, wait for in-flight
 * interrupts, ack anything still pending, then mask all sources. */
void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	/* status words are discarded; we only want the ack side effects */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
2620
/* Read the hardware's current IH write pointer (byte offset into the
 * ring).  If the ring overflowed, clear the overflow flag so the ring
 * keeps running; entries lost to the overflow are not recovered. */
static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	/* XXX use writeback */
	wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		WARN_ON(1);
		/* XXX deal with overflow */
		DRM_ERROR("IH RB overflow\n");
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	/* strip the status bits, leaving the ring offset */
	wptr = wptr & WPTR_OFFSET_MASK;

	return wptr;
}
2640
2641/* r600 IV Ring
2642 * Each IV ring entry is 128 bits:
2643 * [7:0] - interrupt source id
2644 * [31:8] - reserved
2645 * [59:32] - interrupt source data
2646 * [127:60] - reserved
2647 *
2648 * The basic interrupt vector entries
2649 * are decoded as follows:
2650 * src_id src_data description
2651 * 1 0 D1 Vblank
2652 * 1 1 D1 Vline
2653 * 5 0 D2 Vblank
2654 * 5 1 D2 Vline
2655 * 19 0 FP Hot plug detection A
2656 * 19 1 FP Hot plug detection B
2657 * 19 2 DAC A auto-detection
2658 * 19 3 DAC B auto-detection
2659 * 176 - CP_INT RB
2660 * 177 - CP_INT IB1
2661 * 178 - CP_INT IB2
2662 * 181 - EOP Interrupt
2663 * 233 - GUI Idle
2664 *
2665 * Note, these are based on r600 and may need to be
2666 * adjusted or added to on newer asics
2667 */
2668
2669int r600_irq_process(struct radeon_device *rdev)
2670{
2671 u32 wptr = r600_get_ih_wptr(rdev);
2672 u32 rptr = rdev->ih.rptr;
2673 u32 src_id, src_data;
2674 u32 last_entry = rdev->ih.ring_size - 16;
2675 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2676 unsigned long flags;
2677 bool queue_hotplug = false;
2678
2679 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2680
2681 spin_lock_irqsave(&rdev->ih.lock, flags);
2682
2683 if (rptr == wptr) {
2684 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2685 return IRQ_NONE;
2686 }
2687 if (rdev->shutdown) {
2688 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2689 return IRQ_NONE;
2690 }
2691
2692restart_ih:
2693 /* display interrupts */
2694 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2695
2696 rdev->ih.wptr = wptr;
2697 while (rptr != wptr) {
2698 /* wptr/rptr are in bytes! */
2699 ring_index = rptr / 4;
2700 src_id = rdev->ih.ring[ring_index] & 0xff;
2701 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
2702
2703 switch (src_id) {
2704 case 1: /* D1 vblank/vline */
2705 switch (src_data) {
2706 case 0: /* D1 vblank */
2707 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2708 drm_handle_vblank(rdev->ddev, 0);
2709 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2710 DRM_DEBUG("IH: D1 vblank\n");
2711 }
2712 break;
2713 case 1: /* D1 vline */
2714 if (disp_int & LB_D1_VLINE_INTERRUPT) {
2715 disp_int &= ~LB_D1_VLINE_INTERRUPT;
2716 DRM_DEBUG("IH: D1 vline\n");
2717 }
2718 break;
2719 default:
2720 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2721 break;
2722 }
2723 break;
2724 case 5: /* D2 vblank/vline */
2725 switch (src_data) {
2726 case 0: /* D2 vblank */
2727 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2728 drm_handle_vblank(rdev->ddev, 1);
2729 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2730 DRM_DEBUG("IH: D2 vblank\n");
2731 }
2732 break;
2733 case 1: /* D1 vline */
2734 if (disp_int & LB_D2_VLINE_INTERRUPT) {
2735 disp_int &= ~LB_D2_VLINE_INTERRUPT;
2736 DRM_DEBUG("IH: D2 vline\n");
2737 }
2738 break;
2739 default:
2740 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2741 break;
2742 }
2743 break;
2744 case 19: /* HPD/DAC hotplug */
2745 switch (src_data) {
2746 case 0:
2747 if (disp_int & DC_HPD1_INTERRUPT) {
2748 disp_int &= ~DC_HPD1_INTERRUPT;
2749 queue_hotplug = true;
2750 DRM_DEBUG("IH: HPD1\n");
2751 }
2752 break;
2753 case 1:
2754 if (disp_int & DC_HPD2_INTERRUPT) {
2755 disp_int &= ~DC_HPD2_INTERRUPT;
2756 queue_hotplug = true;
2757 DRM_DEBUG("IH: HPD2\n");
2758 }
2759 break;
2760 case 4:
2761 if (disp_int_cont & DC_HPD3_INTERRUPT) {
2762 disp_int_cont &= ~DC_HPD3_INTERRUPT;
2763 queue_hotplug = true;
2764 DRM_DEBUG("IH: HPD3\n");
2765 }
2766 break;
2767 case 5:
2768 if (disp_int_cont & DC_HPD4_INTERRUPT) {
2769 disp_int_cont &= ~DC_HPD4_INTERRUPT;
2770 queue_hotplug = true;
2771 DRM_DEBUG("IH: HPD4\n");
2772 }
2773 break;
2774 case 10:
2775 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
2776 disp_int_cont &= ~DC_HPD5_INTERRUPT;
2777 queue_hotplug = true;
2778 DRM_DEBUG("IH: HPD5\n");
2779 }
2780 break;
2781 case 12:
2782 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
2783 disp_int_cont &= ~DC_HPD6_INTERRUPT;
2784 queue_hotplug = true;
2785 DRM_DEBUG("IH: HPD6\n");
2786 }
2787 break;
2788 default:
2789 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2790 break;
2791 }
2792 break;
2793 case 176: /* CP_INT in ring buffer */
2794 case 177: /* CP_INT in IB1 */
2795 case 178: /* CP_INT in IB2 */
2796 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
2797 radeon_fence_process(rdev);
2798 break;
2799 case 181: /* CP EOP event */
2800 DRM_DEBUG("IH: CP EOP\n");
2801 break;
2802 default:
2803 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2804 break;
2805 }
2806
2807 /* wptr/rptr are in bytes! */
2808 if (rptr == last_entry)
2809 rptr = 0;
2810 else
2811 rptr += 16;
2812 }
2813 /* make sure wptr hasn't changed while processing */
2814 wptr = r600_get_ih_wptr(rdev);
2815 if (wptr != rdev->ih.wptr)
2816 goto restart_ih;
2817 if (queue_hotplug)
2818 queue_work(rdev->wq, &rdev->hotplug_work);
2819 rdev->ih.rptr = rptr;
2820 WREG32(IH_RB_RPTR, rdev->ih.rptr);
2821 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2822 return IRQ_HANDLED;
2823}
1803 2824
1804/* 2825/*
1805 * Debugfs info 2826 * Debugfs info
@@ -1811,21 +2832,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
1811 struct drm_info_node *node = (struct drm_info_node *) m->private; 2832 struct drm_info_node *node = (struct drm_info_node *) m->private;
1812 struct drm_device *dev = node->minor->dev; 2833 struct drm_device *dev = node->minor->dev;
1813 struct radeon_device *rdev = dev->dev_private; 2834 struct radeon_device *rdev = dev->dev_private;
1814 uint32_t rdp, wdp;
1815 unsigned count, i, j; 2835 unsigned count, i, j;
1816 2836
1817 radeon_ring_free_size(rdev); 2837 radeon_ring_free_size(rdev);
1818 rdp = RREG32(CP_RB_RPTR); 2838 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
1819 wdp = RREG32(CP_RB_WPTR);
1820 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1821 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); 2839 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
1822 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2840 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
1823 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2841 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
2842 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
2843 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
1824 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); 2844 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1825 seq_printf(m, "%u dwords in ring\n", count); 2845 seq_printf(m, "%u dwords in ring\n", count);
2846 i = rdev->cp.rptr;
1826 for (j = 0; j <= count; j++) { 2847 for (j = 0; j <= count; j++) {
1827 i = (rdp + j) & rdev->cp.ptr_mask;
1828 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); 2848 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2849 i = (i + 1) & rdev->cp.ptr_mask;
1829 } 2850 }
1830 return 0; 2851 return 0;
1831} 2852}