aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/r600.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--drivers/gpu/drm/radeon/r600.c1257
1 files changed, 1142 insertions, 115 deletions
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 609719490ec2..36656bd110bf 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
38 38
39#define PFP_UCODE_SIZE 576 39#define PFP_UCODE_SIZE 576
40#define PM4_UCODE_SIZE 1792 40#define PM4_UCODE_SIZE 1792
41#define RLC_UCODE_SIZE 768
41#define R700_PFP_UCODE_SIZE 848 42#define R700_PFP_UCODE_SIZE 848
42#define R700_PM4_UCODE_SIZE 1360 43#define R700_PM4_UCODE_SIZE 1360
44#define R700_RLC_UCODE_SIZE 1024
43 45
44/* Firmware Names */ 46/* Firmware Names */
45MODULE_FIRMWARE("radeon/R600_pfp.bin"); 47MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
62MODULE_FIRMWARE("radeon/RV730_me.bin"); 64MODULE_FIRMWARE("radeon/RV730_me.bin");
63MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 65MODULE_FIRMWARE("radeon/RV710_pfp.bin");
64MODULE_FIRMWARE("radeon/RV710_me.bin"); 66MODULE_FIRMWARE("radeon/RV710_me.bin");
67MODULE_FIRMWARE("radeon/R600_rlc.bin");
68MODULE_FIRMWARE("radeon/R700_rlc.bin");
65 69
66int r600_debugfs_mc_info_init(struct radeon_device *rdev); 70int r600_debugfs_mc_info_init(struct radeon_device *rdev);
67 71
@@ -70,6 +74,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
70void r600_gpu_init(struct radeon_device *rdev); 74void r600_gpu_init(struct radeon_device *rdev);
71void r600_fini(struct radeon_device *rdev); 75void r600_fini(struct radeon_device *rdev);
72 76
77/* hpd for digital panel detect/disconnect */
78bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
79{
80 bool connected = false;
81
82 if (ASIC_IS_DCE3(rdev)) {
83 switch (hpd) {
84 case RADEON_HPD_1:
85 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
86 connected = true;
87 break;
88 case RADEON_HPD_2:
89 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
90 connected = true;
91 break;
92 case RADEON_HPD_3:
93 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
94 connected = true;
95 break;
96 case RADEON_HPD_4:
97 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
98 connected = true;
99 break;
100 /* DCE 3.2 */
101 case RADEON_HPD_5:
102 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
103 connected = true;
104 break;
105 case RADEON_HPD_6:
106 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
107 connected = true;
108 break;
109 default:
110 break;
111 }
112 } else {
113 switch (hpd) {
114 case RADEON_HPD_1:
115 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
116 connected = true;
117 break;
118 case RADEON_HPD_2:
119 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
120 connected = true;
121 break;
122 case RADEON_HPD_3:
123 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
124 connected = true;
125 break;
126 default:
127 break;
128 }
129 }
130 return connected;
131}
132
133void r600_hpd_set_polarity(struct radeon_device *rdev,
134 enum radeon_hpd_id hpd)
135{
136 u32 tmp;
137 bool connected = r600_hpd_sense(rdev, hpd);
138
139 if (ASIC_IS_DCE3(rdev)) {
140 switch (hpd) {
141 case RADEON_HPD_1:
142 tmp = RREG32(DC_HPD1_INT_CONTROL);
143 if (connected)
144 tmp &= ~DC_HPDx_INT_POLARITY;
145 else
146 tmp |= DC_HPDx_INT_POLARITY;
147 WREG32(DC_HPD1_INT_CONTROL, tmp);
148 break;
149 case RADEON_HPD_2:
150 tmp = RREG32(DC_HPD2_INT_CONTROL);
151 if (connected)
152 tmp &= ~DC_HPDx_INT_POLARITY;
153 else
154 tmp |= DC_HPDx_INT_POLARITY;
155 WREG32(DC_HPD2_INT_CONTROL, tmp);
156 break;
157 case RADEON_HPD_3:
158 tmp = RREG32(DC_HPD3_INT_CONTROL);
159 if (connected)
160 tmp &= ~DC_HPDx_INT_POLARITY;
161 else
162 tmp |= DC_HPDx_INT_POLARITY;
163 WREG32(DC_HPD3_INT_CONTROL, tmp);
164 break;
165 case RADEON_HPD_4:
166 tmp = RREG32(DC_HPD4_INT_CONTROL);
167 if (connected)
168 tmp &= ~DC_HPDx_INT_POLARITY;
169 else
170 tmp |= DC_HPDx_INT_POLARITY;
171 WREG32(DC_HPD4_INT_CONTROL, tmp);
172 break;
173 case RADEON_HPD_5:
174 tmp = RREG32(DC_HPD5_INT_CONTROL);
175 if (connected)
176 tmp &= ~DC_HPDx_INT_POLARITY;
177 else
178 tmp |= DC_HPDx_INT_POLARITY;
179 WREG32(DC_HPD5_INT_CONTROL, tmp);
180 break;
181 /* DCE 3.2 */
182 case RADEON_HPD_6:
183 tmp = RREG32(DC_HPD6_INT_CONTROL);
184 if (connected)
185 tmp &= ~DC_HPDx_INT_POLARITY;
186 else
187 tmp |= DC_HPDx_INT_POLARITY;
188 WREG32(DC_HPD6_INT_CONTROL, tmp);
189 break;
190 default:
191 break;
192 }
193 } else {
194 switch (hpd) {
195 case RADEON_HPD_1:
196 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
197 if (connected)
198 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
199 else
200 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
201 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
202 break;
203 case RADEON_HPD_2:
204 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
205 if (connected)
206 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
207 else
208 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
209 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
210 break;
211 case RADEON_HPD_3:
212 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
213 if (connected)
214 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
215 else
216 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
217 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
218 break;
219 default:
220 break;
221 }
222 }
223}
224
225void r600_hpd_init(struct radeon_device *rdev)
226{
227 struct drm_device *dev = rdev->ddev;
228 struct drm_connector *connector;
229
230 if (ASIC_IS_DCE3(rdev)) {
231 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
232 if (ASIC_IS_DCE32(rdev))
233 tmp |= DC_HPDx_EN;
234
235 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
236 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
237 switch (radeon_connector->hpd.hpd) {
238 case RADEON_HPD_1:
239 WREG32(DC_HPD1_CONTROL, tmp);
240 rdev->irq.hpd[0] = true;
241 break;
242 case RADEON_HPD_2:
243 WREG32(DC_HPD2_CONTROL, tmp);
244 rdev->irq.hpd[1] = true;
245 break;
246 case RADEON_HPD_3:
247 WREG32(DC_HPD3_CONTROL, tmp);
248 rdev->irq.hpd[2] = true;
249 break;
250 case RADEON_HPD_4:
251 WREG32(DC_HPD4_CONTROL, tmp);
252 rdev->irq.hpd[3] = true;
253 break;
254 /* DCE 3.2 */
255 case RADEON_HPD_5:
256 WREG32(DC_HPD5_CONTROL, tmp);
257 rdev->irq.hpd[4] = true;
258 break;
259 case RADEON_HPD_6:
260 WREG32(DC_HPD6_CONTROL, tmp);
261 rdev->irq.hpd[5] = true;
262 break;
263 default:
264 break;
265 }
266 }
267 } else {
268 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
269 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
270 switch (radeon_connector->hpd.hpd) {
271 case RADEON_HPD_1:
272 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
273 rdev->irq.hpd[0] = true;
274 break;
275 case RADEON_HPD_2:
276 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
277 rdev->irq.hpd[1] = true;
278 break;
279 case RADEON_HPD_3:
280 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
281 rdev->irq.hpd[2] = true;
282 break;
283 default:
284 break;
285 }
286 }
287 }
288 r600_irq_set(rdev);
289}
290
291void r600_hpd_fini(struct radeon_device *rdev)
292{
293 struct drm_device *dev = rdev->ddev;
294 struct drm_connector *connector;
295
296 if (ASIC_IS_DCE3(rdev)) {
297 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
298 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
299 switch (radeon_connector->hpd.hpd) {
300 case RADEON_HPD_1:
301 WREG32(DC_HPD1_CONTROL, 0);
302 rdev->irq.hpd[0] = false;
303 break;
304 case RADEON_HPD_2:
305 WREG32(DC_HPD2_CONTROL, 0);
306 rdev->irq.hpd[1] = false;
307 break;
308 case RADEON_HPD_3:
309 WREG32(DC_HPD3_CONTROL, 0);
310 rdev->irq.hpd[2] = false;
311 break;
312 case RADEON_HPD_4:
313 WREG32(DC_HPD4_CONTROL, 0);
314 rdev->irq.hpd[3] = false;
315 break;
316 /* DCE 3.2 */
317 case RADEON_HPD_5:
318 WREG32(DC_HPD5_CONTROL, 0);
319 rdev->irq.hpd[4] = false;
320 break;
321 case RADEON_HPD_6:
322 WREG32(DC_HPD6_CONTROL, 0);
323 rdev->irq.hpd[5] = false;
324 break;
325 default:
326 break;
327 }
328 }
329 } else {
330 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
331 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
332 switch (radeon_connector->hpd.hpd) {
333 case RADEON_HPD_1:
334 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
335 rdev->irq.hpd[0] = false;
336 break;
337 case RADEON_HPD_2:
338 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
339 rdev->irq.hpd[1] = false;
340 break;
341 case RADEON_HPD_3:
342 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
343 rdev->irq.hpd[2] = false;
344 break;
345 default:
346 break;
347 }
348 }
349 }
350}
351
73/* 352/*
74 * R600 PCIE GART 353 * R600 PCIE GART
75 */ 354 */
@@ -180,7 +459,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
180void r600_pcie_gart_disable(struct radeon_device *rdev) 459void r600_pcie_gart_disable(struct radeon_device *rdev)
181{ 460{
182 u32 tmp; 461 u32 tmp;
183 int i; 462 int i, r;
184 463
185 /* Disable all tables */ 464 /* Disable all tables */
186 for (i = 0; i < 7; i++) 465 for (i = 0; i < 7; i++)
@@ -208,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
208 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); 487 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
209 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 488 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
210 if (rdev->gart.table.vram.robj) { 489 if (rdev->gart.table.vram.robj) {
211 radeon_object_kunmap(rdev->gart.table.vram.robj); 490 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
212 radeon_object_unpin(rdev->gart.table.vram.robj); 491 if (likely(r == 0)) {
492 radeon_bo_kunmap(rdev->gart.table.vram.robj);
493 radeon_bo_unpin(rdev->gart.table.vram.robj);
494 radeon_bo_unreserve(rdev->gart.table.vram.robj);
495 }
213 } 496 }
214} 497}
215 498
@@ -339,11 +622,10 @@ int r600_mc_init(struct radeon_device *rdev)
339{ 622{
340 fixed20_12 a; 623 fixed20_12 a;
341 u32 tmp; 624 u32 tmp;
342 int chansize; 625 int chansize, numchan;
343 int r; 626 int r;
344 627
345 /* Get VRAM informations */ 628 /* Get VRAM informations */
346 rdev->mc.vram_width = 128;
347 rdev->mc.vram_is_ddr = true; 629 rdev->mc.vram_is_ddr = true;
348 tmp = RREG32(RAMCFG); 630 tmp = RREG32(RAMCFG);
349 if (tmp & CHANSIZE_OVERRIDE) { 631 if (tmp & CHANSIZE_OVERRIDE) {
@@ -353,17 +635,23 @@ int r600_mc_init(struct radeon_device *rdev)
353 } else { 635 } else {
354 chansize = 32; 636 chansize = 32;
355 } 637 }
356 if (rdev->family == CHIP_R600) { 638 tmp = RREG32(CHMAP);
357 rdev->mc.vram_width = 8 * chansize; 639 switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
358 } else if (rdev->family == CHIP_RV670) { 640 case 0:
359 rdev->mc.vram_width = 4 * chansize; 641 default:
360 } else if ((rdev->family == CHIP_RV610) || 642 numchan = 1;
361 (rdev->family == CHIP_RV620)) { 643 break;
362 rdev->mc.vram_width = chansize; 644 case 1:
363 } else if ((rdev->family == CHIP_RV630) || 645 numchan = 2;
364 (rdev->family == CHIP_RV635)) { 646 break;
365 rdev->mc.vram_width = 2 * chansize; 647 case 2:
648 numchan = 4;
649 break;
650 case 3:
651 numchan = 8;
652 break;
366 } 653 }
654 rdev->mc.vram_width = numchan * chansize;
367 /* Could aper size report 0 ? */ 655 /* Could aper size report 0 ? */
368 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 656 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
369 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 657 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
@@ -389,11 +677,11 @@ int r600_mc_init(struct radeon_device *rdev)
389 * AGP so that GPU can catch out of VRAM/AGP access 677 * AGP so that GPU can catch out of VRAM/AGP access
390 */ 678 */
391 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) { 679 if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
392 /* Enought place before */ 680 /* Enough place before */
393 rdev->mc.vram_location = rdev->mc.gtt_location - 681 rdev->mc.vram_location = rdev->mc.gtt_location -
394 rdev->mc.mc_vram_size; 682 rdev->mc.mc_vram_size;
395 } else if (tmp > rdev->mc.mc_vram_size) { 683 } else if (tmp > rdev->mc.mc_vram_size) {
396 /* Enought place after */ 684 /* Enough place after */
397 rdev->mc.vram_location = rdev->mc.gtt_location + 685 rdev->mc.vram_location = rdev->mc.gtt_location +
398 rdev->mc.gtt_size; 686 rdev->mc.gtt_size;
399 } else { 687 } else {
@@ -404,35 +692,29 @@ int r600_mc_init(struct radeon_device *rdev)
404 rdev->mc.gtt_location = rdev->mc.mc_vram_size; 692 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
405 } 693 }
406 } else { 694 } else {
407 if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) { 695 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
408 rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) & 696 rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
409 0xFFFF) << 24; 697 0xFFFF) << 24;
410 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 698 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
411 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; 699 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
412 if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { 700 /* Enough place after vram */
413 /* Enough place after vram */ 701 rdev->mc.gtt_location = tmp;
414 rdev->mc.gtt_location = tmp; 702 } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
415 } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) { 703 /* Enough place before vram */
416 /* Enough place before vram */ 704 rdev->mc.gtt_location = 0;
705 } else {
706 /* Not enough place after or before shrink
707 * gart size
708 */
709 if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
417 rdev->mc.gtt_location = 0; 710 rdev->mc.gtt_location = 0;
711 rdev->mc.gtt_size = rdev->mc.vram_location;
418 } else { 712 } else {
419 /* Not enough place after or before shrink 713 rdev->mc.gtt_location = tmp;
420 * gart size 714 rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
421 */
422 if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
423 rdev->mc.gtt_location = 0;
424 rdev->mc.gtt_size = rdev->mc.vram_location;
425 } else {
426 rdev->mc.gtt_location = tmp;
427 rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
428 }
429 } 715 }
430 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
431 } else {
432 rdev->mc.vram_location = 0x00000000UL;
433 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
434 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
435 } 716 }
717 rdev->mc.gtt_location = rdev->mc.mc_vram_size;
436 } 718 }
437 rdev->mc.vram_start = rdev->mc.vram_location; 719 rdev->mc.vram_start = rdev->mc.vram_location;
438 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; 720 rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
@@ -859,7 +1141,8 @@ void r600_gpu_init(struct radeon_device *rdev)
859 ((rdev->family) == CHIP_RV630) || 1141 ((rdev->family) == CHIP_RV630) ||
860 ((rdev->family) == CHIP_RV610) || 1142 ((rdev->family) == CHIP_RV610) ||
861 ((rdev->family) == CHIP_RV620) || 1143 ((rdev->family) == CHIP_RV620) ||
862 ((rdev->family) == CHIP_RS780)) { 1144 ((rdev->family) == CHIP_RS780) ||
1145 ((rdev->family) == CHIP_RS880)) {
863 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE); 1146 WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
864 } else { 1147 } else {
865 WREG32(DB_DEBUG, 0); 1148 WREG32(DB_DEBUG, 0);
@@ -876,7 +1159,8 @@ void r600_gpu_init(struct radeon_device *rdev)
876 tmp = RREG32(SQ_MS_FIFO_SIZES); 1159 tmp = RREG32(SQ_MS_FIFO_SIZES);
877 if (((rdev->family) == CHIP_RV610) || 1160 if (((rdev->family) == CHIP_RV610) ||
878 ((rdev->family) == CHIP_RV620) || 1161 ((rdev->family) == CHIP_RV620) ||
879 ((rdev->family) == CHIP_RS780)) { 1162 ((rdev->family) == CHIP_RS780) ||
1163 ((rdev->family) == CHIP_RS880)) {
880 tmp = (CACHE_FIFO_SIZE(0xa) | 1164 tmp = (CACHE_FIFO_SIZE(0xa) |
881 FETCH_FIFO_HIWATER(0xa) | 1165 FETCH_FIFO_HIWATER(0xa) |
882 DONE_FIFO_HIWATER(0xe0) | 1166 DONE_FIFO_HIWATER(0xe0) |
@@ -919,7 +1203,8 @@ void r600_gpu_init(struct radeon_device *rdev)
919 NUM_ES_STACK_ENTRIES(0)); 1203 NUM_ES_STACK_ENTRIES(0));
920 } else if (((rdev->family) == CHIP_RV610) || 1204 } else if (((rdev->family) == CHIP_RV610) ||
921 ((rdev->family) == CHIP_RV620) || 1205 ((rdev->family) == CHIP_RV620) ||
922 ((rdev->family) == CHIP_RS780)) { 1206 ((rdev->family) == CHIP_RS780) ||
1207 ((rdev->family) == CHIP_RS880)) {
923 /* no vertex cache */ 1208 /* no vertex cache */
924 sq_config &= ~VC_ENABLE; 1209 sq_config &= ~VC_ENABLE;
925 1210
@@ -976,7 +1261,8 @@ void r600_gpu_init(struct radeon_device *rdev)
976 1261
977 if (((rdev->family) == CHIP_RV610) || 1262 if (((rdev->family) == CHIP_RV610) ||
978 ((rdev->family) == CHIP_RV620) || 1263 ((rdev->family) == CHIP_RV620) ||
979 ((rdev->family) == CHIP_RS780)) { 1264 ((rdev->family) == CHIP_RS780) ||
1265 ((rdev->family) == CHIP_RS880)) {
980 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY)); 1266 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
981 } else { 1267 } else {
982 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC)); 1268 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
@@ -1002,8 +1288,9 @@ void r600_gpu_init(struct radeon_device *rdev)
1002 tmp = rdev->config.r600.max_pipes * 16; 1288 tmp = rdev->config.r600.max_pipes * 16;
1003 switch (rdev->family) { 1289 switch (rdev->family) {
1004 case CHIP_RV610: 1290 case CHIP_RV610:
1005 case CHIP_RS780:
1006 case CHIP_RV620: 1291 case CHIP_RV620:
1292 case CHIP_RS780:
1293 case CHIP_RS880:
1007 tmp += 32; 1294 tmp += 32;
1008 break; 1295 break;
1009 case CHIP_RV670: 1296 case CHIP_RV670:
@@ -1044,8 +1331,9 @@ void r600_gpu_init(struct radeon_device *rdev)
1044 1331
1045 switch (rdev->family) { 1332 switch (rdev->family) {
1046 case CHIP_RV610: 1333 case CHIP_RV610:
1047 case CHIP_RS780:
1048 case CHIP_RV620: 1334 case CHIP_RV620:
1335 case CHIP_RS780:
1336 case CHIP_RS880:
1049 tmp = TC_L2_SIZE(8); 1337 tmp = TC_L2_SIZE(8);
1050 break; 1338 break;
1051 case CHIP_RV630: 1339 case CHIP_RV630:
@@ -1096,6 +1384,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1096 (void)RREG32(PCIE_PORT_DATA); 1384 (void)RREG32(PCIE_PORT_DATA);
1097} 1385}
1098 1386
1387void r600_hdp_flush(struct radeon_device *rdev)
1388{
1389 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1390}
1099 1391
1100/* 1392/*
1101 * CP & Ring 1393 * CP & Ring
@@ -1105,11 +1397,12 @@ void r600_cp_stop(struct radeon_device *rdev)
1105 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1397 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1106} 1398}
1107 1399
1108int r600_cp_init_microcode(struct radeon_device *rdev) 1400int r600_init_microcode(struct radeon_device *rdev)
1109{ 1401{
1110 struct platform_device *pdev; 1402 struct platform_device *pdev;
1111 const char *chip_name; 1403 const char *chip_name;
1112 size_t pfp_req_size, me_req_size; 1404 const char *rlc_chip_name;
1405 size_t pfp_req_size, me_req_size, rlc_req_size;
1113 char fw_name[30]; 1406 char fw_name[30];
1114 int err; 1407 int err;
1115 1408
@@ -1123,30 +1416,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
1123 } 1416 }
1124 1417
1125 switch (rdev->family) { 1418 switch (rdev->family) {
1126 case CHIP_R600: chip_name = "R600"; break; 1419 case CHIP_R600:
1127 case CHIP_RV610: chip_name = "RV610"; break; 1420 chip_name = "R600";
1128 case CHIP_RV630: chip_name = "RV630"; break; 1421 rlc_chip_name = "R600";
1129 case CHIP_RV620: chip_name = "RV620"; break; 1422 break;
1130 case CHIP_RV635: chip_name = "RV635"; break; 1423 case CHIP_RV610:
1131 case CHIP_RV670: chip_name = "RV670"; break; 1424 chip_name = "RV610";
1425 rlc_chip_name = "R600";
1426 break;
1427 case CHIP_RV630:
1428 chip_name = "RV630";
1429 rlc_chip_name = "R600";
1430 break;
1431 case CHIP_RV620:
1432 chip_name = "RV620";
1433 rlc_chip_name = "R600";
1434 break;
1435 case CHIP_RV635:
1436 chip_name = "RV635";
1437 rlc_chip_name = "R600";
1438 break;
1439 case CHIP_RV670:
1440 chip_name = "RV670";
1441 rlc_chip_name = "R600";
1442 break;
1132 case CHIP_RS780: 1443 case CHIP_RS780:
1133 case CHIP_RS880: chip_name = "RS780"; break; 1444 case CHIP_RS880:
1134 case CHIP_RV770: chip_name = "RV770"; break; 1445 chip_name = "RS780";
1446 rlc_chip_name = "R600";
1447 break;
1448 case CHIP_RV770:
1449 chip_name = "RV770";
1450 rlc_chip_name = "R700";
1451 break;
1135 case CHIP_RV730: 1452 case CHIP_RV730:
1136 case CHIP_RV740: chip_name = "RV730"; break; 1453 case CHIP_RV740:
1137 case CHIP_RV710: chip_name = "RV710"; break; 1454 chip_name = "RV730";
1455 rlc_chip_name = "R700";
1456 break;
1457 case CHIP_RV710:
1458 chip_name = "RV710";
1459 rlc_chip_name = "R700";
1460 break;
1138 default: BUG(); 1461 default: BUG();
1139 } 1462 }
1140 1463
1141 if (rdev->family >= CHIP_RV770) { 1464 if (rdev->family >= CHIP_RV770) {
1142 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 1465 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1143 me_req_size = R700_PM4_UCODE_SIZE * 4; 1466 me_req_size = R700_PM4_UCODE_SIZE * 4;
1467 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1144 } else { 1468 } else {
1145 pfp_req_size = PFP_UCODE_SIZE * 4; 1469 pfp_req_size = PFP_UCODE_SIZE * 4;
1146 me_req_size = PM4_UCODE_SIZE * 12; 1470 me_req_size = PM4_UCODE_SIZE * 12;
1471 rlc_req_size = RLC_UCODE_SIZE * 4;
1147 } 1472 }
1148 1473
1149 DRM_INFO("Loading %s CP Microcode\n", chip_name); 1474 DRM_INFO("Loading %s Microcode\n", chip_name);
1150 1475
1151 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 1476 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1152 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 1477 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1170,6 +1495,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
1170 rdev->me_fw->size, fw_name); 1495 rdev->me_fw->size, fw_name);
1171 err = -EINVAL; 1496 err = -EINVAL;
1172 } 1497 }
1498
1499 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1500 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1501 if (err)
1502 goto out;
1503 if (rdev->rlc_fw->size != rlc_req_size) {
1504 printk(KERN_ERR
1505 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1506 rdev->rlc_fw->size, fw_name);
1507 err = -EINVAL;
1508 }
1509
1173out: 1510out:
1174 platform_device_unregister(pdev); 1511 platform_device_unregister(pdev);
1175 1512
@@ -1182,6 +1519,8 @@ out:
1182 rdev->pfp_fw = NULL; 1519 rdev->pfp_fw = NULL;
1183 release_firmware(rdev->me_fw); 1520 release_firmware(rdev->me_fw);
1184 rdev->me_fw = NULL; 1521 rdev->me_fw = NULL;
1522 release_firmware(rdev->rlc_fw);
1523 rdev->rlc_fw = NULL;
1185 } 1524 }
1186 return err; 1525 return err;
1187} 1526}
@@ -1267,19 +1606,17 @@ int r600_cp_resume(struct radeon_device *rdev)
1267 1606
1268 /* Set ring buffer size */ 1607 /* Set ring buffer size */
1269 rb_bufsz = drm_order(rdev->cp.ring_size / 8); 1608 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
1609 tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1270#ifdef __BIG_ENDIAN 1610#ifdef __BIG_ENDIAN
1271 WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE | 1611 tmp |= BUF_SWAP_32BIT;
1272 (drm_order(4096/8) << 8) | rb_bufsz);
1273#else
1274 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
1275#endif 1612#endif
1613 WREG32(CP_RB_CNTL, tmp);
1276 WREG32(CP_SEM_WAIT_TIMER, 0x4); 1614 WREG32(CP_SEM_WAIT_TIMER, 0x4);
1277 1615
1278 /* Set the write pointer delay */ 1616 /* Set the write pointer delay */
1279 WREG32(CP_RB_WPTR_DELAY, 0); 1617 WREG32(CP_RB_WPTR_DELAY, 0);
1280 1618
1281 /* Initialize the ring buffer's read and write pointers */ 1619 /* Initialize the ring buffer's read and write pointers */
1282 tmp = RREG32(CP_RB_CNTL);
1283 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA); 1620 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
1284 WREG32(CP_RB_RPTR_WR, 0); 1621 WREG32(CP_RB_RPTR_WR, 0);
1285 WREG32(CP_RB_WPTR, 0); 1622 WREG32(CP_RB_WPTR, 0);
@@ -1378,10 +1715,16 @@ int r600_ring_test(struct radeon_device *rdev)
1378 1715
1379void r600_wb_disable(struct radeon_device *rdev) 1716void r600_wb_disable(struct radeon_device *rdev)
1380{ 1717{
1718 int r;
1719
1381 WREG32(SCRATCH_UMSK, 0); 1720 WREG32(SCRATCH_UMSK, 0);
1382 if (rdev->wb.wb_obj) { 1721 if (rdev->wb.wb_obj) {
1383 radeon_object_kunmap(rdev->wb.wb_obj); 1722 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1384 radeon_object_unpin(rdev->wb.wb_obj); 1723 if (unlikely(r != 0))
1724 return;
1725 radeon_bo_kunmap(rdev->wb.wb_obj);
1726 radeon_bo_unpin(rdev->wb.wb_obj);
1727 radeon_bo_unreserve(rdev->wb.wb_obj);
1385 } 1728 }
1386} 1729}
1387 1730
@@ -1389,7 +1732,7 @@ void r600_wb_fini(struct radeon_device *rdev)
1389{ 1732{
1390 r600_wb_disable(rdev); 1733 r600_wb_disable(rdev);
1391 if (rdev->wb.wb_obj) { 1734 if (rdev->wb.wb_obj) {
1392 radeon_object_unref(&rdev->wb.wb_obj); 1735 radeon_bo_unref(&rdev->wb.wb_obj);
1393 rdev->wb.wb = NULL; 1736 rdev->wb.wb = NULL;
1394 rdev->wb.wb_obj = NULL; 1737 rdev->wb.wb_obj = NULL;
1395 } 1738 }
@@ -1400,22 +1743,29 @@ int r600_wb_enable(struct radeon_device *rdev)
1400 int r; 1743 int r;
1401 1744
1402 if (rdev->wb.wb_obj == NULL) { 1745 if (rdev->wb.wb_obj == NULL) {
1403 r = radeon_object_create(rdev, NULL, 4096, true, 1746 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
1404 RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); 1747 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
1405 if (r) { 1748 if (r) {
1406 dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); 1749 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
1750 return r;
1751 }
1752 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1753 if (unlikely(r != 0)) {
1754 r600_wb_fini(rdev);
1407 return r; 1755 return r;
1408 } 1756 }
1409 r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 1757 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
1410 &rdev->wb.gpu_addr); 1758 &rdev->wb.gpu_addr);
1411 if (r) { 1759 if (r) {
1412 dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); 1760 radeon_bo_unreserve(rdev->wb.wb_obj);
1761 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
1413 r600_wb_fini(rdev); 1762 r600_wb_fini(rdev);
1414 return r; 1763 return r;
1415 } 1764 }
1416 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 1765 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1766 radeon_bo_unreserve(rdev->wb.wb_obj);
1417 if (r) { 1767 if (r) {
1418 dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); 1768 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
1419 r600_wb_fini(rdev); 1769 r600_wb_fini(rdev);
1420 return r; 1770 return r;
1421 } 1771 }
@@ -1430,10 +1780,14 @@ int r600_wb_enable(struct radeon_device *rdev)
1430void r600_fence_ring_emit(struct radeon_device *rdev, 1780void r600_fence_ring_emit(struct radeon_device *rdev,
1431 struct radeon_fence *fence) 1781 struct radeon_fence *fence)
1432{ 1782{
1783 /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
1433 /* Emit fence sequence & fire IRQ */ 1784 /* Emit fence sequence & fire IRQ */
1434 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1785 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1435 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1786 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1436 radeon_ring_write(rdev, fence->seq); 1787 radeon_ring_write(rdev, fence->seq);
1788 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1789 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1790 radeon_ring_write(rdev, RB_INT_STAT);
1437} 1791}
1438 1792
1439int r600_copy_dma(struct radeon_device *rdev, 1793int r600_copy_dma(struct radeon_device *rdev,
@@ -1450,24 +1804,12 @@ int r600_copy_blit(struct radeon_device *rdev,
1450 uint64_t src_offset, uint64_t dst_offset, 1804 uint64_t src_offset, uint64_t dst_offset,
1451 unsigned num_pages, struct radeon_fence *fence) 1805 unsigned num_pages, struct radeon_fence *fence)
1452{ 1806{
1453 r600_blit_prepare_copy(rdev, num_pages * 4096); 1807 r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
1454 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096); 1808 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
1455 r600_blit_done_copy(rdev, fence); 1809 r600_blit_done_copy(rdev, fence);
1456 return 0; 1810 return 0;
1457} 1811}
1458 1812
1459int r600_irq_process(struct radeon_device *rdev)
1460{
1461 /* FIXME: implement */
1462 return 0;
1463}
1464
1465int r600_irq_set(struct radeon_device *rdev)
1466{
1467 /* FIXME: implement */
1468 return 0;
1469}
1470
1471int r600_set_surface_reg(struct radeon_device *rdev, int reg, 1813int r600_set_surface_reg(struct radeon_device *rdev, int reg,
1472 uint32_t tiling_flags, uint32_t pitch, 1814 uint32_t tiling_flags, uint32_t pitch,
1473 uint32_t offset, uint32_t obj_size) 1815 uint32_t offset, uint32_t obj_size)
@@ -1503,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
1503{ 1845{
1504 int r; 1846 int r;
1505 1847
1848 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1849 r = r600_init_microcode(rdev);
1850 if (r) {
1851 DRM_ERROR("Failed to load firmware!\n");
1852 return r;
1853 }
1854 }
1855
1506 r600_mc_program(rdev); 1856 r600_mc_program(rdev);
1507 if (rdev->flags & RADEON_IS_AGP) { 1857 if (rdev->flags & RADEON_IS_AGP) {
1508 r600_agp_enable(rdev); 1858 r600_agp_enable(rdev);
@@ -1513,12 +1863,25 @@ int r600_startup(struct radeon_device *rdev)
1513 } 1863 }
1514 r600_gpu_init(rdev); 1864 r600_gpu_init(rdev);
1515 1865
1516 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 1866 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1517 &rdev->r600_blit.shader_gpu_addr); 1867 if (unlikely(r != 0))
1868 return r;
1869 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1870 &rdev->r600_blit.shader_gpu_addr);
1871 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1872 if (r) {
1873 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1874 return r;
1875 }
1876
1877 /* Enable IRQ */
1878 r = r600_irq_init(rdev);
1518 if (r) { 1879 if (r) {
1519 DRM_ERROR("failed to pin blit object %d\n", r); 1880 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1881 radeon_irq_kms_fini(rdev);
1520 return r; 1882 return r;
1521 } 1883 }
1884 r600_irq_set(rdev);
1522 1885
1523 r = radeon_ring_init(rdev, rdev->cp.ring_size); 1886 r = radeon_ring_init(rdev, rdev->cp.ring_size);
1524 if (r) 1887 if (r)
@@ -1580,13 +1943,19 @@ int r600_resume(struct radeon_device *rdev)
1580 1943
1581int r600_suspend(struct radeon_device *rdev) 1944int r600_suspend(struct radeon_device *rdev)
1582{ 1945{
1946 int r;
1947
1583 /* FIXME: we should wait for ring to be empty */ 1948 /* FIXME: we should wait for ring to be empty */
1584 r600_cp_stop(rdev); 1949 r600_cp_stop(rdev);
1585 rdev->cp.ready = false; 1950 rdev->cp.ready = false;
1586 r600_wb_disable(rdev); 1951 r600_wb_disable(rdev);
1587 r600_pcie_gart_disable(rdev); 1952 r600_pcie_gart_disable(rdev);
1588 /* unpin shaders bo */ 1953 /* unpin shaders bo */
1589 radeon_object_unpin(rdev->r600_blit.shader_obj); 1954 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1955 if (unlikely(r != 0))
1956 return r;
1957 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1958 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1590 return 0; 1959 return 0;
1591} 1960}
1592 1961
@@ -1624,7 +1993,11 @@ int r600_init(struct radeon_device *rdev)
1624 if (r) 1993 if (r)
1625 return r; 1994 return r;
1626 /* Post card if necessary */ 1995 /* Post card if necessary */
1627 if (!r600_card_posted(rdev) && rdev->bios) { 1996 if (!r600_card_posted(rdev)) {
1997 if (!rdev->bios) {
1998 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1999 return -EINVAL;
2000 }
1628 DRM_INFO("GPU not posted. posting now...\n"); 2001 DRM_INFO("GPU not posted. posting now...\n");
1629 atom_asic_init(rdev->mode_info.atom_context); 2002 atom_asic_init(rdev->mode_info.atom_context);
1630 } 2003 }
@@ -1632,10 +2005,13 @@ int r600_init(struct radeon_device *rdev)
1632 r600_scratch_init(rdev); 2005 r600_scratch_init(rdev);
1633 /* Initialize surface registers */ 2006 /* Initialize surface registers */
1634 radeon_surface_init(rdev); 2007 radeon_surface_init(rdev);
2008 /* Initialize clocks */
1635 radeon_get_clock_info(rdev->ddev); 2009 radeon_get_clock_info(rdev->ddev);
1636 r = radeon_clocks_init(rdev); 2010 r = radeon_clocks_init(rdev);
1637 if (r) 2011 if (r)
1638 return r; 2012 return r;
2013 /* Initialize power management */
2014 radeon_pm_init(rdev);
1639 /* Fence driver */ 2015 /* Fence driver */
1640 r = radeon_fence_driver_init(rdev); 2016 r = radeon_fence_driver_init(rdev);
1641 if (r) 2017 if (r)
@@ -1644,31 +2020,31 @@ int r600_init(struct radeon_device *rdev)
1644 if (r) 2020 if (r)
1645 return r; 2021 return r;
1646 /* Memory manager */ 2022 /* Memory manager */
1647 r = radeon_object_init(rdev); 2023 r = radeon_bo_init(rdev);
1648 if (r) 2024 if (r)
1649 return r; 2025 return r;
2026
2027 r = radeon_irq_kms_init(rdev);
2028 if (r)
2029 return r;
2030
1650 rdev->cp.ring_obj = NULL; 2031 rdev->cp.ring_obj = NULL;
1651 r600_ring_init(rdev, 1024 * 1024); 2032 r600_ring_init(rdev, 1024 * 1024);
1652 2033
1653 if (!rdev->me_fw || !rdev->pfp_fw) { 2034 rdev->ih.ring_obj = NULL;
1654 r = r600_cp_init_microcode(rdev); 2035 r600_ih_ring_init(rdev, 64 * 1024);
1655 if (r) {
1656 DRM_ERROR("Failed to load firmware!\n");
1657 return r;
1658 }
1659 }
1660 2036
1661 r = r600_pcie_gart_init(rdev); 2037 r = r600_pcie_gart_init(rdev);
1662 if (r) 2038 if (r)
1663 return r; 2039 return r;
1664 2040
1665 rdev->accel_working = true;
1666 r = r600_blit_init(rdev); 2041 r = r600_blit_init(rdev);
1667 if (r) { 2042 if (r) {
1668 DRM_ERROR("radeon: failled blitter (%d).\n", r); 2043 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1669 return r; 2044 return r;
1670 } 2045 }
1671 2046
2047 rdev->accel_working = true;
1672 r = r600_startup(rdev); 2048 r = r600_startup(rdev);
1673 if (r) { 2049 if (r) {
1674 r600_suspend(rdev); 2050 r600_suspend(rdev);
@@ -1680,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
1680 if (rdev->accel_working) { 2056 if (rdev->accel_working) {
1681 r = radeon_ib_pool_init(rdev); 2057 r = radeon_ib_pool_init(rdev);
1682 if (r) { 2058 if (r) {
1683 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 2059 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1684 rdev->accel_working = false; 2060 rdev->accel_working = false;
1685 } 2061 }
1686 r = r600_ib_test(rdev); 2062 r = r600_ib_test(rdev);
1687 if (r) { 2063 if (r) {
1688 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 2064 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1689 rdev->accel_working = false; 2065 rdev->accel_working = false;
1690 } 2066 }
1691 } 2067 }
@@ -1698,6 +2074,8 @@ void r600_fini(struct radeon_device *rdev)
1698 r600_suspend(rdev); 2074 r600_suspend(rdev);
1699 2075
1700 r600_blit_fini(rdev); 2076 r600_blit_fini(rdev);
2077 r600_irq_fini(rdev);
2078 radeon_irq_kms_fini(rdev);
1701 radeon_ring_fini(rdev); 2079 radeon_ring_fini(rdev);
1702 r600_wb_fini(rdev); 2080 r600_wb_fini(rdev);
1703 r600_pcie_gart_fini(rdev); 2081 r600_pcie_gart_fini(rdev);
@@ -1706,7 +2084,7 @@ void r600_fini(struct radeon_device *rdev)
1706 radeon_clocks_fini(rdev); 2084 radeon_clocks_fini(rdev);
1707 if (rdev->flags & RADEON_IS_AGP) 2085 if (rdev->flags & RADEON_IS_AGP)
1708 radeon_agp_fini(rdev); 2086 radeon_agp_fini(rdev);
1709 radeon_object_fini(rdev); 2087 radeon_bo_fini(rdev);
1710 radeon_atombios_fini(rdev); 2088 radeon_atombios_fini(rdev);
1711 kfree(rdev->bios); 2089 kfree(rdev->bios);
1712 rdev->bios = NULL; 2090 rdev->bios = NULL;
@@ -1792,8 +2170,657 @@ int r600_ib_test(struct radeon_device *rdev)
1792 return r; 2170 return r;
1793} 2171}
1794 2172
2173/*
2174 * Interrupts
2175 *
2176 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty
2177 * the same as the CP ring buffer, but in reverse. Rather than the CPU
2178 * writing to the ring and the GPU consuming, the GPU writes to the ring
2179 * and host consumes. As the host irq handler processes interrupts, it
2180 * increments the rptr. When the rptr catches up with the wptr, all the
2181 * current interrupts have been processed.
2182 */
2183
2184void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2185{
2186 u32 rb_bufsz;
2187
2188 /* Align ring size */
2189 rb_bufsz = drm_order(ring_size / 4);
2190 ring_size = (1 << rb_bufsz) * 4;
2191 rdev->ih.ring_size = ring_size;
2192 rdev->ih.align_mask = 4 - 1;
2193}
2194
2195static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2196{
2197 int r;
2198
2199 rdev->ih.ring_size = ring_size;
2200 /* Allocate ring buffer */
2201 if (rdev->ih.ring_obj == NULL) {
2202 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2203 true,
2204 RADEON_GEM_DOMAIN_GTT,
2205 &rdev->ih.ring_obj);
2206 if (r) {
2207 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2208 return r;
2209 }
2210 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2211 if (unlikely(r != 0))
2212 return r;
2213 r = radeon_bo_pin(rdev->ih.ring_obj,
2214 RADEON_GEM_DOMAIN_GTT,
2215 &rdev->ih.gpu_addr);
2216 if (r) {
2217 radeon_bo_unreserve(rdev->ih.ring_obj);
2218 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2219 return r;
2220 }
2221 r = radeon_bo_kmap(rdev->ih.ring_obj,
2222 (void **)&rdev->ih.ring);
2223 radeon_bo_unreserve(rdev->ih.ring_obj);
2224 if (r) {
2225 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2226 return r;
2227 }
2228 }
2229 rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
2230 rdev->ih.rptr = 0;
2231
2232 return 0;
2233}
2234
2235static void r600_ih_ring_fini(struct radeon_device *rdev)
2236{
2237 int r;
2238 if (rdev->ih.ring_obj) {
2239 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2240 if (likely(r == 0)) {
2241 radeon_bo_kunmap(rdev->ih.ring_obj);
2242 radeon_bo_unpin(rdev->ih.ring_obj);
2243 radeon_bo_unreserve(rdev->ih.ring_obj);
2244 }
2245 radeon_bo_unref(&rdev->ih.ring_obj);
2246 rdev->ih.ring = NULL;
2247 rdev->ih.ring_obj = NULL;
2248 }
2249}
2250
2251static void r600_rlc_stop(struct radeon_device *rdev)
2252{
2253
2254 if (rdev->family >= CHIP_RV770) {
2255 /* r7xx asics need to soft reset RLC before halting */
2256 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2257 RREG32(SRBM_SOFT_RESET);
2258 udelay(15000);
2259 WREG32(SRBM_SOFT_RESET, 0);
2260 RREG32(SRBM_SOFT_RESET);
2261 }
2262
2263 WREG32(RLC_CNTL, 0);
2264}
2265
2266static void r600_rlc_start(struct radeon_device *rdev)
2267{
2268 WREG32(RLC_CNTL, RLC_ENABLE);
2269}
2270
2271static int r600_rlc_init(struct radeon_device *rdev)
2272{
2273 u32 i;
2274 const __be32 *fw_data;
2275
2276 if (!rdev->rlc_fw)
2277 return -EINVAL;
2278
2279 r600_rlc_stop(rdev);
2280
2281 WREG32(RLC_HB_BASE, 0);
2282 WREG32(RLC_HB_CNTL, 0);
2283 WREG32(RLC_HB_RPTR, 0);
2284 WREG32(RLC_HB_WPTR, 0);
2285 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2286 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2287 WREG32(RLC_MC_CNTL, 0);
2288 WREG32(RLC_UCODE_CNTL, 0);
2289
2290 fw_data = (const __be32 *)rdev->rlc_fw->data;
2291 if (rdev->family >= CHIP_RV770) {
2292 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2293 WREG32(RLC_UCODE_ADDR, i);
2294 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2295 }
2296 } else {
2297 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2298 WREG32(RLC_UCODE_ADDR, i);
2299 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2300 }
2301 }
2302 WREG32(RLC_UCODE_ADDR, 0);
2303
2304 r600_rlc_start(rdev);
2305
2306 return 0;
2307}
2308
2309static void r600_enable_interrupts(struct radeon_device *rdev)
2310{
2311 u32 ih_cntl = RREG32(IH_CNTL);
2312 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2313
2314 ih_cntl |= ENABLE_INTR;
2315 ih_rb_cntl |= IH_RB_ENABLE;
2316 WREG32(IH_CNTL, ih_cntl);
2317 WREG32(IH_RB_CNTL, ih_rb_cntl);
2318 rdev->ih.enabled = true;
2319}
2320
2321static void r600_disable_interrupts(struct radeon_device *rdev)
2322{
2323 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2324 u32 ih_cntl = RREG32(IH_CNTL);
2325
2326 ih_rb_cntl &= ~IH_RB_ENABLE;
2327 ih_cntl &= ~ENABLE_INTR;
2328 WREG32(IH_RB_CNTL, ih_rb_cntl);
2329 WREG32(IH_CNTL, ih_cntl);
2330 /* set rptr, wptr to 0 */
2331 WREG32(IH_RB_RPTR, 0);
2332 WREG32(IH_RB_WPTR, 0);
2333 rdev->ih.enabled = false;
2334 rdev->ih.wptr = 0;
2335 rdev->ih.rptr = 0;
2336}
2337
2338static void r600_disable_interrupt_state(struct radeon_device *rdev)
2339{
2340 u32 tmp;
2341
2342 WREG32(CP_INT_CNTL, 0);
2343 WREG32(GRBM_INT_CNTL, 0);
2344 WREG32(DxMODE_INT_MASK, 0);
2345 if (ASIC_IS_DCE3(rdev)) {
2346 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2347 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2348 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2349 WREG32(DC_HPD1_INT_CONTROL, tmp);
2350 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2351 WREG32(DC_HPD2_INT_CONTROL, tmp);
2352 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2353 WREG32(DC_HPD3_INT_CONTROL, tmp);
2354 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2355 WREG32(DC_HPD4_INT_CONTROL, tmp);
2356 if (ASIC_IS_DCE32(rdev)) {
2357 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2358 WREG32(DC_HPD5_INT_CONTROL, 0);
2359 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2360 WREG32(DC_HPD6_INT_CONTROL, 0);
2361 }
2362 } else {
2363 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2364 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2365 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2366 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0);
2367 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2368 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0);
2369 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2370 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0);
2371 }
2372}
2373
2374int r600_irq_init(struct radeon_device *rdev)
2375{
2376 int ret = 0;
2377 int rb_bufsz;
2378 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2379
2380 /* allocate ring */
2381 ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
2382 if (ret)
2383 return ret;
2384
2385 /* disable irqs */
2386 r600_disable_interrupts(rdev);
2387
2388 /* init rlc */
2389 ret = r600_rlc_init(rdev);
2390 if (ret) {
2391 r600_ih_ring_fini(rdev);
2392 return ret;
2393 }
2394
2395 /* setup interrupt control */
2396 /* set dummy read address to ring address */
2397 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2398 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2399 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2400 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2401 */
2402 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2403 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2404 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2405 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2406
2407 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2408 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2409
2410 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2411 IH_WPTR_OVERFLOW_CLEAR |
2412 (rb_bufsz << 1));
2413 /* WPTR writeback, not yet */
2414 /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2415 WREG32(IH_RB_WPTR_ADDR_LO, 0);
2416 WREG32(IH_RB_WPTR_ADDR_HI, 0);
2417
2418 WREG32(IH_RB_CNTL, ih_rb_cntl);
2419
2420 /* set rptr, wptr to 0 */
2421 WREG32(IH_RB_RPTR, 0);
2422 WREG32(IH_RB_WPTR, 0);
2423
2424 /* Default settings for IH_CNTL (disabled at first) */
2425 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2426 /* RPTR_REARM only works if msi's are enabled */
2427 if (rdev->msi_enabled)
2428 ih_cntl |= RPTR_REARM;
2429
2430#ifdef __BIG_ENDIAN
2431 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2432#endif
2433 WREG32(IH_CNTL, ih_cntl);
2434
2435 /* force the active interrupt state to all disabled */
2436 r600_disable_interrupt_state(rdev);
2437
2438 /* enable irqs */
2439 r600_enable_interrupts(rdev);
2440
2441 return ret;
2442}
2443
/* Tear down the interrupt path: mask irqs, halt the RLC, free the ring. */
void r600_irq_fini(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	r600_rlc_stop(rdev);
	r600_ih_ring_fini(rdev);
}
2450
2451int r600_irq_set(struct radeon_device *rdev)
2452{
2453 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2454 u32 mode_int = 0;
2455 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2456
2457 /* don't enable anything if the ih is disabled */
2458 if (!rdev->ih.enabled)
2459 return 0;
2460
2461 if (ASIC_IS_DCE3(rdev)) {
2462 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2463 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2464 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2465 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2466 if (ASIC_IS_DCE32(rdev)) {
2467 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2468 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2469 }
2470 } else {
2471 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2472 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2473 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2474 }
2475
2476 if (rdev->irq.sw_int) {
2477 DRM_DEBUG("r600_irq_set: sw int\n");
2478 cp_int_cntl |= RB_INT_ENABLE;
2479 }
2480 if (rdev->irq.crtc_vblank_int[0]) {
2481 DRM_DEBUG("r600_irq_set: vblank 0\n");
2482 mode_int |= D1MODE_VBLANK_INT_MASK;
2483 }
2484 if (rdev->irq.crtc_vblank_int[1]) {
2485 DRM_DEBUG("r600_irq_set: vblank 1\n");
2486 mode_int |= D2MODE_VBLANK_INT_MASK;
2487 }
2488 if (rdev->irq.hpd[0]) {
2489 DRM_DEBUG("r600_irq_set: hpd 1\n");
2490 hpd1 |= DC_HPDx_INT_EN;
2491 }
2492 if (rdev->irq.hpd[1]) {
2493 DRM_DEBUG("r600_irq_set: hpd 2\n");
2494 hpd2 |= DC_HPDx_INT_EN;
2495 }
2496 if (rdev->irq.hpd[2]) {
2497 DRM_DEBUG("r600_irq_set: hpd 3\n");
2498 hpd3 |= DC_HPDx_INT_EN;
2499 }
2500 if (rdev->irq.hpd[3]) {
2501 DRM_DEBUG("r600_irq_set: hpd 4\n");
2502 hpd4 |= DC_HPDx_INT_EN;
2503 }
2504 if (rdev->irq.hpd[4]) {
2505 DRM_DEBUG("r600_irq_set: hpd 5\n");
2506 hpd5 |= DC_HPDx_INT_EN;
2507 }
2508 if (rdev->irq.hpd[5]) {
2509 DRM_DEBUG("r600_irq_set: hpd 6\n");
2510 hpd6 |= DC_HPDx_INT_EN;
2511 }
2512
2513 WREG32(CP_INT_CNTL, cp_int_cntl);
2514 WREG32(DxMODE_INT_MASK, mode_int);
2515 if (ASIC_IS_DCE3(rdev)) {
2516 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2517 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2518 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2519 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2520 if (ASIC_IS_DCE32(rdev)) {
2521 WREG32(DC_HPD5_INT_CONTROL, hpd5);
2522 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2523 }
2524 } else {
2525 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
2526 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
2527 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
2528 }
2529
2530 return 0;
2531}
2532
2533static inline void r600_irq_ack(struct radeon_device *rdev,
2534 u32 *disp_int,
2535 u32 *disp_int_cont,
2536 u32 *disp_int_cont2)
2537{
2538 u32 tmp;
1795 2539
2540 if (ASIC_IS_DCE3(rdev)) {
2541 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
2542 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
2543 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
2544 } else {
2545 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
2546 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2547 *disp_int_cont2 = 0;
2548 }
2549
2550 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
2551 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2552 if (*disp_int & LB_D1_VLINE_INTERRUPT)
2553 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2554 if (*disp_int & LB_D2_VBLANK_INTERRUPT)
2555 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2556 if (*disp_int & LB_D2_VLINE_INTERRUPT)
2557 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2558 if (*disp_int & DC_HPD1_INTERRUPT) {
2559 if (ASIC_IS_DCE3(rdev)) {
2560 tmp = RREG32(DC_HPD1_INT_CONTROL);
2561 tmp |= DC_HPDx_INT_ACK;
2562 WREG32(DC_HPD1_INT_CONTROL, tmp);
2563 } else {
2564 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
2565 tmp |= DC_HPDx_INT_ACK;
2566 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2567 }
2568 }
2569 if (*disp_int & DC_HPD2_INTERRUPT) {
2570 if (ASIC_IS_DCE3(rdev)) {
2571 tmp = RREG32(DC_HPD2_INT_CONTROL);
2572 tmp |= DC_HPDx_INT_ACK;
2573 WREG32(DC_HPD2_INT_CONTROL, tmp);
2574 } else {
2575 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
2576 tmp |= DC_HPDx_INT_ACK;
2577 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2578 }
2579 }
2580 if (*disp_int_cont & DC_HPD3_INTERRUPT) {
2581 if (ASIC_IS_DCE3(rdev)) {
2582 tmp = RREG32(DC_HPD3_INT_CONTROL);
2583 tmp |= DC_HPDx_INT_ACK;
2584 WREG32(DC_HPD3_INT_CONTROL, tmp);
2585 } else {
2586 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
2587 tmp |= DC_HPDx_INT_ACK;
2588 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2589 }
2590 }
2591 if (*disp_int_cont & DC_HPD4_INTERRUPT) {
2592 tmp = RREG32(DC_HPD4_INT_CONTROL);
2593 tmp |= DC_HPDx_INT_ACK;
2594 WREG32(DC_HPD4_INT_CONTROL, tmp);
2595 }
2596 if (ASIC_IS_DCE32(rdev)) {
2597 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
2598 tmp = RREG32(DC_HPD5_INT_CONTROL);
2599 tmp |= DC_HPDx_INT_ACK;
2600 WREG32(DC_HPD5_INT_CONTROL, tmp);
2601 }
2602 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
2603 tmp = RREG32(DC_HPD5_INT_CONTROL);
2604 tmp |= DC_HPDx_INT_ACK;
2605 WREG32(DC_HPD6_INT_CONTROL, tmp);
2606 }
2607 }
2608}
1796 2609
2610void r600_irq_disable(struct radeon_device *rdev)
2611{
2612 u32 disp_int, disp_int_cont, disp_int_cont2;
2613
2614 r600_disable_interrupts(rdev);
2615 /* Wait and acknowledge irq */
2616 mdelay(1);
2617 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2618 r600_disable_interrupt_state(rdev);
2619}
2620
2621static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2622{
2623 u32 wptr, tmp;
2624
2625 /* XXX use writeback */
2626 wptr = RREG32(IH_RB_WPTR);
2627
2628 if (wptr & RB_OVERFLOW) {
2629 WARN_ON(1);
2630 /* XXX deal with overflow */
2631 DRM_ERROR("IH RB overflow\n");
2632 tmp = RREG32(IH_RB_CNTL);
2633 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2634 WREG32(IH_RB_CNTL, tmp);
2635 }
2636 wptr = wptr & WPTR_OFFSET_MASK;
2637
2638 return wptr;
2639}
2640
2641/* r600 IV Ring
2642 * Each IV ring entry is 128 bits:
2643 * [7:0] - interrupt source id
2644 * [31:8] - reserved
2645 * [59:32] - interrupt source data
2646 * [127:60] - reserved
2647 *
2648 * The basic interrupt vector entries
2649 * are decoded as follows:
2650 * src_id src_data description
2651 * 1 0 D1 Vblank
2652 * 1 1 D1 Vline
2653 * 5 0 D2 Vblank
2654 * 5 1 D2 Vline
2655 * 19 0 FP Hot plug detection A
2656 * 19 1 FP Hot plug detection B
2657 * 19 2 DAC A auto-detection
2658 * 19 3 DAC B auto-detection
2659 * 176 - CP_INT RB
2660 * 177 - CP_INT IB1
2661 * 178 - CP_INT IB2
2662 * 181 - EOP Interrupt
2663 * 233 - GUI Idle
2664 *
2665 * Note, these are based on r600 and may need to be
2666 * adjusted or added to on newer asics
2667 */
2668
2669int r600_irq_process(struct radeon_device *rdev)
2670{
2671 u32 wptr = r600_get_ih_wptr(rdev);
2672 u32 rptr = rdev->ih.rptr;
2673 u32 src_id, src_data;
2674 u32 last_entry = rdev->ih.ring_size - 16;
2675 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2676 unsigned long flags;
2677 bool queue_hotplug = false;
2678
2679 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2680
2681 spin_lock_irqsave(&rdev->ih.lock, flags);
2682
2683 if (rptr == wptr) {
2684 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2685 return IRQ_NONE;
2686 }
2687 if (rdev->shutdown) {
2688 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2689 return IRQ_NONE;
2690 }
2691
2692restart_ih:
2693 /* display interrupts */
2694 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2695
2696 rdev->ih.wptr = wptr;
2697 while (rptr != wptr) {
2698 /* wptr/rptr are in bytes! */
2699 ring_index = rptr / 4;
2700 src_id = rdev->ih.ring[ring_index] & 0xff;
2701 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
2702
2703 switch (src_id) {
2704 case 1: /* D1 vblank/vline */
2705 switch (src_data) {
2706 case 0: /* D1 vblank */
2707 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2708 drm_handle_vblank(rdev->ddev, 0);
2709 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2710 DRM_DEBUG("IH: D1 vblank\n");
2711 }
2712 break;
2713 case 1: /* D1 vline */
2714 if (disp_int & LB_D1_VLINE_INTERRUPT) {
2715 disp_int &= ~LB_D1_VLINE_INTERRUPT;
2716 DRM_DEBUG("IH: D1 vline\n");
2717 }
2718 break;
2719 default:
2720 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2721 break;
2722 }
2723 break;
2724 case 5: /* D2 vblank/vline */
2725 switch (src_data) {
2726 case 0: /* D2 vblank */
2727 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2728 drm_handle_vblank(rdev->ddev, 1);
2729 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2730 DRM_DEBUG("IH: D2 vblank\n");
2731 }
2732 break;
2733 case 1: /* D1 vline */
2734 if (disp_int & LB_D2_VLINE_INTERRUPT) {
2735 disp_int &= ~LB_D2_VLINE_INTERRUPT;
2736 DRM_DEBUG("IH: D2 vline\n");
2737 }
2738 break;
2739 default:
2740 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2741 break;
2742 }
2743 break;
2744 case 19: /* HPD/DAC hotplug */
2745 switch (src_data) {
2746 case 0:
2747 if (disp_int & DC_HPD1_INTERRUPT) {
2748 disp_int &= ~DC_HPD1_INTERRUPT;
2749 queue_hotplug = true;
2750 DRM_DEBUG("IH: HPD1\n");
2751 }
2752 break;
2753 case 1:
2754 if (disp_int & DC_HPD2_INTERRUPT) {
2755 disp_int &= ~DC_HPD2_INTERRUPT;
2756 queue_hotplug = true;
2757 DRM_DEBUG("IH: HPD2\n");
2758 }
2759 break;
2760 case 4:
2761 if (disp_int_cont & DC_HPD3_INTERRUPT) {
2762 disp_int_cont &= ~DC_HPD3_INTERRUPT;
2763 queue_hotplug = true;
2764 DRM_DEBUG("IH: HPD3\n");
2765 }
2766 break;
2767 case 5:
2768 if (disp_int_cont & DC_HPD4_INTERRUPT) {
2769 disp_int_cont &= ~DC_HPD4_INTERRUPT;
2770 queue_hotplug = true;
2771 DRM_DEBUG("IH: HPD4\n");
2772 }
2773 break;
2774 case 10:
2775 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
2776 disp_int_cont &= ~DC_HPD5_INTERRUPT;
2777 queue_hotplug = true;
2778 DRM_DEBUG("IH: HPD5\n");
2779 }
2780 break;
2781 case 12:
2782 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
2783 disp_int_cont &= ~DC_HPD6_INTERRUPT;
2784 queue_hotplug = true;
2785 DRM_DEBUG("IH: HPD6\n");
2786 }
2787 break;
2788 default:
2789 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2790 break;
2791 }
2792 break;
2793 case 176: /* CP_INT in ring buffer */
2794 case 177: /* CP_INT in IB1 */
2795 case 178: /* CP_INT in IB2 */
2796 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
2797 radeon_fence_process(rdev);
2798 break;
2799 case 181: /* CP EOP event */
2800 DRM_DEBUG("IH: CP EOP\n");
2801 break;
2802 default:
2803 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2804 break;
2805 }
2806
2807 /* wptr/rptr are in bytes! */
2808 if (rptr == last_entry)
2809 rptr = 0;
2810 else
2811 rptr += 16;
2812 }
2813 /* make sure wptr hasn't changed while processing */
2814 wptr = r600_get_ih_wptr(rdev);
2815 if (wptr != rdev->ih.wptr)
2816 goto restart_ih;
2817 if (queue_hotplug)
2818 queue_work(rdev->wq, &rdev->hotplug_work);
2819 rdev->ih.rptr = rptr;
2820 WREG32(IH_RB_RPTR, rdev->ih.rptr);
2821 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2822 return IRQ_HANDLED;
2823}
1797 2824
1798/* 2825/*
1799 * Debugfs info 2826 * Debugfs info
@@ -1805,21 +2832,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
1805 struct drm_info_node *node = (struct drm_info_node *) m->private; 2832 struct drm_info_node *node = (struct drm_info_node *) m->private;
1806 struct drm_device *dev = node->minor->dev; 2833 struct drm_device *dev = node->minor->dev;
1807 struct radeon_device *rdev = dev->dev_private; 2834 struct radeon_device *rdev = dev->dev_private;
1808 uint32_t rdp, wdp;
1809 unsigned count, i, j; 2835 unsigned count, i, j;
1810 2836
1811 radeon_ring_free_size(rdev); 2837 radeon_ring_free_size(rdev);
1812 rdp = RREG32(CP_RB_RPTR); 2838 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
1813 wdp = RREG32(CP_RB_WPTR);
1814 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1815 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); 2839 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
1816 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2840 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
1817 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2841 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
2842 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
2843 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
1818 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); 2844 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1819 seq_printf(m, "%u dwords in ring\n", count); 2845 seq_printf(m, "%u dwords in ring\n", count);
2846 i = rdev->cp.rptr;
1820 for (j = 0; j <= count; j++) { 2847 for (j = 0; j <= count; j++) {
1821 i = (rdp + j) & rdev->cp.ptr_mask;
1822 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); 2848 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2849 i = (i + 1) & rdev->cp.ptr_mask;
1823 } 2850 }
1824 return 0; 2851 return 0;
1825} 2852}