Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
 drivers/gpu/drm/radeon/evergreen.c | 270 +++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 206 insertions(+), 64 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7fb3d2e0434c..e585a3b947eb 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -99,6 +99,14 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 	}
 }
 
+/**
+ * dce4_wait_for_vblank - vblank wait asic callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to wait for vblank on
+ *
+ * Wait for vblank on the requested crtc (evergreen+).
+ */
 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
@@ -118,18 +126,49 @@ void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 	}
 }
 
+/**
+ * evergreen_pre_page_flip - pre-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to prepare for pageflip on
+ *
+ * Pre-pageflip callback (evergreen+).
+ * Enables the pageflip irq (vblank irq).
+ */
 void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
 {
 	/* enable the pflip int */
 	radeon_irq_kms_pflip_irq_get(rdev, crtc);
 }
 
+/**
+ * evergreen_post_page_flip - post-pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc: crtc to cleanup pageflip on
+ *
+ * Post-pageflip callback (evergreen+).
+ * Disables the pageflip irq (vblank irq).
+ */
 void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
 {
 	/* disable the pflip int */
 	radeon_irq_kms_pflip_irq_put(rdev, crtc);
 }
 
+/**
+ * evergreen_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to do the pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (evergreen+).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ * Returns the current update pending status.
+ */
 u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
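
The kernel-doc above compresses a lock/program/wait/unlock sequence. As a reading aid, a minimal sketch of that sequence follows; GRPH_UPDATE, GRPH_UPDATE_LOCK, SURFACE_UPDATE_PENDING and GRPH_PRIMARY_SURFACE_ADDRESS abbreviate the driver's EVERGREEN_GRPH_* register definitions, and the surface-address programming is reduced to a single write, so treat it as the shape of the function, not its body:

/* Sketch only: the update_pending handshake the kernel-doc describes. */
static u32 page_flip_sketch(struct radeon_device *rdev, u32 offset, u64 crtc_base)
{
	u32 tmp = RREG32(GRPH_UPDATE + offset);
	int i;

	tmp |= GRPH_UPDATE_LOCK;	/* hold off the double-buffered update */
	WREG32(GRPH_UPDATE + offset, tmp);

	/* write the new scanout address (reduced to one register here) */
	WREG32(GRPH_PRIMARY_SURFACE_ADDRESS + offset, (u32)crtc_base);

	/* wait for the hardware to latch the new address */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(GRPH_UPDATE + offset) & SURFACE_UPDATE_PENDING)
			break;
		udelay(1);
	}

	tmp &= ~GRPH_UPDATE_LOCK;	/* flip completes inside the next vblank */
	WREG32(GRPH_UPDATE + offset, tmp);

	/* return the current update pending status */
	return RREG32(GRPH_UPDATE + offset) & SURFACE_UPDATE_PENDING;
}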
@@ -214,6 +253,15 @@ int sumo_get_temp(struct radeon_device *rdev)
 	return actual_temp * 1000;
 }
 
+/**
+ * sumo_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (sumo, trinity, SI).
+ * Used for profile mode only.
+ */
 void sumo_pm_init_profile(struct radeon_device *rdev)
 {
 	int idx;
@@ -265,6 +313,14 @@ void sumo_pm_init_profile(struct radeon_device *rdev)
 		rdev->pm.power_state[idx].num_clock_modes - 1;
 }
 
+/**
+ * evergreen_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, etc.) (evergreen+).
+ */
 void evergreen_pm_misc(struct radeon_device *rdev)
 {
 	int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -292,6 +348,13 @@ void evergreen_pm_misc(struct radeon_device *rdev)
 	}
 }
 
+/**
+ * evergreen_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (evergreen+).
+ */
 void evergreen_pm_prepare(struct radeon_device *rdev)
 {
 	struct drm_device *ddev = rdev->ddev;
@@ -310,6 +373,13 @@ void evergreen_pm_prepare(struct radeon_device *rdev)
 	}
 }
 
+/**
+ * evergreen_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (evergreen+).
+ */
 void evergreen_pm_finish(struct radeon_device *rdev)
 {
 	struct drm_device *ddev = rdev->ddev;
@@ -328,6 +398,15 @@ void evergreen_pm_finish(struct radeon_device *rdev)
 	}
 }
 
+/**
+ * evergreen_hpd_sense - hpd sense callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Checks if a digital monitor is connected (evergreen+).
+ * Returns true if connected, false if not connected.
+ */
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
 	bool connected = false;
@@ -364,6 +443,14 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 	return connected;
 }
 
+/**
+ * evergreen_hpd_set_polarity - hpd set polarity callback.
+ *
+ * @rdev: radeon_device pointer
+ * @hpd: hpd (hotplug detect) pin
+ *
+ * Set the polarity of the hpd pin (evergreen+).
+ */
 void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 				enum radeon_hpd_id hpd)
 {
@@ -424,10 +511,19 @@ void evergreen_hpd_set_polarity(struct radeon_device *rdev,
 	}
 }
 
+/**
+ * evergreen_hpd_init - hpd setup callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup the hpd pins used by the card (evergreen+).
+ * Enable the pin, set the polarity, and enable the hpd interrupts.
+ */
 void evergreen_hpd_init(struct radeon_device *rdev)
 {
 	struct drm_device *dev = rdev->ddev;
 	struct drm_connector *connector;
+	unsigned enabled = 0;
 	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
 		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
 
@@ -436,73 +532,72 @@ void evergreen_hpd_init(struct radeon_device *rdev)
 		switch (radeon_connector->hpd.hpd) {
 		case RADEON_HPD_1:
 			WREG32(DC_HPD1_CONTROL, tmp);
-			rdev->irq.hpd[0] = true;
 			break;
 		case RADEON_HPD_2:
 			WREG32(DC_HPD2_CONTROL, tmp);
-			rdev->irq.hpd[1] = true;
 			break;
 		case RADEON_HPD_3:
 			WREG32(DC_HPD3_CONTROL, tmp);
-			rdev->irq.hpd[2] = true;
 			break;
 		case RADEON_HPD_4:
 			WREG32(DC_HPD4_CONTROL, tmp);
-			rdev->irq.hpd[3] = true;
 			break;
 		case RADEON_HPD_5:
 			WREG32(DC_HPD5_CONTROL, tmp);
-			rdev->irq.hpd[4] = true;
 			break;
 		case RADEON_HPD_6:
 			WREG32(DC_HPD6_CONTROL, tmp);
-			rdev->irq.hpd[5] = true;
 			break;
 		default:
 			break;
 		}
 		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+		enabled |= 1 << radeon_connector->hpd.hpd;
 	}
-	if (rdev->irq.installed)
-		evergreen_irq_set(rdev);
+	radeon_irq_kms_enable_hpd(rdev, enabled);
 }
 
+/**
+ * evergreen_hpd_fini - hpd tear down callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Tear down the hpd pins used by the card (evergreen+).
+ * Disable the hpd interrupts.
+ */
 void evergreen_hpd_fini(struct radeon_device *rdev)
 {
 	struct drm_device *dev = rdev->ddev;
 	struct drm_connector *connector;
+	unsigned disabled = 0;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 		switch (radeon_connector->hpd.hpd) {
 		case RADEON_HPD_1:
 			WREG32(DC_HPD1_CONTROL, 0);
-			rdev->irq.hpd[0] = false;
 			break;
 		case RADEON_HPD_2:
 			WREG32(DC_HPD2_CONTROL, 0);
-			rdev->irq.hpd[1] = false;
 			break;
 		case RADEON_HPD_3:
 			WREG32(DC_HPD3_CONTROL, 0);
-			rdev->irq.hpd[2] = false;
 			break;
 		case RADEON_HPD_4:
 			WREG32(DC_HPD4_CONTROL, 0);
-			rdev->irq.hpd[3] = false;
 			break;
 		case RADEON_HPD_5:
 			WREG32(DC_HPD5_CONTROL, 0);
-			rdev->irq.hpd[4] = false;
 			break;
 		case RADEON_HPD_6:
 			WREG32(DC_HPD6_CONTROL, 0);
-			rdev->irq.hpd[5] = false;
 			break;
 		default:
 			break;
 		}
+		disabled |= 1 << radeon_connector->hpd.hpd;
 	}
+	radeon_irq_kms_disable_hpd(rdev, disabled);
 }
 
 /* watermark setup */
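
The hpd_init/hpd_fini rework above stops toggling rdev->irq.hpd[n] pin by pin and instead accumulates a bitmask of pins, handing it once to radeon_irq_kms_enable_hpd()/radeon_irq_kms_disable_hpd(). One plausible shape for the enable side, assuming the helper folds the mask into the irq bookkeeping under the irq lock and reprograms the hardware a single time (a sketch of the assumed API, not its verbatim body):

/* Assumed shape: enable all requested hpd pins, then one irq update. */
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
		rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
	radeon_irq_set(rdev);	/* single hardware reprogram */
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}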
@@ -933,6 +1028,14 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
 
 }
 
+/**
+ * evergreen_bandwidth_update - update display watermarks callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Update the display watermarks based on the requested mode(s)
+ * (evergreen+).
+ */
 void evergreen_bandwidth_update(struct radeon_device *rdev)
 {
 	struct drm_display_mode *mode0 = NULL;
@@ -956,6 +1059,15 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
 	}
 }
 
+/**
+ * evergreen_mc_wait_for_idle - wait for MC idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Wait for the MC (memory controller) to be idle
+ * (evergreen+).
+ * Returns 0 if the MC is idle, -1 if not.
+ */
 int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
 {
 	unsigned i;
@@ -1371,12 +1483,28 @@ void evergreen_mc_program(struct radeon_device *rdev)
  */
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 {
-	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+	u32 next_rptr;
 
 	/* set to DX10/11 mode */
 	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
 	radeon_ring_write(ring, 1);
-	/* FIXME: implement */
+
+	if (ring->rptr_save_reg) {
+		next_rptr = ring->wptr + 3 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(ring, ((ring->rptr_save_reg -
+					  PACKET3_SET_CONFIG_REG_START) >> 2));
+		radeon_ring_write(ring, next_rptr);
+	} else if (rdev->wb.enabled) {
+		next_rptr = ring->wptr + 5 + 4;
+		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
+		radeon_ring_write(ring, next_rptr);
+		radeon_ring_write(ring, 0);
+	}
+
 	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
 	radeon_ring_write(ring,
 #ifdef __BIG_ENDIAN
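
The next_rptr values above are dword counts: a PACKET3 header is one dword, so the PACKET3_SET_CONFIG_REG write above is a 3-dword packet (header, register offset, value), the PACKET3_MEM_WRITE form is 5 dwords (header plus four payload words), and the PACKET3_INDIRECT_BUFFER packet emitted next is 4 dwords. Hence wptr + 3 + 4 (register path) or wptr + 5 + 4 (writeback path) is exactly where the read pointer will stand once the IB packet has been fetched, which is the value worth saving for later hang diagnosis. A reading aid, not driver code:

/* Dword accounting behind the next_rptr arithmetic above. */
enum {
	SET_CONFIG_REG_PKT  = 1 + 2,	/* header + reg offset + value            */
	MEM_WRITE_PKT       = 1 + 4,	/* header + addr lo + addr hi/ctl + data + 0 */
	INDIRECT_BUFFER_PKT = 1 + 3,	/* header + ib addr lo + ib addr hi + size   */
};
/* register path: next_rptr = wptr + SET_CONFIG_REG_PKT + INDIRECT_BUFFER_PKT = wptr + 3 + 4
 * memory path:   next_rptr = wptr + MEM_WRITE_PKT      + INDIRECT_BUFFER_PKT = wptr + 5 + 4 */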
@@ -2188,6 +2316,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
 		 RREG32(GRBM_STATUS_SE1));
 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
 		 RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		 RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		 RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT = 0x%08X\n",
+		 RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
+		 RREG32(CP_STAT));
 	evergreen_mc_stop(rdev, &save);
 	if (evergreen_mc_wait_for_idle(rdev)) {
 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -2225,6 +2361,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
 		 RREG32(GRBM_STATUS_SE1));
 	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
 		 RREG32(SRBM_STATUS));
+	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
+		 RREG32(CP_STALLED_STAT1));
+	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
+		 RREG32(CP_STALLED_STAT2));
+	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT = 0x%08X\n",
+		 RREG32(CP_BUSY_STAT));
+	dev_info(rdev->dev, "  R_008680_CP_STAT = 0x%08X\n",
+		 RREG32(CP_STAT));
 	evergreen_mc_resume(rdev, &save);
 	return 0;
 }
@@ -2348,20 +2492,20 @@ int evergreen_irq_set(struct radeon_device *rdev)
 
 	if (rdev->family >= CHIP_CAYMAN) {
 		/* enable CP interrupts on all rings */
-		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 		}
-		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
 			DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
 			cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
 		}
-		if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
 			DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
 			cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
 		}
 	} else {
-		if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
 			DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
 			cp_int_cntl |= RB_INT_ENABLE;
 			cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -2369,32 +2513,32 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	}
 
 	if (rdev->irq.crtc_vblank_int[0] ||
-	    rdev->irq.pflip[0]) {
+	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
 		crtc1 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[1] ||
-	    rdev->irq.pflip[1]) {
+	    atomic_read(&rdev->irq.pflip[1])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
 		crtc2 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[2] ||
-	    rdev->irq.pflip[2]) {
+	    atomic_read(&rdev->irq.pflip[2])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
 		crtc3 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[3] ||
-	    rdev->irq.pflip[3]) {
+	    atomic_read(&rdev->irq.pflip[3])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
 		crtc4 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[4] ||
-	    rdev->irq.pflip[4]) {
+	    atomic_read(&rdev->irq.pflip[4])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
 		crtc5 |= VBLANK_INT_MASK;
 	}
 	if (rdev->irq.crtc_vblank_int[5] ||
-	    rdev->irq.pflip[5]) {
+	    atomic_read(&rdev->irq.pflip[5])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
 		crtc6 |= VBLANK_INT_MASK;
 	}
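
The pflip counters move from plain flags to atomics above because the pageflip path takes and drops references outside the irq-programming lock. One plausible shape for the get side, assuming the counter pairs atomic_inc_return()/atomic_dec_and_test() and only reprograms the hardware on the 0-to-1 (and, in the put, 1-to-0) transition; a sketch, not the verbatim helper:

/* Assumed shape: enable the pflip interrupt on the first reference. */
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
	unsigned long irqflags;

	if (crtc < 0 || crtc >= rdev->num_crtc)
		return;

	if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
		spin_lock_irqsave(&rdev->irq.lock, irqflags);
		radeon_irq_set(rdev);	/* sees the now non-zero count */
		spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	}
}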
@@ -2676,7 +2820,6 @@ int evergreen_irq_process(struct radeon_device *rdev)
 	u32 rptr;
 	u32 src_id, src_data;
 	u32 ring_index;
-	unsigned long flags;
 	bool queue_hotplug = false;
 	bool queue_hdmi = false;
 
@@ -2684,22 +2827,21 @@ int evergreen_irq_process(struct radeon_device *rdev)
 		return IRQ_NONE;
 
 	wptr = evergreen_get_ih_wptr(rdev);
+
+restart_ih:
+	/* is somebody else already processing irqs? */
+	if (atomic_xchg(&rdev->ih.lock, 1))
+		return IRQ_NONE;
+
 	rptr = rdev->ih.rptr;
 	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
-	spin_lock_irqsave(&rdev->ih.lock, flags);
-	if (rptr == wptr) {
-		spin_unlock_irqrestore(&rdev->ih.lock, flags);
-		return IRQ_NONE;
-	}
-restart_ih:
 	/* Order reading of wptr vs. reading of IH ring data */
 	rmb();
 
 	/* display interrupts */
 	evergreen_irq_ack(rdev);
 
-	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
@@ -2716,7 +2858,7 @@ restart_ih:
 						rdev->pm.vblank_sync = true;
 						wake_up(&rdev->irq.vblank_queue);
 					}
-					if (rdev->irq.pflip[0])
+					if (atomic_read(&rdev->irq.pflip[0]))
 						radeon_crtc_handle_flip(rdev, 0);
 					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
@@ -2742,7 +2884,7 @@ restart_ih:
 						rdev->pm.vblank_sync = true;
 						wake_up(&rdev->irq.vblank_queue);
 					}
-					if (rdev->irq.pflip[1])
+					if (atomic_read(&rdev->irq.pflip[1]))
 						radeon_crtc_handle_flip(rdev, 1);
 					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
@@ -2768,7 +2910,7 @@ restart_ih:
 						rdev->pm.vblank_sync = true;
 						wake_up(&rdev->irq.vblank_queue);
 					}
-					if (rdev->irq.pflip[2])
+					if (atomic_read(&rdev->irq.pflip[2]))
 						radeon_crtc_handle_flip(rdev, 2);
 					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D3 vblank\n");
@@ -2794,7 +2936,7 @@ restart_ih:
 						rdev->pm.vblank_sync = true;
 						wake_up(&rdev->irq.vblank_queue);
 					}
-					if (rdev->irq.pflip[3])
+					if (atomic_read(&rdev->irq.pflip[3]))
 						radeon_crtc_handle_flip(rdev, 3);
 					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D4 vblank\n");
@@ -2820,7 +2962,7 @@ restart_ih:
 						rdev->pm.vblank_sync = true;
 						wake_up(&rdev->irq.vblank_queue);
 					}
-					if (rdev->irq.pflip[4])
+					if (atomic_read(&rdev->irq.pflip[4]))
 						radeon_crtc_handle_flip(rdev, 4);
 					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D5 vblank\n");
@@ -2846,7 +2988,7 @@ restart_ih:
 						rdev->pm.vblank_sync = true;
 						wake_up(&rdev->irq.vblank_queue);
 					}
-					if (rdev->irq.pflip[5])
+					if (atomic_read(&rdev->irq.pflip[5]))
 						radeon_crtc_handle_flip(rdev, 5);
 					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D6 vblank\n");
@@ -2986,7 +3128,6 @@ restart_ih:
 			break;
 		case 233: /* GUI IDLE */
 			DRM_DEBUG("IH: GUI idle\n");
-			rdev->pm.gui_idle = true;
 			wake_up(&rdev->irq.idle_queue);
 			break;
 		default:
@@ -2998,17 +3139,19 @@ restart_ih:
 		rptr += 16;
 		rptr &= rdev->ih.ptr_mask;
 	}
-	/* make sure wptr hasn't changed while processing */
-	wptr = evergreen_get_ih_wptr(rdev);
-	if (wptr != rdev->ih.wptr)
-		goto restart_ih;
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	if (queue_hdmi)
 		schedule_work(&rdev->audio_work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
-	spin_unlock_irqrestore(&rdev->ih.lock, flags);
+	atomic_set(&rdev->ih.lock, 0);
+
+	/* make sure wptr hasn't changed while processing */
+	wptr = evergreen_get_ih_wptr(rdev);
+	if (wptr != rptr)
+		goto restart_ih;
+
 	return IRQ_HANDLED;
 }
 
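
The restructured handler above trades the old ih spinlock for an atomic flag: atomic_xchg() claims the ring and detects a concurrent owner in one step, and the wptr re-check after the flag is dropped restarts processing if new entries raced in during that window. The bare pattern in isolation, with hypothetical names (ih_sketch, ih_read_hw_wptr, ih_handle_entry, ih_write_hw_rptr are illustrative, not driver symbols):

#include <linux/atomic.h>
#include <linux/interrupt.h>

struct ih_sketch { atomic_t lock; u32 rptr; /* ... ring storage ... */ };

u32 ih_read_hw_wptr(struct ih_sketch *ih);		/* hypothetical */
u32 ih_handle_entry(struct ih_sketch *ih, u32 rptr);	/* hypothetical */
void ih_write_hw_rptr(struct ih_sketch *ih, u32 rptr);	/* hypothetical */

static irqreturn_t ih_process_sketch(struct ih_sketch *ih)
{
	u32 wptr = ih_read_hw_wptr(ih);
	u32 rptr;

restart:
	if (atomic_xchg(&ih->lock, 1))
		return IRQ_NONE;	/* another CPU already owns the ring */

	rptr = ih->rptr;
	while (rptr != wptr)
		rptr = ih_handle_entry(ih, rptr);	/* returns advanced rptr */

	ih->rptr = rptr;
	ih_write_hw_rptr(ih, rptr);
	atomic_set(&ih->lock, 0);	/* release before the re-check */

	wptr = ih_read_hw_wptr(ih);	/* did anything arrive meanwhile? */
	if (wptr != rptr)
		goto restart;

	return IRQ_HANDLED;
}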
@@ -3096,13 +3239,11 @@ static int evergreen_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_pool_start(rdev);
-	if (r)
-		return r;
-
-	r = radeon_ib_ring_tests(rdev);
-	if (r)
-		return r;
+	r = radeon_ib_pool_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+		return r;
+	}
 
 	r = r600_audio_init(rdev);
 	if (r) {
@@ -3146,9 +3287,6 @@ int evergreen_suspend(struct radeon_device *rdev)
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 
 	r600_audio_fini(rdev);
-	/* FIXME: we should wait for ring to be empty */
-	radeon_ib_pool_suspend(rdev);
-	r600_blit_suspend(rdev);
 	r700_cp_stop(rdev);
 	ring->ready = false;
 	evergreen_irq_suspend(rdev);
@@ -3234,20 +3372,14 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 
-	r = radeon_ib_pool_init(rdev);
 	rdev->accel_working = true;
-	if (r) {
-		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
-		rdev->accel_working = false;
-	}
-
 	r = evergreen_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r700_cp_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
-		r100_ib_fini(rdev);
+		radeon_ib_pool_fini(rdev);
 		radeon_irq_kms_fini(rdev);
 		evergreen_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
@@ -3274,7 +3406,7 @@ void evergreen_fini(struct radeon_device *rdev)
 	r700_cp_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
-	r100_ib_fini(rdev);
+	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	evergreen_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
@@ -3289,7 +3421,8 @@ void evergreen_fini(struct radeon_device *rdev)
 
 void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 {
-	u32 link_width_cntl, speed_cntl;
+	u32 link_width_cntl, speed_cntl, mask;
+	int ret;
 
 	if (radeon_pcie_gen2 == 0)
 		return;
@@ -3304,6 +3437,15 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 	if (ASIC_IS_X2(rdev))
 		return;
 
+	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+	if (ret != 0)
+		return;
+
+	if (!(mask & DRM_PCIE_SPEED_50))
+		return;
+
+	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+
 	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
 	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
 	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
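
The new guard above asks the DRM core for the root port's supported link speeds before touching link training: DRM_PCIE_SPEED_50 is the 5.0 GT/s (PCIe gen2) capability bit, and without it retraining to gen2 cannot succeed, so the function bails out early. Condensed into a predicate, reusing only the two symbols visible in the diff (a sketch, not driver code):

/* Sketch: proceed with gen2 bring-up only when 5.0 GT/s is advertised. */
static bool pcie_gen2_supported(struct drm_device *ddev)
{
	u32 mask;

	if (drm_pcie_get_speed_cap_mask(ddev, &mask) != 0)
		return false;	/* capability unknown: stay at gen1 */

	return (mask & DRM_PCIE_SPEED_50) != 0;
}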