Diffstat (limited to 'drivers/gpu/drm/radeon/rs600.c')
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 231
1 file changed, 224 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a81bc7a21e14..79887cac5b54 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,135 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+void rs600_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, dyn_pwrmgt_sclk_length, dyn_sclk_vol_cntl;
+	u32 hdp_dyn_cntl, /*mc_host_dyn_cntl,*/ dyn_backbias_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	}
+
+	dyn_pwrmgt_sclk_length = RREG32_PLL(DYN_PWRMGT_SCLK_LENGTH);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_HILEN(0xf);
+	dyn_pwrmgt_sclk_length &= ~REDUCED_POWER_SCLK_LOLEN(0xf);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(2);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(2);
+		} else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4) {
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(4);
+			dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(4);
+		}
+	} else {
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_HILEN(1);
+		dyn_pwrmgt_sclk_length |= REDUCED_POWER_SCLK_LOLEN(1);
+	}
+	WREG32_PLL(DYN_PWRMGT_SCLK_LENGTH, dyn_pwrmgt_sclk_length);
+
+	dyn_sclk_vol_cntl = RREG32_PLL(DYN_SCLK_VOL_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		dyn_sclk_vol_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			dyn_sclk_vol_cntl |= VOLTAGE_DROP_SYNC;
+			dyn_sclk_vol_cntl |= VOLTAGE_DELAY_SEL(voltage->delay);
+		} else
+			dyn_sclk_vol_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		dyn_sclk_vol_cntl &= ~IO_CG_VOLTAGE_DROP;
+	WREG32_PLL(DYN_SCLK_VOL_CNTL, dyn_sclk_vol_cntl);
+
+	hdp_dyn_cntl = RREG32_PLL(HDP_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		hdp_dyn_cntl &= ~HDP_FORCEON;
+	else
+		hdp_dyn_cntl |= HDP_FORCEON;
+	WREG32_PLL(HDP_DYN_CNTL, hdp_dyn_cntl);
+#if 0
+	/* mc_host_dyn seems to cause hangs from time to time */
+	mc_host_dyn_cntl = RREG32_PLL(MC_HOST_DYN_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN)
+		mc_host_dyn_cntl &= ~MC_HOST_FORCEON;
+	else
+		mc_host_dyn_cntl |= MC_HOST_FORCEON;
+	WREG32_PLL(MC_HOST_DYN_CNTL, mc_host_dyn_cntl);
+#endif
+	dyn_backbias_cntl = RREG32_PLL(DYN_BACKBIAS_CNTL);
+	if (ps->misc & ATOM_PM_MISCINFO2_DYNAMIC_BACK_BIAS_EN)
+		dyn_backbias_cntl |= IO_CG_BACKBIAS_EN;
+	else
+		dyn_backbias_cntl &= ~IO_CG_BACKBIAS_EN;
+	WREG32_PLL(DYN_BACKBIAS_CNTL, dyn_backbias_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+void rs600_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
+void rs600_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			tmp = RREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset);
+			tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
+			WREG32(AVIVO_D1CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
+		}
+	}
+}
+
 /* hpd for digital panel detect/disconnect */
 bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
 {
@@ -147,6 +276,78 @@ void rs600_hpd_fini(struct radeon_device *rdev)
 	}
 }
 
+void rs600_bm_disable(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	/* disable bus mastering */
+	pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
+	pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+	mdelay(1);
+}
+
+int rs600_asic_reset(struct radeon_device *rdev)
+{
+	u32 status, tmp;
+
+	struct rv515_mc_save save;
+
+	/* Stops all mc clients */
+	rv515_mc_stop(rdev, &save);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	if (!G_000E40_GUI_ACTIVE(status)) {
+		return 0;
+	}
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* stop CP */
+	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	tmp = RREG32(RADEON_CP_RB_CNTL);
+	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
+	WREG32(RADEON_CP_RB_RPTR_WR, 0);
+	WREG32(RADEON_CP_RB_WPTR, 0);
+	WREG32(RADEON_CP_RB_CNTL, tmp);
+	pci_save_state(rdev->pdev);
+	/* disable bus mastering */
+	rs600_bm_disable(rdev);
+	/* reset GA+VAP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
+					S_0000F0_SOFT_RESET_GA(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset CP */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* reset MC */
+	WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_MC(1));
+	RREG32(R_0000F0_RBBM_SOFT_RESET);
+	mdelay(500);
+	WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
+	mdelay(1);
+	status = RREG32(R_000E40_RBBM_STATUS);
+	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
+	/* restore PCI & busmastering */
+	pci_restore_state(rdev->pdev);
+	/* Check if GPU is idle */
+	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
+		dev_err(rdev->dev, "failed to reset GPU\n");
+		rdev->gpu_lockup = true;
+		return -1;
+	}
+	rv515_mc_resume(rdev, &save);
+	dev_info(rdev->dev, "GPU reset succeed\n");
+	return 0;
+}
+
 /*
  * GART.
  */
@@ -310,6 +511,9 @@ int rs600_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.sw_int) {
 		tmp |= S_000040_SW_INT_EN(1);
 	}
+	if (rdev->irq.gui_idle) {
+		tmp |= S_000040_GUI_IDLE(1);
+	}
 	if (rdev->irq.crtc_vblank_int[0]) {
 		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
 	}
@@ -332,9 +536,15 @@ int rs600_irq_set(struct radeon_device *rdev)
 static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
-	uint32_t irq_mask = ~C_000044_SW_INT;
+	uint32_t irq_mask = S_000044_SW_INT(1);
 	u32 tmp;
 
+	/* the interrupt works, but the status bit is permanently asserted */
+	if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
+		if (!rdev->irq.gui_idle_acked)
+			irq_mask |= S_000044_GUI_IDLE_STAT(1);
+	}
+
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
 		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
@@ -382,6 +592,9 @@ int rs600_irq_process(struct radeon_device *rdev)
 	uint32_t r500_disp_int;
 	bool queue_hotplug = false;
 
+	/* reset gui idle ack. the status bit is broken */
+	rdev->irq.gui_idle_acked = false;
+
 	status = rs600_irq_ack(rdev, &r500_disp_int);
 	if (!status && !r500_disp_int) {
 		return IRQ_NONE;
@@ -390,6 +603,12 @@ int rs600_irq_process(struct radeon_device *rdev)
 		/* SW interrupt */
 		if (G_000044_SW_INT(status))
 			radeon_fence_process(rdev);
+		/* GUI idle */
+		if (G_000040_GUI_IDLE(status)) {
+			rdev->irq.gui_idle_acked = true;
+			rdev->pm.gui_idle = true;
+			wake_up(&rdev->irq.idle_queue);
+		}
 		/* Vertical blank interrupts */
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
 			drm_handle_vblank(rdev->ddev, 0);
@@ -411,6 +630,8 @@ int rs600_irq_process(struct radeon_device *rdev)
 		}
 		status = rs600_irq_ack(rdev, &r500_disp_int);
 	}
+	/* reset gui idle ack. the status bit is broken */
+	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
 		queue_work(rdev->wq, &rdev->hotplug_work);
 	if (rdev->msi_enabled) {
@@ -454,7 +675,6 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
 
 void rs600_gpu_init(struct radeon_device *rdev)
 {
-	r100_hdp_reset(rdev);
 	r420_pipes_init(rdev);
 	/* Wait for mc idle */
 	if (rs600_mc_wait_for_idle(rdev))
@@ -601,7 +821,7 @@ int rs600_resume(struct radeon_device *rdev)
 	/* Resume clock before doing reset */
 	rv515_clock_startup(rdev);
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
 			RREG32(R_0007C0_CP_STAT));
@@ -626,7 +846,6 @@ int rs600_suspend(struct radeon_device *rdev)
 
 void rs600_fini(struct radeon_device *rdev)
 {
-	radeon_pm_fini(rdev);
 	r100_cp_fini(rdev);
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
@@ -664,7 +883,7 @@ int rs600_init(struct radeon_device *rdev)
 		return -EINVAL;
 	}
 	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_gpu_reset(rdev)) {
+	if (radeon_asic_reset(rdev)) {
 		dev_warn(rdev->dev,
 			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
 			RREG32(R_000E40_RBBM_STATUS),
@@ -676,8 +895,6 @@ int rs600_init(struct radeon_device *rdev)
 
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
-	/* Initialize power management */
-	radeon_pm_init(rdev);
 	/* initialize memory controller */
 	rs600_mc_init(rdev);
 	rs600_debugfs(rdev);