aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/r100.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--drivers/gpu/drm/radeon/r100.c729
1 files changed, 517 insertions, 212 deletions
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cf60c0b3ef15..cc004b05d63e 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -37,6 +37,7 @@
37#include "rs100d.h" 37#include "rs100d.h"
38#include "rv200d.h" 38#include "rv200d.h"
39#include "rv250d.h" 39#include "rv250d.h"
40#include "atom.h"
40 41
41#include <linux/firmware.h> 42#include <linux/firmware.h>
42#include <linux/platform_device.h> 43#include <linux/platform_device.h>
@@ -67,6 +68,264 @@ MODULE_FIRMWARE(FIRMWARE_R520);
67 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 68 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
68 */ 69 */
69 70
71void r100_pm_get_dynpm_state(struct radeon_device *rdev)
72{
73 int i;
74 rdev->pm.dynpm_can_upclock = true;
75 rdev->pm.dynpm_can_downclock = true;
76
77 switch (rdev->pm.dynpm_planned_action) {
78 case DYNPM_ACTION_MINIMUM:
79 rdev->pm.requested_power_state_index = 0;
80 rdev->pm.dynpm_can_downclock = false;
81 break;
82 case DYNPM_ACTION_DOWNCLOCK:
83 if (rdev->pm.current_power_state_index == 0) {
84 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
85 rdev->pm.dynpm_can_downclock = false;
86 } else {
87 if (rdev->pm.active_crtc_count > 1) {
88 for (i = 0; i < rdev->pm.num_power_states; i++) {
89 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
90 continue;
91 else if (i >= rdev->pm.current_power_state_index) {
92 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
93 break;
94 } else {
95 rdev->pm.requested_power_state_index = i;
96 break;
97 }
98 }
99 } else
100 rdev->pm.requested_power_state_index =
101 rdev->pm.current_power_state_index - 1;
102 }
103 /* don't use the power state if crtcs are active and no display flag is set */
104 if ((rdev->pm.active_crtc_count > 0) &&
105 (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
106 RADEON_PM_MODE_NO_DISPLAY)) {
107 rdev->pm.requested_power_state_index++;
108 }
109 break;
110 case DYNPM_ACTION_UPCLOCK:
111 if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
112 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
113 rdev->pm.dynpm_can_upclock = false;
114 } else {
115 if (rdev->pm.active_crtc_count > 1) {
116 for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
117 if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
118 continue;
119 else if (i <= rdev->pm.current_power_state_index) {
120 rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
121 break;
122 } else {
123 rdev->pm.requested_power_state_index = i;
124 break;
125 }
126 }
127 } else
128 rdev->pm.requested_power_state_index =
129 rdev->pm.current_power_state_index + 1;
130 }
131 break;
132 case DYNPM_ACTION_DEFAULT:
133 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
134 rdev->pm.dynpm_can_upclock = false;
135 break;
136 case DYNPM_ACTION_NONE:
137 default:
138 DRM_ERROR("Requested mode for not defined action\n");
139 return;
140 }
141 /* only one clock mode per power state */
142 rdev->pm.requested_clock_mode_index = 0;
143
144 DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
145 rdev->pm.power_state[rdev->pm.requested_power_state_index].
146 clock_info[rdev->pm.requested_clock_mode_index].sclk,
147 rdev->pm.power_state[rdev->pm.requested_power_state_index].
148 clock_info[rdev->pm.requested_clock_mode_index].mclk,
149 rdev->pm.power_state[rdev->pm.requested_power_state_index].
150 pcie_lanes);
151}
152
153void r100_pm_init_profile(struct radeon_device *rdev)
154{
155 /* default */
156 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
157 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
158 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
159 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
160 /* low sh */
161 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
162 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
163 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
164 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
165 /* high sh */
166 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
167 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
168 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
169 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
170 /* low mh */
171 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
172 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
173 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
174 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
175 /* high mh */
176 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
177 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
178 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
179 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
180}
181
/* Apply the misc settings of the requested power state: GPIO-controlled
 * voltage, dynamic SCLK reduction, voltage-drop clock gating, HDP forcing
 * and PCIe lane count.  The register ordering below (read-modify of the
 * three SCLK PLL registers, then the three writes together) is kept
 * deliberately; do not reorder. */
void r100_pm_misc(struct radeon_device *rdev)
{
	int requested_index = rdev->pm.requested_power_state_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;

	/* drive the voltage GPIO; active_high selects which level means
	 * "voltage dropped", and the state's misc flag selects drop vs. no drop */
	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp |= voltage->gpio.mask;
			else
				tmp &= ~(voltage->gpio.mask);
			WREG32(voltage->gpio.reg, tmp);
			/* let the regulator settle before touching clocks */
			if (voltage->delay)
				udelay(voltage->delay);
		} else {
			/* opposite polarity: make sure voltage is NOT dropped */
			tmp = RREG32(voltage->gpio.reg);
			if (voltage->active_high)
				tmp &= ~voltage->gpio.mask;
			else
				tmp |= voltage->gpio.mask;
			WREG32(voltage->gpio.reg, tmp);
			if (voltage->delay)
				udelay(voltage->delay);
		}
	}

	/* read-modify the SCLK control registers; clear the fields we may set */
	sclk_cntl = RREG32_PLL(SCLK_CNTL);
	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
		else
			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
		/* NOTE(review): REDUCED_SPEED_SCLK_SEL(0) expands to 0, so the
		 * divider-by-2 branch is effectively a no-op OR; presumably kept
		 * for symmetry with the field already cleared above — verify
		 * against the register spec */
		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
	} else
		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;

	/* dynamic voltage drop: map the state's delay (in units the GPIO
	 * block understands, 33/66/99/132) onto the delay-select field */
	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
		if (voltage->delay) {
			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
			switch (voltage->delay) {
			case 33:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
				break;
			case 66:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
				break;
			case 99:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
				break;
			case 132:
				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
				break;
			}
		} else
			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
	} else
		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;

	/* allow HDP clock gating only if the state asks for it */
	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
		sclk_cntl &= ~FORCE_HDP;
	else
		sclk_cntl |= FORCE_HDP;

	WREG32_PLL(SCLK_CNTL, sclk_cntl);
	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);

	/* set pcie lanes */
	if ((rdev->flags & RADEON_IS_PCIE) &&
	    !(rdev->flags & RADEON_IS_IGP) &&
	    rdev->asic->set_pcie_lanes &&
	    (ps->pcie_lanes !=
	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
		radeon_set_pcie_lanes(rdev,
				      ps->pcie_lanes);
		DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
	}
}
272
273void r100_pm_prepare(struct radeon_device *rdev)
274{
275 struct drm_device *ddev = rdev->ddev;
276 struct drm_crtc *crtc;
277 struct radeon_crtc *radeon_crtc;
278 u32 tmp;
279
280 /* disable any active CRTCs */
281 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
282 radeon_crtc = to_radeon_crtc(crtc);
283 if (radeon_crtc->enabled) {
284 if (radeon_crtc->crtc_id) {
285 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
286 tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
287 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
288 } else {
289 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
290 tmp |= RADEON_CRTC_DISP_REQ_EN_B;
291 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
292 }
293 }
294 }
295}
296
297void r100_pm_finish(struct radeon_device *rdev)
298{
299 struct drm_device *ddev = rdev->ddev;
300 struct drm_crtc *crtc;
301 struct radeon_crtc *radeon_crtc;
302 u32 tmp;
303
304 /* enable any active CRTCs */
305 list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
306 radeon_crtc = to_radeon_crtc(crtc);
307 if (radeon_crtc->enabled) {
308 if (radeon_crtc->crtc_id) {
309 tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
310 tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
311 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
312 } else {
313 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
314 tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
315 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
316 }
317 }
318 }
319}
320
321bool r100_gui_idle(struct radeon_device *rdev)
322{
323 if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
324 return false;
325 else
326 return true;
327}
328
70/* hpd for digital panel detect/disconnect */ 329/* hpd for digital panel detect/disconnect */
71bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) 330bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
72{ 331{
@@ -254,6 +513,9 @@ int r100_irq_set(struct radeon_device *rdev)
254 if (rdev->irq.sw_int) { 513 if (rdev->irq.sw_int) {
255 tmp |= RADEON_SW_INT_ENABLE; 514 tmp |= RADEON_SW_INT_ENABLE;
256 } 515 }
516 if (rdev->irq.gui_idle) {
517 tmp |= RADEON_GUI_IDLE_MASK;
518 }
257 if (rdev->irq.crtc_vblank_int[0]) { 519 if (rdev->irq.crtc_vblank_int[0]) {
258 tmp |= RADEON_CRTC_VBLANK_MASK; 520 tmp |= RADEON_CRTC_VBLANK_MASK;
259 } 521 }
@@ -288,6 +550,12 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
288 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | 550 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
289 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; 551 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
290 552
553 /* the interrupt works, but the status bit is permanently asserted */
554 if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
555 if (!rdev->irq.gui_idle_acked)
556 irq_mask |= RADEON_GUI_IDLE_STAT;
557 }
558
291 if (irqs) { 559 if (irqs) {
292 WREG32(RADEON_GEN_INT_STATUS, irqs); 560 WREG32(RADEON_GEN_INT_STATUS, irqs);
293 } 561 }
@@ -299,6 +567,9 @@ int r100_irq_process(struct radeon_device *rdev)
299 uint32_t status, msi_rearm; 567 uint32_t status, msi_rearm;
300 bool queue_hotplug = false; 568 bool queue_hotplug = false;
301 569
570 /* reset gui idle ack. the status bit is broken */
571 rdev->irq.gui_idle_acked = false;
572
302 status = r100_irq_ack(rdev); 573 status = r100_irq_ack(rdev);
303 if (!status) { 574 if (!status) {
304 return IRQ_NONE; 575 return IRQ_NONE;
@@ -311,6 +582,12 @@ int r100_irq_process(struct radeon_device *rdev)
311 if (status & RADEON_SW_INT_TEST) { 582 if (status & RADEON_SW_INT_TEST) {
312 radeon_fence_process(rdev); 583 radeon_fence_process(rdev);
313 } 584 }
585 /* gui idle interrupt */
586 if (status & RADEON_GUI_IDLE_STAT) {
587 rdev->irq.gui_idle_acked = true;
588 rdev->pm.gui_idle = true;
589 wake_up(&rdev->irq.idle_queue);
590 }
314 /* Vertical blank interrupts */ 591 /* Vertical blank interrupts */
315 if (status & RADEON_CRTC_VBLANK_STAT) { 592 if (status & RADEON_CRTC_VBLANK_STAT) {
316 drm_handle_vblank(rdev->ddev, 0); 593 drm_handle_vblank(rdev->ddev, 0);
@@ -332,6 +609,8 @@ int r100_irq_process(struct radeon_device *rdev)
332 } 609 }
333 status = r100_irq_ack(rdev); 610 status = r100_irq_ack(rdev);
334 } 611 }
612 /* reset gui idle ack. the status bit is broken */
613 rdev->irq.gui_idle_acked = false;
335 if (queue_hotplug) 614 if (queue_hotplug)
336 queue_work(rdev->wq, &rdev->hotplug_work); 615 queue_work(rdev->wq, &rdev->hotplug_work);
337 if (rdev->msi_enabled) { 616 if (rdev->msi_enabled) {
@@ -663,26 +942,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
663 if (r100_debugfs_cp_init(rdev)) { 942 if (r100_debugfs_cp_init(rdev)) {
664 DRM_ERROR("Failed to register debugfs file for CP !\n"); 943 DRM_ERROR("Failed to register debugfs file for CP !\n");
665 } 944 }
666 /* Reset CP */
667 tmp = RREG32(RADEON_CP_CSQ_STAT);
668 if ((tmp & (1 << 31))) {
669 DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
670 WREG32(RADEON_CP_CSQ_MODE, 0);
671 WREG32(RADEON_CP_CSQ_CNTL, 0);
672 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
673 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
674 mdelay(2);
675 WREG32(RADEON_RBBM_SOFT_RESET, 0);
676 tmp = RREG32(RADEON_RBBM_SOFT_RESET);
677 mdelay(2);
678 tmp = RREG32(RADEON_CP_CSQ_STAT);
679 if ((tmp & (1 << 31))) {
680 DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
681 }
682 } else {
683 DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
684 }
685
686 if (!rdev->me_fw) { 945 if (!rdev->me_fw) {
687 r = r100_cp_init_microcode(rdev); 946 r = r100_cp_init_microcode(rdev);
688 if (r) { 947 if (r) {
@@ -787,39 +1046,6 @@ void r100_cp_disable(struct radeon_device *rdev)
787 } 1046 }
788} 1047}
789 1048
790int r100_cp_reset(struct radeon_device *rdev)
791{
792 uint32_t tmp;
793 bool reinit_cp;
794 int i;
795
796 reinit_cp = rdev->cp.ready;
797 rdev->cp.ready = false;
798 WREG32(RADEON_CP_CSQ_MODE, 0);
799 WREG32(RADEON_CP_CSQ_CNTL, 0);
800 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
801 (void)RREG32(RADEON_RBBM_SOFT_RESET);
802 udelay(200);
803 WREG32(RADEON_RBBM_SOFT_RESET, 0);
804 /* Wait to prevent race in RBBM_STATUS */
805 mdelay(1);
806 for (i = 0; i < rdev->usec_timeout; i++) {
807 tmp = RREG32(RADEON_RBBM_STATUS);
808 if (!(tmp & (1 << 16))) {
809 DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
810 tmp);
811 if (reinit_cp) {
812 return r100_cp_init(rdev, rdev->cp.ring_size);
813 }
814 return 0;
815 }
816 DRM_UDELAY(1);
817 }
818 tmp = RREG32(RADEON_RBBM_STATUS);
819 DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
820 return -1;
821}
822
823void r100_cp_commit(struct radeon_device *rdev) 1049void r100_cp_commit(struct radeon_device *rdev)
824{ 1050{
825 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); 1051 WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
@@ -1733,76 +1959,163 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
1733 return -1; 1959 return -1;
1734} 1960}
1735 1961
1736void r100_gpu_init(struct radeon_device *rdev) 1962void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
1737{ 1963{
1738 /* TODO: anythings to do here ? pipes ? */ 1964 lockup->last_cp_rptr = cp->rptr;
1739 r100_hdp_reset(rdev); 1965 lockup->last_jiffies = jiffies;
1966}
1967
/**
 * r100_gpu_cp_is_lockup() - check if the CP is locked up by comparing
 * against previously recorded tracking information
 * @rdev: radeon device structure
 * @lockup: r100_gpu_lockup structure holding CP lockup tracking information
 * @cp: radeon_cp structure holding CP information
 *
 * We don't need to initialize the lockup tracking information, as we will
 * either see a CP rptr at a different value or a jiffies wrap-around,
 * either of which forces (re)initialization of the tracking information.
 *
 * A possible false positive is being called after a long while with
 * last_cp_rptr coincidentally equal to the current CP rptr; unlikely, but
 * it can happen.  To avoid it, if the time elapsed since the last call is
 * 3 seconds or more we return false and just refresh the tracking
 * information.  Consequently the caller must invoke r100_gpu_cp_is_lockup
 * several times within 3 seconds for a lockup to be reported (a stall of
 * at least 1 second is reported as a lockup); the fencing code should be
 * cautious about that.
 *
 * The caller should write to the ring to force the CP to do something, so
 * we don't get a false positive when the CP has simply been given nothing
 * to do.
 *
 **/
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
{
	unsigned long cjiffies, elapsed;

	cjiffies = jiffies;
	if (!time_after(cjiffies, lockup->last_jiffies)) {
		/* likely a wrap around */
		lockup->last_cp_rptr = cp->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	if (cp->rptr != lockup->last_cp_rptr) {
		/* CP is still working no lockup */
		lockup->last_cp_rptr = cp->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - lockup->last_jiffies);
	if (elapsed >= 3000) {
		/* very likely the improbable case where current
		 * rptr is equal to last recorded, a while ago, rptr
		 * this is more likely a false positive update tracking
		 * information which should force us to be recall at
		 * latter point
		 */
		lockup->last_cp_rptr = cp->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	if (elapsed >= 1000) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
1741 2025
1742void r100_hdp_reset(struct radeon_device *rdev) 2026bool r100_gpu_is_lockup(struct radeon_device *rdev)
1743{ 2027{
1744 uint32_t tmp; 2028 u32 rbbm_status;
2029 int r;
1745 2030
1746 tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; 2031 rbbm_status = RREG32(R_000E40_RBBM_STATUS);
1747 tmp |= (7 << 28); 2032 if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
1748 WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); 2033 r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
1749 (void)RREG32(RADEON_HOST_PATH_CNTL); 2034 return false;
1750 udelay(200); 2035 }
1751 WREG32(RADEON_RBBM_SOFT_RESET, 0); 2036 /* force CP activities */
1752 WREG32(RADEON_HOST_PATH_CNTL, tmp); 2037 r = radeon_ring_lock(rdev, 2);
1753 (void)RREG32(RADEON_HOST_PATH_CNTL); 2038 if (!r) {
2039 /* PACKET2 NOP */
2040 radeon_ring_write(rdev, 0x80000000);
2041 radeon_ring_write(rdev, 0x80000000);
2042 radeon_ring_unlock_commit(rdev);
2043 }
2044 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
2045 return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
1754} 2046}
1755 2047
1756int r100_rb2d_reset(struct radeon_device *rdev) 2048void r100_bm_disable(struct radeon_device *rdev)
1757{ 2049{
1758 uint32_t tmp; 2050 u32 tmp;
1759 int i;
1760 2051
1761 WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); 2052 /* disable bus mastering */
1762 (void)RREG32(RADEON_RBBM_SOFT_RESET); 2053 tmp = RREG32(R_000030_BUS_CNTL);
1763 udelay(200); 2054 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000044);
1764 WREG32(RADEON_RBBM_SOFT_RESET, 0); 2055 mdelay(1);
1765 /* Wait to prevent race in RBBM_STATUS */ 2056 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000042);
2057 mdelay(1);
2058 WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
2059 tmp = RREG32(RADEON_BUS_CNTL);
2060 mdelay(1);
2061 pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
2062 pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
1766 mdelay(1); 2063 mdelay(1);
1767 for (i = 0; i < rdev->usec_timeout; i++) {
1768 tmp = RREG32(RADEON_RBBM_STATUS);
1769 if (!(tmp & (1 << 26))) {
1770 DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
1771 tmp);
1772 return 0;
1773 }
1774 DRM_UDELAY(1);
1775 }
1776 tmp = RREG32(RADEON_RBBM_STATUS);
1777 DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
1778 return -1;
1779} 2064}
1780 2065
1781int r100_gpu_reset(struct radeon_device *rdev) 2066int r100_asic_reset(struct radeon_device *rdev)
1782{ 2067{
1783 uint32_t status; 2068 struct r100_mc_save save;
2069 u32 status, tmp;
1784 2070
1785 /* reset order likely matter */ 2071 r100_mc_stop(rdev, &save);
1786 status = RREG32(RADEON_RBBM_STATUS); 2072 status = RREG32(R_000E40_RBBM_STATUS);
1787 /* reset HDP */ 2073 if (!G_000E40_GUI_ACTIVE(status)) {
1788 r100_hdp_reset(rdev); 2074 return 0;
1789 /* reset rb2d */
1790 if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
1791 r100_rb2d_reset(rdev);
1792 } 2075 }
1793 /* TODO: reset 3D engine */ 2076 status = RREG32(R_000E40_RBBM_STATUS);
2077 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2078 /* stop CP */
2079 WREG32(RADEON_CP_CSQ_CNTL, 0);
2080 tmp = RREG32(RADEON_CP_RB_CNTL);
2081 WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
2082 WREG32(RADEON_CP_RB_RPTR_WR, 0);
2083 WREG32(RADEON_CP_RB_WPTR, 0);
2084 WREG32(RADEON_CP_RB_CNTL, tmp);
2085 /* save PCI state */
2086 pci_save_state(rdev->pdev);
2087 /* disable bus mastering */
2088 r100_bm_disable(rdev);
2089 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_SE(1) |
2090 S_0000F0_SOFT_RESET_RE(1) |
2091 S_0000F0_SOFT_RESET_PP(1) |
2092 S_0000F0_SOFT_RESET_RB(1));
2093 RREG32(R_0000F0_RBBM_SOFT_RESET);
2094 mdelay(500);
2095 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2096 mdelay(1);
2097 status = RREG32(R_000E40_RBBM_STATUS);
2098 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
1794 /* reset CP */ 2099 /* reset CP */
1795 status = RREG32(RADEON_RBBM_STATUS); 2100 WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
1796 if (status & (1 << 16)) { 2101 RREG32(R_0000F0_RBBM_SOFT_RESET);
1797 r100_cp_reset(rdev); 2102 mdelay(500);
1798 } 2103 WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
2104 mdelay(1);
2105 status = RREG32(R_000E40_RBBM_STATUS);
2106 dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
2107 /* restore PCI & busmastering */
2108 pci_restore_state(rdev->pdev);
2109 r100_enable_bm(rdev);
1799 /* Check if GPU is idle */ 2110 /* Check if GPU is idle */
1800 status = RREG32(RADEON_RBBM_STATUS); 2111 if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
1801 if (status & RADEON_RBBM_ACTIVE) { 2112 G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
1802 DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); 2113 dev_err(rdev->dev, "failed to reset GPU\n");
2114 rdev->gpu_lockup = true;
1803 return -1; 2115 return -1;
1804 } 2116 }
1805 DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status); 2117 r100_mc_resume(rdev, &save);
2118 dev_info(rdev->dev, "GPU reset succeed\n");
1806 return 0; 2119 return 0;
1807} 2120}
1808 2121
@@ -2002,11 +2315,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
2002 else 2315 else
2003 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 2316 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
2004 } 2317 }
2005 /* FIXME remove this once we support unmappable VRAM */
2006 if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
2007 rdev->mc.mc_vram_size = rdev->mc.aper_size;
2008 rdev->mc.real_vram_size = rdev->mc.aper_size;
2009 }
2010} 2318}
2011 2319
2012void r100_vga_set_state(struct radeon_device *rdev, bool state) 2320void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -2335,53 +2643,53 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2335 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff; 2643 fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
2336 uint32_t temp, data, mem_trcd, mem_trp, mem_tras; 2644 uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
2337 fixed20_12 memtcas_ff[8] = { 2645 fixed20_12 memtcas_ff[8] = {
2338 fixed_init(1), 2646 dfixed_init(1),
2339 fixed_init(2), 2647 dfixed_init(2),
2340 fixed_init(3), 2648 dfixed_init(3),
2341 fixed_init(0), 2649 dfixed_init(0),
2342 fixed_init_half(1), 2650 dfixed_init_half(1),
2343 fixed_init_half(2), 2651 dfixed_init_half(2),
2344 fixed_init(0), 2652 dfixed_init(0),
2345 }; 2653 };
2346 fixed20_12 memtcas_rs480_ff[8] = { 2654 fixed20_12 memtcas_rs480_ff[8] = {
2347 fixed_init(0), 2655 dfixed_init(0),
2348 fixed_init(1), 2656 dfixed_init(1),
2349 fixed_init(2), 2657 dfixed_init(2),
2350 fixed_init(3), 2658 dfixed_init(3),
2351 fixed_init(0), 2659 dfixed_init(0),
2352 fixed_init_half(1), 2660 dfixed_init_half(1),
2353 fixed_init_half(2), 2661 dfixed_init_half(2),
2354 fixed_init_half(3), 2662 dfixed_init_half(3),
2355 }; 2663 };
2356 fixed20_12 memtcas2_ff[8] = { 2664 fixed20_12 memtcas2_ff[8] = {
2357 fixed_init(0), 2665 dfixed_init(0),
2358 fixed_init(1), 2666 dfixed_init(1),
2359 fixed_init(2), 2667 dfixed_init(2),
2360 fixed_init(3), 2668 dfixed_init(3),
2361 fixed_init(4), 2669 dfixed_init(4),
2362 fixed_init(5), 2670 dfixed_init(5),
2363 fixed_init(6), 2671 dfixed_init(6),
2364 fixed_init(7), 2672 dfixed_init(7),
2365 }; 2673 };
2366 fixed20_12 memtrbs[8] = { 2674 fixed20_12 memtrbs[8] = {
2367 fixed_init(1), 2675 dfixed_init(1),
2368 fixed_init_half(1), 2676 dfixed_init_half(1),
2369 fixed_init(2), 2677 dfixed_init(2),
2370 fixed_init_half(2), 2678 dfixed_init_half(2),
2371 fixed_init(3), 2679 dfixed_init(3),
2372 fixed_init_half(3), 2680 dfixed_init_half(3),
2373 fixed_init(4), 2681 dfixed_init(4),
2374 fixed_init_half(4) 2682 dfixed_init_half(4)
2375 }; 2683 };
2376 fixed20_12 memtrbs_r4xx[8] = { 2684 fixed20_12 memtrbs_r4xx[8] = {
2377 fixed_init(4), 2685 dfixed_init(4),
2378 fixed_init(5), 2686 dfixed_init(5),
2379 fixed_init(6), 2687 dfixed_init(6),
2380 fixed_init(7), 2688 dfixed_init(7),
2381 fixed_init(8), 2689 dfixed_init(8),
2382 fixed_init(9), 2690 dfixed_init(9),
2383 fixed_init(10), 2691 dfixed_init(10),
2384 fixed_init(11) 2692 dfixed_init(11)
2385 }; 2693 };
2386 fixed20_12 min_mem_eff; 2694 fixed20_12 min_mem_eff;
2387 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1; 2695 fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
@@ -2412,7 +2720,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2412 } 2720 }
2413 } 2721 }
2414 2722
2415 min_mem_eff.full = rfixed_const_8(0); 2723 min_mem_eff.full = dfixed_const_8(0);
2416 /* get modes */ 2724 /* get modes */
2417 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) { 2725 if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
2418 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER); 2726 uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
@@ -2433,28 +2741,28 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2433 mclk_ff = rdev->pm.mclk; 2741 mclk_ff = rdev->pm.mclk;
2434 2742
2435 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); 2743 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2436 temp_ff.full = rfixed_const(temp); 2744 temp_ff.full = dfixed_const(temp);
2437 mem_bw.full = rfixed_mul(mclk_ff, temp_ff); 2745 mem_bw.full = dfixed_mul(mclk_ff, temp_ff);
2438 2746
2439 pix_clk.full = 0; 2747 pix_clk.full = 0;
2440 pix_clk2.full = 0; 2748 pix_clk2.full = 0;
2441 peak_disp_bw.full = 0; 2749 peak_disp_bw.full = 0;
2442 if (mode1) { 2750 if (mode1) {
2443 temp_ff.full = rfixed_const(1000); 2751 temp_ff.full = dfixed_const(1000);
2444 pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */ 2752 pix_clk.full = dfixed_const(mode1->clock); /* convert to fixed point */
2445 pix_clk.full = rfixed_div(pix_clk, temp_ff); 2753 pix_clk.full = dfixed_div(pix_clk, temp_ff);
2446 temp_ff.full = rfixed_const(pixel_bytes1); 2754 temp_ff.full = dfixed_const(pixel_bytes1);
2447 peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff); 2755 peak_disp_bw.full += dfixed_mul(pix_clk, temp_ff);
2448 } 2756 }
2449 if (mode2) { 2757 if (mode2) {
2450 temp_ff.full = rfixed_const(1000); 2758 temp_ff.full = dfixed_const(1000);
2451 pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */ 2759 pix_clk2.full = dfixed_const(mode2->clock); /* convert to fixed point */
2452 pix_clk2.full = rfixed_div(pix_clk2, temp_ff); 2760 pix_clk2.full = dfixed_div(pix_clk2, temp_ff);
2453 temp_ff.full = rfixed_const(pixel_bytes2); 2761 temp_ff.full = dfixed_const(pixel_bytes2);
2454 peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff); 2762 peak_disp_bw.full += dfixed_mul(pix_clk2, temp_ff);
2455 } 2763 }
2456 2764
2457 mem_bw.full = rfixed_mul(mem_bw, min_mem_eff); 2765 mem_bw.full = dfixed_mul(mem_bw, min_mem_eff);
2458 if (peak_disp_bw.full >= mem_bw.full) { 2766 if (peak_disp_bw.full >= mem_bw.full) {
2459 DRM_ERROR("You may not have enough display bandwidth for current mode\n" 2767 DRM_ERROR("You may not have enough display bandwidth for current mode\n"
2460 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n"); 2768 "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
@@ -2496,9 +2804,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2496 mem_tras = ((temp >> 12) & 0xf) + 4; 2804 mem_tras = ((temp >> 12) & 0xf) + 4;
2497 } 2805 }
2498 /* convert to FF */ 2806 /* convert to FF */
2499 trcd_ff.full = rfixed_const(mem_trcd); 2807 trcd_ff.full = dfixed_const(mem_trcd);
2500 trp_ff.full = rfixed_const(mem_trp); 2808 trp_ff.full = dfixed_const(mem_trp);
2501 tras_ff.full = rfixed_const(mem_tras); 2809 tras_ff.full = dfixed_const(mem_tras);
2502 2810
2503 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */ 2811 /* Get values from the MEM_SDRAM_MODE_REG register...converting its */
2504 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG); 2812 temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
@@ -2516,7 +2824,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2516 /* extra cas latency stored in bits 23-25 0-4 clocks */ 2824 /* extra cas latency stored in bits 23-25 0-4 clocks */
2517 data = (temp >> 23) & 0x7; 2825 data = (temp >> 23) & 0x7;
2518 if (data < 5) 2826 if (data < 5)
2519 tcas_ff.full += rfixed_const(data); 2827 tcas_ff.full += dfixed_const(data);
2520 } 2828 }
2521 2829
2522 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) { 2830 if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
@@ -2553,72 +2861,72 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2553 2861
2554 if (rdev->flags & RADEON_IS_AGP) { 2862 if (rdev->flags & RADEON_IS_AGP) {
2555 fixed20_12 agpmode_ff; 2863 fixed20_12 agpmode_ff;
2556 agpmode_ff.full = rfixed_const(radeon_agpmode); 2864 agpmode_ff.full = dfixed_const(radeon_agpmode);
2557 temp_ff.full = rfixed_const_666(16); 2865 temp_ff.full = dfixed_const_666(16);
2558 sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff); 2866 sclk_eff_ff.full -= dfixed_mul(agpmode_ff, temp_ff);
2559 } 2867 }
2560 /* TODO PCIE lanes may affect this - agpmode == 16?? */ 2868 /* TODO PCIE lanes may affect this - agpmode == 16?? */
2561 2869
2562 if (ASIC_IS_R300(rdev)) { 2870 if (ASIC_IS_R300(rdev)) {
2563 sclk_delay_ff.full = rfixed_const(250); 2871 sclk_delay_ff.full = dfixed_const(250);
2564 } else { 2872 } else {
2565 if ((rdev->family == CHIP_RV100) || 2873 if ((rdev->family == CHIP_RV100) ||
2566 rdev->flags & RADEON_IS_IGP) { 2874 rdev->flags & RADEON_IS_IGP) {
2567 if (rdev->mc.vram_is_ddr) 2875 if (rdev->mc.vram_is_ddr)
2568 sclk_delay_ff.full = rfixed_const(41); 2876 sclk_delay_ff.full = dfixed_const(41);
2569 else 2877 else
2570 sclk_delay_ff.full = rfixed_const(33); 2878 sclk_delay_ff.full = dfixed_const(33);
2571 } else { 2879 } else {
2572 if (rdev->mc.vram_width == 128) 2880 if (rdev->mc.vram_width == 128)
2573 sclk_delay_ff.full = rfixed_const(57); 2881 sclk_delay_ff.full = dfixed_const(57);
2574 else 2882 else
2575 sclk_delay_ff.full = rfixed_const(41); 2883 sclk_delay_ff.full = dfixed_const(41);
2576 } 2884 }
2577 } 2885 }
2578 2886
2579 mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff); 2887 mc_latency_sclk.full = dfixed_div(sclk_delay_ff, sclk_eff_ff);
2580 2888
2581 if (rdev->mc.vram_is_ddr) { 2889 if (rdev->mc.vram_is_ddr) {
2582 if (rdev->mc.vram_width == 32) { 2890 if (rdev->mc.vram_width == 32) {
2583 k1.full = rfixed_const(40); 2891 k1.full = dfixed_const(40);
2584 c = 3; 2892 c = 3;
2585 } else { 2893 } else {
2586 k1.full = rfixed_const(20); 2894 k1.full = dfixed_const(20);
2587 c = 1; 2895 c = 1;
2588 } 2896 }
2589 } else { 2897 } else {
2590 k1.full = rfixed_const(40); 2898 k1.full = dfixed_const(40);
2591 c = 3; 2899 c = 3;
2592 } 2900 }
2593 2901
2594 temp_ff.full = rfixed_const(2); 2902 temp_ff.full = dfixed_const(2);
2595 mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff); 2903 mc_latency_mclk.full = dfixed_mul(trcd_ff, temp_ff);
2596 temp_ff.full = rfixed_const(c); 2904 temp_ff.full = dfixed_const(c);
2597 mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff); 2905 mc_latency_mclk.full += dfixed_mul(tcas_ff, temp_ff);
2598 temp_ff.full = rfixed_const(4); 2906 temp_ff.full = dfixed_const(4);
2599 mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff); 2907 mc_latency_mclk.full += dfixed_mul(tras_ff, temp_ff);
2600 mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff); 2908 mc_latency_mclk.full += dfixed_mul(trp_ff, temp_ff);
2601 mc_latency_mclk.full += k1.full; 2909 mc_latency_mclk.full += k1.full;
2602 2910
2603 mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff); 2911 mc_latency_mclk.full = dfixed_div(mc_latency_mclk, mclk_ff);
2604 mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff); 2912 mc_latency_mclk.full += dfixed_div(temp_ff, sclk_eff_ff);
2605 2913
2606 /* 2914 /*
2607 HW cursor time assuming worst case of full size colour cursor. 2915 HW cursor time assuming worst case of full size colour cursor.
2608 */ 2916 */
2609 temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1)))); 2917 temp_ff.full = dfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
2610 temp_ff.full += trcd_ff.full; 2918 temp_ff.full += trcd_ff.full;
2611 if (temp_ff.full < tras_ff.full) 2919 if (temp_ff.full < tras_ff.full)
2612 temp_ff.full = tras_ff.full; 2920 temp_ff.full = tras_ff.full;
2613 cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff); 2921 cur_latency_mclk.full = dfixed_div(temp_ff, mclk_ff);
2614 2922
2615 temp_ff.full = rfixed_const(cur_size); 2923 temp_ff.full = dfixed_const(cur_size);
2616 cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff); 2924 cur_latency_sclk.full = dfixed_div(temp_ff, sclk_eff_ff);
2617 /* 2925 /*
2618 Find the total latency for the display data. 2926 Find the total latency for the display data.
2619 */ 2927 */
2620 disp_latency_overhead.full = rfixed_const(8); 2928 disp_latency_overhead.full = dfixed_const(8);
2621 disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff); 2929 disp_latency_overhead.full = dfixed_div(disp_latency_overhead, sclk_ff);
2622 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full; 2930 mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
2623 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full; 2931 mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
2624 2932
@@ -2646,16 +2954,16 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2646 /* 2954 /*
2647 Find the drain rate of the display buffer. 2955 Find the drain rate of the display buffer.
2648 */ 2956 */
2649 temp_ff.full = rfixed_const((16/pixel_bytes1)); 2957 temp_ff.full = dfixed_const((16/pixel_bytes1));
2650 disp_drain_rate.full = rfixed_div(pix_clk, temp_ff); 2958 disp_drain_rate.full = dfixed_div(pix_clk, temp_ff);
2651 2959
2652 /* 2960 /*
2653 Find the critical point of the display buffer. 2961 Find the critical point of the display buffer.
2654 */ 2962 */
2655 crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency); 2963 crit_point_ff.full = dfixed_mul(disp_drain_rate, disp_latency);
2656 crit_point_ff.full += rfixed_const_half(0); 2964 crit_point_ff.full += dfixed_const_half(0);
2657 2965
2658 critical_point = rfixed_trunc(crit_point_ff); 2966 critical_point = dfixed_trunc(crit_point_ff);
2659 2967
2660 if (rdev->disp_priority == 2) { 2968 if (rdev->disp_priority == 2) {
2661 critical_point = 0; 2969 critical_point = 0;
@@ -2726,8 +3034,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2726 /* 3034 /*
2727 Find the drain rate of the display buffer. 3035 Find the drain rate of the display buffer.
2728 */ 3036 */
2729 temp_ff.full = rfixed_const((16/pixel_bytes2)); 3037 temp_ff.full = dfixed_const((16/pixel_bytes2));
2730 disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff); 3038 disp_drain_rate2.full = dfixed_div(pix_clk2, temp_ff);
2731 3039
2732 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL); 3040 grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
2733 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK); 3041 grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
@@ -2748,8 +3056,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2748 critical_point2 = 0; 3056 critical_point2 = 0;
2749 else { 3057 else {
2750 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128; 3058 temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
2751 temp_ff.full = rfixed_const(temp); 3059 temp_ff.full = dfixed_const(temp);
2752 temp_ff.full = rfixed_mul(mclk_ff, temp_ff); 3060 temp_ff.full = dfixed_mul(mclk_ff, temp_ff);
2753 if (sclk_ff.full < temp_ff.full) 3061 if (sclk_ff.full < temp_ff.full)
2754 temp_ff.full = sclk_ff.full; 3062 temp_ff.full = sclk_ff.full;
2755 3063
@@ -2757,15 +3065,15 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2757 3065
2758 if (mode1) { 3066 if (mode1) {
2759 temp_ff.full = read_return_rate.full - disp_drain_rate.full; 3067 temp_ff.full = read_return_rate.full - disp_drain_rate.full;
2760 time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff); 3068 time_disp1_drop_priority.full = dfixed_div(crit_point_ff, temp_ff);
2761 } else { 3069 } else {
2762 time_disp1_drop_priority.full = 0; 3070 time_disp1_drop_priority.full = 0;
2763 } 3071 }
2764 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full; 3072 crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
2765 crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2); 3073 crit_point_ff.full = dfixed_mul(crit_point_ff, disp_drain_rate2);
2766 crit_point_ff.full += rfixed_const_half(0); 3074 crit_point_ff.full += dfixed_const_half(0);
2767 3075
2768 critical_point2 = rfixed_trunc(crit_point_ff); 3076 critical_point2 = dfixed_trunc(crit_point_ff);
2769 3077
2770 if (rdev->disp_priority == 2) { 3078 if (rdev->disp_priority == 2) {
2771 critical_point2 = 0; 3079 critical_point2 = 0;
@@ -3399,7 +3707,7 @@ static int r100_startup(struct radeon_device *rdev)
3399 /* Resume clock */ 3707 /* Resume clock */
3400 r100_clock_startup(rdev); 3708 r100_clock_startup(rdev);
3401 /* Initialize GPU configuration (# pipes, ...) */ 3709 /* Initialize GPU configuration (# pipes, ...) */
3402 r100_gpu_init(rdev); 3710// r100_gpu_init(rdev);
3403 /* Initialize GART (initialize after TTM so we can allocate 3711 /* Initialize GART (initialize after TTM so we can allocate
3404 * memory through TTM but finalize after TTM) */ 3712 * memory through TTM but finalize after TTM) */
3405 r100_enable_bm(rdev); 3713 r100_enable_bm(rdev);
@@ -3436,7 +3744,7 @@ int r100_resume(struct radeon_device *rdev)
3436 /* Resume clock before doing reset */ 3744 /* Resume clock before doing reset */
3437 r100_clock_startup(rdev); 3745 r100_clock_startup(rdev);
3438 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3746 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3439 if (radeon_gpu_reset(rdev)) { 3747 if (radeon_asic_reset(rdev)) {
3440 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3748 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3441 RREG32(R_000E40_RBBM_STATUS), 3749 RREG32(R_000E40_RBBM_STATUS),
3442 RREG32(R_0007C0_CP_STAT)); 3750 RREG32(R_0007C0_CP_STAT));
@@ -3462,7 +3770,6 @@ int r100_suspend(struct radeon_device *rdev)
3462 3770
3463void r100_fini(struct radeon_device *rdev) 3771void r100_fini(struct radeon_device *rdev)
3464{ 3772{
3465 radeon_pm_fini(rdev);
3466 r100_cp_fini(rdev); 3773 r100_cp_fini(rdev);
3467 r100_wb_fini(rdev); 3774 r100_wb_fini(rdev);
3468 r100_ib_fini(rdev); 3775 r100_ib_fini(rdev);
@@ -3505,7 +3812,7 @@ int r100_init(struct radeon_device *rdev)
3505 return r; 3812 return r;
3506 } 3813 }
3507 /* Reset gpu before posting otherwise ATOM will enter infinite loop */ 3814 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
3508 if (radeon_gpu_reset(rdev)) { 3815 if (radeon_asic_reset(rdev)) {
3509 dev_warn(rdev->dev, 3816 dev_warn(rdev->dev,
3510 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", 3817 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
3511 RREG32(R_000E40_RBBM_STATUS), 3818 RREG32(R_000E40_RBBM_STATUS),
@@ -3518,8 +3825,6 @@ int r100_init(struct radeon_device *rdev)
3518 r100_errata(rdev); 3825 r100_errata(rdev);
3519 /* Initialize clocks */ 3826 /* Initialize clocks */
3520 radeon_get_clock_info(rdev->ddev); 3827 radeon_get_clock_info(rdev->ddev);
3521 /* Initialize power management */
3522 radeon_pm_init(rdev);
3523 /* initialize AGP */ 3828 /* initialize AGP */
3524 if (rdev->flags & RADEON_IS_AGP) { 3829 if (rdev->flags & RADEON_IS_AGP) {
3525 r = radeon_agp_init(rdev); 3830 r = radeon_agp_init(rdev);