diff options
author | Matthew Garrett <mjg@redhat.com> | 2010-04-26 15:45:23 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2010-05-18 04:21:16 -0400 |
commit | 2aba631c008e7d82e3ec45dd32bec1ea63a963cf (patch) | |
tree | 5a66db121184a84392802999b84164780239cdec /drivers | |
parent | a424816fb37f894a37585cf86dfdd6b8b1dc681f (diff) |
radeon: Unify PM entry paths
There's a moderate amount of effort involved in setting the card up for
clock transitions, so unify the codepaths to make it easier to implement.
Signed-off-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_pm.c | 90 |
1 file changed, 32 insertions, 58 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index c703ae326bc3..a61de1f9ff64 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -29,13 +29,13 @@ | |||
29 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 | 29 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 |
30 | #define RADEON_WAIT_IDLE_TIMEOUT 200 | 30 | #define RADEON_WAIT_IDLE_TIMEOUT 200 |
31 | 31 | ||
32 | static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); | ||
33 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | ||
34 | static void radeon_pm_idle_work_handler(struct work_struct *work); | 32 | static void radeon_pm_idle_work_handler(struct work_struct *work); |
35 | static int radeon_debugfs_pm_init(struct radeon_device *rdev); | 33 | static int radeon_debugfs_pm_init(struct radeon_device *rdev); |
36 | 34 | ||
37 | static void radeon_pm_set_power_mode_static_locked(struct radeon_device *rdev) | 35 | static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch) |
38 | { | 36 | { |
37 | int i; | ||
38 | |||
39 | mutex_lock(&rdev->cp.mutex); | 39 | mutex_lock(&rdev->cp.mutex); |
40 | 40 | ||
41 | /* wait for GPU idle */ | 41 | /* wait for GPU idle */ |
@@ -48,13 +48,33 @@ static void radeon_pm_set_power_mode_static_locked(struct radeon_device *rdev) | |||
48 | rdev->irq.gui_idle = false; | 48 | rdev->irq.gui_idle = false; |
49 | radeon_irq_set(rdev); | 49 | radeon_irq_set(rdev); |
50 | 50 | ||
51 | radeon_set_power_state(rdev, true); | 51 | if (!static_switch) { |
52 | 52 | for (i = 0; i < rdev->num_crtc; i++) { | |
53 | if (rdev->pm.active_crtcs & (1 << i)) { | ||
54 | rdev->pm.req_vblank |= (1 << i); | ||
55 | drm_vblank_get(rdev->ddev, i); | ||
56 | } | ||
57 | } | ||
58 | } | ||
59 | |||
60 | radeon_set_power_state(rdev, static_switch); | ||
61 | |||
62 | if (!static_switch) { | ||
63 | for (i = 0; i < rdev->num_crtc; i++) { | ||
64 | if (rdev->pm.req_vblank & (1 << i)) { | ||
65 | rdev->pm.req_vblank &= ~(1 << i); | ||
66 | drm_vblank_put(rdev->ddev, i); | ||
67 | } | ||
68 | } | ||
69 | } | ||
70 | |||
53 | /* update display watermarks based on new power state */ | 71 | /* update display watermarks based on new power state */ |
54 | radeon_update_bandwidth_info(rdev); | 72 | radeon_update_bandwidth_info(rdev); |
55 | if (rdev->pm.active_crtc_count) | 73 | if (rdev->pm.active_crtc_count) |
56 | radeon_bandwidth_update(rdev); | 74 | radeon_bandwidth_update(rdev); |
57 | 75 | ||
76 | rdev->pm.planned_action = PM_ACTION_NONE; | ||
77 | |||
58 | mutex_unlock(&rdev->cp.mutex); | 78 | mutex_unlock(&rdev->cp.mutex); |
59 | } | 79 | } |
60 | 80 | ||
@@ -95,7 +115,7 @@ static ssize_t radeon_set_power_state_static(struct device *dev, | |||
95 | rdev->pm.planned_action = PM_ACTION_NONE; | 115 | rdev->pm.planned_action = PM_ACTION_NONE; |
96 | rdev->pm.requested_power_state_index = ps; | 116 | rdev->pm.requested_power_state_index = ps; |
97 | rdev->pm.requested_clock_mode_index = cm; | 117 | rdev->pm.requested_clock_mode_index = cm; |
98 | radeon_pm_set_power_mode_static_locked(rdev); | 118 | radeon_pm_set_clocks(rdev, true); |
99 | } | 119 | } |
100 | } else | 120 | } else |
101 | DRM_ERROR("Invalid power state: %d.%d\n\n", ps, cm); | 121 | DRM_ERROR("Invalid power state: %d.%d\n\n", ps, cm); |
@@ -257,14 +277,14 @@ void radeon_pm_fini(struct radeon_device *rdev) | |||
257 | /* reset default clocks */ | 277 | /* reset default clocks */ |
258 | rdev->pm.state = PM_STATE_DISABLED; | 278 | rdev->pm.state = PM_STATE_DISABLED; |
259 | rdev->pm.planned_action = PM_ACTION_DEFAULT; | 279 | rdev->pm.planned_action = PM_ACTION_DEFAULT; |
260 | radeon_pm_set_clocks(rdev); | 280 | radeon_pm_set_clocks(rdev, false); |
261 | } else if ((rdev->pm.current_power_state_index != | 281 | } else if ((rdev->pm.current_power_state_index != |
262 | rdev->pm.default_power_state_index) || | 282 | rdev->pm.default_power_state_index) || |
263 | (rdev->pm.current_clock_mode_index != 0)) { | 283 | (rdev->pm.current_clock_mode_index != 0)) { |
264 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; | 284 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; |
265 | rdev->pm.requested_clock_mode_index = 0; | 285 | rdev->pm.requested_clock_mode_index = 0; |
266 | mutex_lock(&rdev->pm.mutex); | 286 | mutex_lock(&rdev->pm.mutex); |
267 | radeon_pm_set_power_mode_static_locked(rdev); | 287 | radeon_pm_set_clocks(rdev, true); |
268 | mutex_unlock(&rdev->pm.mutex); | 288 | mutex_unlock(&rdev->pm.mutex); |
269 | } | 289 | } |
270 | 290 | ||
@@ -303,7 +323,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
303 | 323 | ||
304 | rdev->pm.state = PM_STATE_PAUSED; | 324 | rdev->pm.state = PM_STATE_PAUSED; |
305 | rdev->pm.planned_action = PM_ACTION_UPCLOCK; | 325 | rdev->pm.planned_action = PM_ACTION_UPCLOCK; |
306 | radeon_pm_set_clocks(rdev); | 326 | radeon_pm_set_clocks(rdev, false); |
307 | 327 | ||
308 | DRM_DEBUG("radeon: dynamic power management deactivated\n"); | 328 | DRM_DEBUG("radeon: dynamic power management deactivated\n"); |
309 | } | 329 | } |
@@ -313,7 +333,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
313 | if (rdev->pm.state == PM_STATE_MINIMUM) { | 333 | if (rdev->pm.state == PM_STATE_MINIMUM) { |
314 | rdev->pm.state = PM_STATE_ACTIVE; | 334 | rdev->pm.state = PM_STATE_ACTIVE; |
315 | rdev->pm.planned_action = PM_ACTION_UPCLOCK; | 335 | rdev->pm.planned_action = PM_ACTION_UPCLOCK; |
316 | radeon_pm_set_clocks(rdev); | 336 | radeon_pm_set_clocks(rdev, false); |
317 | 337 | ||
318 | queue_delayed_work(rdev->wq, &rdev->pm.idle_work, | 338 | queue_delayed_work(rdev->wq, &rdev->pm.idle_work, |
319 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 339 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
@@ -329,7 +349,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
329 | 349 | ||
330 | rdev->pm.state = PM_STATE_MINIMUM; | 350 | rdev->pm.state = PM_STATE_MINIMUM; |
331 | rdev->pm.planned_action = PM_ACTION_MINIMUM; | 351 | rdev->pm.planned_action = PM_ACTION_MINIMUM; |
332 | radeon_pm_set_clocks(rdev); | 352 | radeon_pm_set_clocks(rdev, false); |
333 | } | 353 | } |
334 | } | 354 | } |
335 | 355 | ||
@@ -400,52 +420,6 @@ bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) | |||
400 | finish ? "exit" : "entry"); | 420 | finish ? "exit" : "entry"); |
401 | return in_vbl; | 421 | return in_vbl; |
402 | } | 422 | } |
403 | static void radeon_pm_set_clocks_locked(struct radeon_device *rdev) | ||
404 | { | ||
405 | /*radeon_fence_wait_last(rdev);*/ | ||
406 | |||
407 | radeon_set_power_state(rdev, false); | ||
408 | rdev->pm.planned_action = PM_ACTION_NONE; | ||
409 | } | ||
410 | |||
411 | static void radeon_pm_set_clocks(struct radeon_device *rdev) | ||
412 | { | ||
413 | int i; | ||
414 | |||
415 | radeon_get_power_state(rdev, rdev->pm.planned_action); | ||
416 | mutex_lock(&rdev->cp.mutex); | ||
417 | |||
418 | /* wait for GPU idle */ | ||
419 | rdev->pm.gui_idle = false; | ||
420 | rdev->irq.gui_idle = true; | ||
421 | radeon_irq_set(rdev); | ||
422 | wait_event_interruptible_timeout( | ||
423 | rdev->irq.idle_queue, rdev->pm.gui_idle, | ||
424 | msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); | ||
425 | rdev->irq.gui_idle = false; | ||
426 | radeon_irq_set(rdev); | ||
427 | |||
428 | for (i = 0; i < rdev->num_crtc; i++) { | ||
429 | if (rdev->pm.active_crtcs & (1 << i)) { | ||
430 | rdev->pm.req_vblank |= (1 << i); | ||
431 | drm_vblank_get(rdev->ddev, i); | ||
432 | } | ||
433 | } | ||
434 | radeon_pm_set_clocks_locked(rdev); | ||
435 | for (i = 0; i < rdev->num_crtc; i++) { | ||
436 | if (rdev->pm.req_vblank & (1 << i)) { | ||
437 | rdev->pm.req_vblank &= ~(1 << i); | ||
438 | drm_vblank_put(rdev->ddev, i); | ||
439 | } | ||
440 | } | ||
441 | |||
442 | /* update display watermarks based on new power state */ | ||
443 | radeon_update_bandwidth_info(rdev); | ||
444 | if (rdev->pm.active_crtc_count) | ||
445 | radeon_bandwidth_update(rdev); | ||
446 | |||
447 | mutex_unlock(&rdev->cp.mutex); | ||
448 | } | ||
449 | 423 | ||
450 | static void radeon_pm_idle_work_handler(struct work_struct *work) | 424 | static void radeon_pm_idle_work_handler(struct work_struct *work) |
451 | { | 425 | { |
@@ -493,7 +467,7 @@ static void radeon_pm_idle_work_handler(struct work_struct *work) | |||
493 | 467 | ||
494 | if (rdev->pm.planned_action != PM_ACTION_NONE && | 468 | if (rdev->pm.planned_action != PM_ACTION_NONE && |
495 | jiffies > rdev->pm.action_timeout) { | 469 | jiffies > rdev->pm.action_timeout) { |
496 | radeon_pm_set_clocks(rdev); | 470 | radeon_pm_set_clocks(rdev, false); |
497 | } | 471 | } |
498 | } | 472 | } |
499 | mutex_unlock(&rdev->pm.mutex); | 473 | mutex_unlock(&rdev->pm.mutex); |