author		Rafał Miłecki <zajec5@gmail.com>	2009-12-22 17:02:16 -0500
committer	Dave Airlie <airlied@redhat.com>	2010-02-08 18:32:26 -0500
commit		c913e23a145ae07b6f9f88aae8cd5ad06b5729ff
tree		aebde8632096f5b24f0a1592800e1ec1e7926c0b
parent		18917b60bca094e7830e4046e336d024f73f1c90
drm/radeon/kms: add dynamic engine reclocking (V9)
V2: reorganize functions, fix modesetting calls
V3: rebase patch, use radeon's workqueue
V4: enable on tested chipsets only, request VBLANK IRQs
V5: enable PM on older hardware (IRQs, mode_fixup, dpms)
V6: use separate dynpm module parameter
V7: drop RADEON_ prefix, set minimum mode for dpms off
V8: update legacy encoder call, fix order in rs600 IRQ
V9: update compute_clocks call in legacy, not only DPMS_OFF

Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
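Note on V6: the dynpm module parameter that radeon_pm_init() consults below through radeon_dynpm is defined outside this file and is not part of this filtered diff. A minimal sketch of what that plumbing looks like, assuming the usual module_param_named() pattern (the definition site, description string, and 0444 permissions are assumptions, not copied from the patch):

#include <linux/module.h>

/* Sketch only: the real definition lives elsewhere in the radeon driver
 * (radeon_drv.c in this era) and may differ in wording and permissions. */
int radeon_dynpm = -1;	/* -1 = driver default, 0 = disabled, 1 = enabled */
module_param_named(dynpm, radeon_dynpm, int, 0444);
MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");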
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_pm.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_pm.c	241
1 file changed, 239 insertions(+), 2 deletions(-)
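The hunks below touch only radeon_pm.c. The state machine they drive relies on enums and struct radeon_pm fields that the same patch adds to radeon.h, which this filtered diff does not show. A rough sketch of those declarations, reconstructed from how the code below uses rdev->pm (member list, types, and layout are inferred, not quoted from radeon.h):

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Sketch only: field set inferred from usage in radeon_pm.c; the
 * authoritative declarations are in radeon.h and may differ. */
enum radeon_pm_state {
	PM_STATE_DISABLED,
	PM_STATE_MINIMUM,
	PM_STATE_PAUSED,
	PM_STATE_ACTIVE
};

enum radeon_pm_action {
	PM_ACTION_NONE,
	PM_ACTION_MINIMUM,
	PM_ACTION_DOWNCLOCK,
	PM_ACTION_UPCLOCK
};

struct radeon_pm {
	struct mutex		mutex;			/* guards the fields below */
	struct work_struct	reclock_work;		/* reclock outside IRQ context via rdev->wq */
	struct delayed_work	idle_work;		/* periodic busy/idle check */
	enum radeon_pm_state	state;
	enum radeon_pm_action	planned_action;
	unsigned long		action_timeout;		/* jiffies after which to commit the action */
	bool			downclocked;
	bool			vblank_callback;	/* apply planned_action on next VBLANK */
	int			active_crtcs;		/* bitmask of CRTCs with a live connector */
	int			req_vblank;		/* CRTCs we hold a vblank reference on */
	u32			min_gpu_engine_clock;	/* lowest safe engine clock (10 kHz units) */
	u32			min_gpu_memory_clock;	/* lowest safe memory clock (10 kHz units) */
	u32			min_mode_engine_clock;	/* minimum for the current mode */
	u32			min_mode_memory_clock;
};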
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64cdc320..a9c61f435c06 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -22,17 +22,253 @@
22#include "drmP.h" 22#include "drmP.h"
23#include "radeon.h" 23#include "radeon.h"
24 24
25int radeon_debugfs_pm_init(struct radeon_device *rdev); 25#define RADEON_IDLE_LOOP_MS 100
26#define RADEON_RECLOCK_DELAY_MS 200
27
28static void radeon_pm_check_limits(struct radeon_device *rdev);
29static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
30static void radeon_pm_set_clocks(struct radeon_device *rdev);
31static void radeon_pm_reclock_work_handler(struct work_struct *work);
32static void radeon_pm_idle_work_handler(struct work_struct *work);
33static int radeon_debugfs_pm_init(struct radeon_device *rdev);
34
35static const char *pm_state_names[4] = {
36 "PM_STATE_DISABLED",
37 "PM_STATE_MINIMUM",
38 "PM_STATE_PAUSED",
39 "PM_STATE_ACTIVE"
40};
26 41
27int radeon_pm_init(struct radeon_device *rdev) 42int radeon_pm_init(struct radeon_device *rdev)
28{ 43{
44 rdev->pm.state = PM_STATE_DISABLED;
45 rdev->pm.planned_action = PM_ACTION_NONE;
46 rdev->pm.downclocked = false;
47 rdev->pm.vblank_callback = false;
48
49 radeon_pm_check_limits(rdev);
50
29 if (radeon_debugfs_pm_init(rdev)) { 51 if (radeon_debugfs_pm_init(rdev)) {
30 DRM_ERROR("Failed to register debugfs file for PM!\n"); 52 DRM_ERROR("Failed to register debugfs file for PM!\n");
31 } 53 }
32 54
55 INIT_WORK(&rdev->pm.reclock_work, radeon_pm_reclock_work_handler);
56 INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
57
58 if (radeon_dynpm != -1 && radeon_dynpm) {
59 rdev->pm.state = PM_STATE_PAUSED;
60 DRM_INFO("radeon: dynamic power management enabled\n");
61 }
62
63 DRM_INFO("radeon: power management initialized\n");
64
33 return 0; 65 return 0;
34} 66}
35 67
68static void radeon_pm_check_limits(struct radeon_device *rdev)
69{
70 rdev->pm.min_gpu_engine_clock = rdev->clock.default_sclk - 5000;
71 rdev->pm.min_gpu_memory_clock = rdev->clock.default_mclk - 5000;
72}
73
74void radeon_pm_compute_clocks(struct radeon_device *rdev)
75{
76 struct drm_device *ddev = rdev->ddev;
77 struct drm_connector *connector;
78 struct radeon_crtc *radeon_crtc;
79 int count = 0;
80
81 if (rdev->pm.state == PM_STATE_DISABLED)
82 return;
83
84 mutex_lock(&rdev->pm.mutex);
85
86 rdev->pm.active_crtcs = 0;
87 list_for_each_entry(connector,
88 &ddev->mode_config.connector_list, head) {
89 if (connector->encoder &&
90 connector->dpms != DRM_MODE_DPMS_OFF) {
91 radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
92 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
93 ++count;
94 }
95 }
96
97 if (count > 1) {
98 if (rdev->pm.state == PM_STATE_ACTIVE) {
99 wait_queue_head_t wait;
100 init_waitqueue_head(&wait);
101
102 cancel_delayed_work(&rdev->pm.idle_work);
103
104 rdev->pm.state = PM_STATE_PAUSED;
105 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
106 rdev->pm.vblank_callback = true;
107
108 mutex_unlock(&rdev->pm.mutex);
109
110 wait_event_timeout(wait, !rdev->pm.downclocked,
111 msecs_to_jiffies(300));
112 if (!rdev->pm.downclocked)
113 radeon_pm_set_clocks(rdev);
114
115 DRM_DEBUG("radeon: dynamic power management deactivated\n");
116 } else {
117 mutex_unlock(&rdev->pm.mutex);
118 }
119 } else if (count == 1) {
120 rdev->pm.min_mode_engine_clock = rdev->pm.min_gpu_engine_clock;
121 rdev->pm.min_mode_memory_clock = rdev->pm.min_gpu_memory_clock;
122 /* TODO: Increase clocks if needed for current mode */
123
124 if (rdev->pm.state == PM_STATE_MINIMUM) {
125 rdev->pm.state = PM_STATE_ACTIVE;
126 rdev->pm.planned_action = PM_ACTION_UPCLOCK;
127 radeon_pm_set_clocks_locked(rdev);
128
129 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
130 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
131 }
132 else if (rdev->pm.state == PM_STATE_PAUSED) {
133 rdev->pm.state = PM_STATE_ACTIVE;
134 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
135 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
136 DRM_DEBUG("radeon: dynamic power management activated\n");
137 }
138
139 mutex_unlock(&rdev->pm.mutex);
140 }
141 else { /* count == 0 */
142 if (rdev->pm.state != PM_STATE_MINIMUM) {
143 cancel_delayed_work(&rdev->pm.idle_work);
144
145 rdev->pm.state = PM_STATE_MINIMUM;
146 rdev->pm.planned_action = PM_ACTION_MINIMUM;
147 radeon_pm_set_clocks_locked(rdev);
148 }
149
150 mutex_unlock(&rdev->pm.mutex);
151 }
152}
153
154static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
155{
156 /*radeon_fence_wait_last(rdev);*/
157 switch (rdev->pm.planned_action) {
158 case PM_ACTION_UPCLOCK:
159 radeon_set_engine_clock(rdev, rdev->clock.default_sclk);
160 rdev->pm.downclocked = false;
161 break;
162 case PM_ACTION_DOWNCLOCK:
163 radeon_set_engine_clock(rdev,
164 rdev->pm.min_mode_engine_clock);
165 rdev->pm.downclocked = true;
166 break;
167 case PM_ACTION_MINIMUM:
168 radeon_set_engine_clock(rdev,
169 rdev->pm.min_gpu_engine_clock);
170 break;
171 case PM_ACTION_NONE:
172 DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
173 break;
174 }
175
176 rdev->pm.planned_action = PM_ACTION_NONE;
177}
178
179static void radeon_pm_set_clocks(struct radeon_device *rdev)
180{
181 mutex_lock(&rdev->pm.mutex);
182 /* new VBLANK irq may come before handling previous one */
183 if (rdev->pm.vblank_callback) {
184 mutex_lock(&rdev->cp.mutex);
185 if (rdev->pm.req_vblank & (1 << 0)) {
186 rdev->pm.req_vblank &= ~(1 << 0);
187 drm_vblank_put(rdev->ddev, 0);
188 }
189 if (rdev->pm.req_vblank & (1 << 1)) {
190 rdev->pm.req_vblank &= ~(1 << 1);
191 drm_vblank_put(rdev->ddev, 1);
192 }
193 rdev->pm.vblank_callback = false;
194 radeon_pm_set_clocks_locked(rdev);
195 mutex_unlock(&rdev->cp.mutex);
196 }
197 mutex_unlock(&rdev->pm.mutex);
198}
199
200static void radeon_pm_reclock_work_handler(struct work_struct *work)
201{
202 struct radeon_device *rdev;
203 rdev = container_of(work, struct radeon_device,
204 pm.reclock_work);
205 radeon_pm_set_clocks(rdev);
206}
207
208static void radeon_pm_idle_work_handler(struct work_struct *work)
209{
210 struct radeon_device *rdev;
211 rdev = container_of(work, struct radeon_device,
212 pm.idle_work.work);
213
214 mutex_lock(&rdev->pm.mutex);
215 if (rdev->pm.state == PM_STATE_ACTIVE &&
216 !rdev->pm.vblank_callback) {
217 unsigned long irq_flags;
218 int not_processed = 0;
219
220 read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
221 if (!list_empty(&rdev->fence_drv.emited)) {
222 struct list_head *ptr;
223 list_for_each(ptr, &rdev->fence_drv.emited) {
224 /* count up to 3, that's enought info */
225 if (++not_processed >= 3)
226 break;
227 }
228 }
229 read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
230
231 if (not_processed >= 3) { /* should upclock */
232 if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
233 rdev->pm.planned_action = PM_ACTION_NONE;
234 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
235 rdev->pm.downclocked) {
236 rdev->pm.planned_action =
237 PM_ACTION_UPCLOCK;
238 rdev->pm.action_timeout = jiffies +
239 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
240 }
241 } else if (not_processed == 0) { /* should downclock */
242 if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
243 rdev->pm.planned_action = PM_ACTION_NONE;
244 } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
245 !rdev->pm.downclocked) {
246 rdev->pm.planned_action =
247 PM_ACTION_DOWNCLOCK;
248 rdev->pm.action_timeout = jiffies +
249 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
250 }
251 }
252
253 if (rdev->pm.planned_action != PM_ACTION_NONE &&
254 jiffies > rdev->pm.action_timeout) {
255 if (rdev->pm.active_crtcs & (1 << 0)) {
256 rdev->pm.req_vblank |= (1 << 0);
257 drm_vblank_get(rdev->ddev, 0);
258 }
259 if (rdev->pm.active_crtcs & (1 << 1)) {
260 rdev->pm.req_vblank |= (1 << 1);
261 drm_vblank_get(rdev->ddev, 1);
262 }
263 rdev->pm.vblank_callback = true;
264 }
265 }
266 mutex_unlock(&rdev->pm.mutex);
267
268 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
269 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
270}
271
36/* 272/*
37 * Debugfs info 273 * Debugfs info
38 */ 274 */
@@ -44,6 +280,7 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
+	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
 	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
 	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
 	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
@@ -58,7 +295,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
 };
 #endif
 
-int radeon_debugfs_pm_init(struct radeon_device *rdev)
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));