Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_pm.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c  399
1 files changed, 397 insertions, 2 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64cdc320..d4d1c39a0e99 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -18,21 +18,413 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors: Rafał Miłecki <zajec5@gmail.com>
+ *          Alex Deucher <alexdeucher@gmail.com>
  */
 #include "drmP.h"
 #include "radeon.h"
+#include "avivod.h"
 
-int radeon_debugfs_pm_init(struct radeon_device *rdev);
+#define RADEON_IDLE_LOOP_MS 100
+#define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
+
+static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+
+static const char *pm_state_names[4] = {
+	"PM_STATE_DISABLED",
+	"PM_STATE_MINIMUM",
+	"PM_STATE_PAUSED",
+	"PM_STATE_ACTIVE"
+};
+
+static const char *pm_state_types[5] = {
+	"Default",
+	"Powersave",
+	"Battery",
+	"Balanced",
+	"Performance",
+};
+
+static void radeon_print_power_mode_info(struct radeon_device *rdev)
+{
+	int i, j;
+	bool is_default;
+
+	DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
+	for (i = 0; i < rdev->pm.num_power_states; i++) {
+		if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
+			is_default = true;
+		else
+			is_default = false;
+		DRM_INFO("State %d %s %s\n", i,
+			 pm_state_types[rdev->pm.power_state[i].type],
+			 is_default ? "(default)" : "");
+		if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+			DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
+		DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
+		for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
+			if (rdev->flags & RADEON_IS_IGP)
+				DRM_INFO("\t\t%d engine: %d\n",
+					 j,
+					 rdev->pm.power_state[i].clock_info[j].sclk * 10);
+			else
+				DRM_INFO("\t\t%d engine/memory: %d/%d\n",
+					 j,
+					 rdev->pm.power_state[i].clock_info[j].sclk * 10,
+					 rdev->pm.power_state[i].clock_info[j].mclk * 10);
+		}
+	}
+}
+
+static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
+							    enum radeon_pm_state_type type)
+{
+	int i, j;
+	enum radeon_pm_state_type wanted_types[2];
+	int wanted_count;
+
+	switch (type) {
+	case POWER_STATE_TYPE_DEFAULT:
+	default:
+		return rdev->pm.default_power_state;
+	case POWER_STATE_TYPE_POWERSAVE:
+		if (rdev->flags & RADEON_IS_MOBILITY) {
+			wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
+			wanted_types[1] = POWER_STATE_TYPE_BATTERY;
+			wanted_count = 2;
+		} else {
+			wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
+			wanted_count = 1;
+		}
+		break;
+	case POWER_STATE_TYPE_BATTERY:
+		if (rdev->flags & RADEON_IS_MOBILITY) {
+			wanted_types[0] = POWER_STATE_TYPE_BATTERY;
+			wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
+			wanted_count = 2;
+		} else {
+			wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
+			wanted_count = 1;
+		}
+		break;
+	case POWER_STATE_TYPE_BALANCED:
+	case POWER_STATE_TYPE_PERFORMANCE:
+		wanted_types[0] = type;
+		wanted_count = 1;
+		break;
+	}
+
+	for (i = 0; i < wanted_count; i++) {
+		for (j = 0; j < rdev->pm.num_power_states; j++) {
+			if (rdev->pm.power_state[j].type == wanted_types[i])
+				return &rdev->pm.power_state[j];
+		}
+	}
+
+	return rdev->pm.default_power_state;
+}
+
+static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
+							     struct radeon_power_state *power_state,
+							     enum radeon_pm_clock_mode_type type)
+{
+	switch (type) {
+	case POWER_MODE_TYPE_DEFAULT:
+	default:
+		return power_state->default_clock_mode;
+	case POWER_MODE_TYPE_LOW:
+		return &power_state->clock_info[0];
+	case POWER_MODE_TYPE_MID:
+		if (power_state->num_clock_modes > 2)
+			return &power_state->clock_info[1];
+		else
+			return &power_state->clock_info[0];
+		break;
+	case POWER_MODE_TYPE_HIGH:
+		return &power_state->clock_info[power_state->num_clock_modes - 1];
+	}
+
+}
+
+static void radeon_get_power_state(struct radeon_device *rdev,
+				   enum radeon_pm_action action)
+{
+	switch (action) {
+	case PM_ACTION_MINIMUM:
+		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
+		rdev->pm.requested_clock_mode =
+			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
+		break;
+	case PM_ACTION_DOWNCLOCK:
+		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
+		rdev->pm.requested_clock_mode =
+			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
+		break;
+	case PM_ACTION_UPCLOCK:
+		rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
+		rdev->pm.requested_clock_mode =
+			radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
+		break;
+	case PM_ACTION_NONE:
+	default:
+		DRM_ERROR("Requested mode for not defined action\n");
+		return;
+	}
+	DRM_INFO("Requested: e: %d m: %d p: %d\n",
+		 rdev->pm.requested_clock_mode->sclk,
+		 rdev->pm.requested_clock_mode->mclk,
+		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+}
+
+static void radeon_set_power_state(struct radeon_device *rdev)
+{
+	/* if *_clock_mode are the same, *_power_state are as well */
+	if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
+		return;
+
+	DRM_INFO("Setting: e: %d m: %d p: %d\n",
+		 rdev->pm.requested_clock_mode->sclk,
+		 rdev->pm.requested_clock_mode->mclk,
+		 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+	/* set pcie lanes */
+	/* set voltage */
+	/* set engine clock */
+	radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
+	/* set memory clock */
+
+	rdev->pm.current_power_state = rdev->pm.requested_power_state;
+	rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
+}
 
 int radeon_pm_init(struct radeon_device *rdev)
 {
+	rdev->pm.state = PM_STATE_DISABLED;
+	rdev->pm.planned_action = PM_ACTION_NONE;
+	rdev->pm.downclocked = false;
+
+	if (rdev->bios) {
+		if (rdev->is_atom_bios)
+			radeon_atombios_get_power_modes(rdev);
+		else
+			radeon_combios_get_power_modes(rdev);
+		radeon_print_power_mode_info(rdev);
+	}
+
 	if (radeon_debugfs_pm_init(rdev)) {
 		DRM_ERROR("Failed to register debugfs file for PM!\n");
 	}
 
+	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
+
+	if (radeon_dynpm != -1 && radeon_dynpm) {
+		rdev->pm.state = PM_STATE_PAUSED;
+		DRM_INFO("radeon: dynamic power management enabled\n");
+	}
+
+	DRM_INFO("radeon: power management initialized\n");
+
 	return 0;
 }
 
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_connector *connector;
+	struct radeon_crtc *radeon_crtc;
+	int count = 0;
+
+	if (rdev->pm.state == PM_STATE_DISABLED)
+		return;
+
+	mutex_lock(&rdev->pm.mutex);
+
+	rdev->pm.active_crtcs = 0;
+	list_for_each_entry(connector,
+		&ddev->mode_config.connector_list, head) {
+		if (connector->encoder &&
+		    connector->dpms != DRM_MODE_DPMS_OFF) {
+			radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
+			rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+			++count;
+		}
+	}
+
+	if (count > 1) {
+		if (rdev->pm.state == PM_STATE_ACTIVE) {
+			cancel_delayed_work(&rdev->pm.idle_work);
+
+			rdev->pm.state = PM_STATE_PAUSED;
+			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
+			if (rdev->pm.downclocked)
+				radeon_pm_set_clocks(rdev);
+
+			DRM_DEBUG("radeon: dynamic power management deactivated\n");
+		}
+	} else if (count == 1) {
+		/* TODO: Increase clocks if needed for current mode */
+
+		if (rdev->pm.state == PM_STATE_MINIMUM) {
+			rdev->pm.state = PM_STATE_ACTIVE;
+			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
+			radeon_pm_set_clocks(rdev);
+
+			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+		}
+		else if (rdev->pm.state == PM_STATE_PAUSED) {
+			rdev->pm.state = PM_STATE_ACTIVE;
+			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+				msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+			DRM_DEBUG("radeon: dynamic power management activated\n");
+		}
+	}
+	else { /* count == 0 */
+		if (rdev->pm.state != PM_STATE_MINIMUM) {
+			cancel_delayed_work(&rdev->pm.idle_work);
+
+			rdev->pm.state = PM_STATE_MINIMUM;
+			rdev->pm.planned_action = PM_ACTION_MINIMUM;
+			radeon_pm_set_clocks(rdev);
+		}
+	}
+
+	mutex_unlock(&rdev->pm.mutex);
+}
+
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+	u32 stat_crtc1 = 0, stat_crtc2 = 0;
+	bool in_vbl = true;
+
+	if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->pm.active_crtcs & (1 << 0)) {
+			stat_crtc1 = RREG32(D1CRTC_STATUS);
+			if (!(stat_crtc1 & 1))
+				in_vbl = false;
+		}
+		if (rdev->pm.active_crtcs & (1 << 1)) {
+			stat_crtc2 = RREG32(D2CRTC_STATUS);
+			if (!(stat_crtc2 & 1))
+				in_vbl = false;
+		}
+	}
+	if (in_vbl == false)
+		DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
+			 stat_crtc2, finish ? "exit" : "entry");
+	return in_vbl;
+}
+static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
+{
+	/*radeon_fence_wait_last(rdev);*/
+	switch (rdev->pm.planned_action) {
+	case PM_ACTION_UPCLOCK:
+		rdev->pm.downclocked = false;
+		break;
+	case PM_ACTION_DOWNCLOCK:
+		rdev->pm.downclocked = true;
+		break;
+	case PM_ACTION_MINIMUM:
+		break;
+	case PM_ACTION_NONE:
+		DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
+		break;
+	}
+
+	/* check if we are in vblank */
+	radeon_pm_debug_check_in_vbl(rdev, false);
+	radeon_set_power_state(rdev);
+	radeon_pm_debug_check_in_vbl(rdev, true);
+	rdev->pm.planned_action = PM_ACTION_NONE;
+}
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+	radeon_get_power_state(rdev, rdev->pm.planned_action);
+	mutex_lock(&rdev->cp.mutex);
+
+	if (rdev->pm.active_crtcs & (1 << 0)) {
+		rdev->pm.req_vblank |= (1 << 0);
+		drm_vblank_get(rdev->ddev, 0);
+	}
+	if (rdev->pm.active_crtcs & (1 << 1)) {
+		rdev->pm.req_vblank |= (1 << 1);
+		drm_vblank_get(rdev->ddev, 1);
+	}
+	if (rdev->pm.active_crtcs)
+		wait_event_interruptible_timeout(
+			rdev->irq.vblank_queue, 0,
+			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+	if (rdev->pm.req_vblank & (1 << 0)) {
+		rdev->pm.req_vblank &= ~(1 << 0);
+		drm_vblank_put(rdev->ddev, 0);
+	}
+	if (rdev->pm.req_vblank & (1 << 1)) {
+		rdev->pm.req_vblank &= ~(1 << 1);
+		drm_vblank_put(rdev->ddev, 1);
+	}
+
+	radeon_pm_set_clocks_locked(rdev);
+	mutex_unlock(&rdev->cp.mutex);
+}
+
+static void radeon_pm_idle_work_handler(struct work_struct *work)
+{
+	struct radeon_device *rdev;
+	rdev = container_of(work, struct radeon_device,
+			    pm.idle_work.work);
+
+	mutex_lock(&rdev->pm.mutex);
+	if (rdev->pm.state == PM_STATE_ACTIVE) {
+		unsigned long irq_flags;
+		int not_processed = 0;
+
+		read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+		if (!list_empty(&rdev->fence_drv.emited)) {
+			struct list_head *ptr;
+			list_for_each(ptr, &rdev->fence_drv.emited) {
+				/* count up to 3, that's enought info */
+				if (++not_processed >= 3)
+					break;
+			}
+		}
+		read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+
+		if (not_processed >= 3) { /* should upclock */
+			if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
+				rdev->pm.planned_action = PM_ACTION_NONE;
+			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
+				rdev->pm.downclocked) {
+				rdev->pm.planned_action =
+					PM_ACTION_UPCLOCK;
+				rdev->pm.action_timeout = jiffies +
+					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+			}
+		} else if (not_processed == 0) { /* should downclock */
+			if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
+				rdev->pm.planned_action = PM_ACTION_NONE;
+			} else if (rdev->pm.planned_action == PM_ACTION_NONE &&
+				!rdev->pm.downclocked) {
+				rdev->pm.planned_action =
+					PM_ACTION_DOWNCLOCK;
+				rdev->pm.action_timeout = jiffies +
+					msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+			}
+		}
+
+		if (rdev->pm.planned_action != PM_ACTION_NONE &&
+		    jiffies > rdev->pm.action_timeout) {
+			radeon_pm_set_clocks(rdev);
+		}
+	}
+	mutex_unlock(&rdev->pm.mutex);
+
+	queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+		msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+}
+
 /*
  * Debugfs info
  */
@@ -44,11 +436,14 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
+	seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
 	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
 	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
 	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
 	if (rdev->asic->get_memory_clock)
 		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+	if (rdev->asic->get_pcie_lanes)
+		seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
 
 	return 0;
 }
@@ -58,7 +453,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
 };
 #endif
 
-int radeon_debugfs_pm_init(struct radeon_device *rdev)
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
 {
 #if defined(CONFIG_DEBUG_FS)
 	return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));