diff options
author | Alex Deucher <alexdeucher@gmail.com> | 2010-05-07 15:10:16 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2010-05-18 04:21:52 -0400 |
commit | ce8f53709bf440100cb9d31b1303291551cf517f (patch) | |
tree | 6785882522ae04486ae900b5c8dbc700dadad2f3 /drivers/gpu/drm/radeon/radeon_pm.c | |
parent | d7311171c4cc8d6231427f7ac5056b939a184b80 (diff) |
drm/radeon/kms/pm: rework power management
- Separate dynpm and profile based power management methods. You can select the pm method
by echoing the selected method ("dynpm" or "profile") to power_method in sysfs.
- Expose 4 basic profiles in the profile method
"default" - default clocks
"auto" - select between low and high based on ac/dc state
"low" - DC, low power mode
"high" - AC, performance mode
The current base profile is "default", but it should be switched to "auto" once we've tested
on more systems. Switching the state is a matter of echoing the requested profile to
power_profile in sysfs. The lowest power states are selected automatically when dpms turns
the monitors off in all states but default.
- Remove dynamic fence-based reclocking for the moment. We can revisit this later once we
have basic pm in.
- Move pm init/fini to modesetting path. pm is tightly coupled with display state. Make sure
display side is initialized before pm.
- Add pm suspend/resume functions to make sure pm state is properly reinitialized on resume.
- Remove dynpm module option. It's now selectable via sysfs.
Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_pm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_pm.c | 628 |
1 files changed, 383 insertions, 245 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 0dfa508fe5f2..1827317704a2 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -23,14 +23,98 @@ | |||
23 | #include "drmP.h" | 23 | #include "drmP.h" |
24 | #include "radeon.h" | 24 | #include "radeon.h" |
25 | #include "avivod.h" | 25 | #include "avivod.h" |
26 | #ifdef CONFIG_ACPI | ||
27 | #include <linux/acpi.h> | ||
28 | #endif | ||
29 | #include <linux/power_supply.h> | ||
26 | 30 | ||
27 | #define RADEON_IDLE_LOOP_MS 100 | 31 | #define RADEON_IDLE_LOOP_MS 100 |
28 | #define RADEON_RECLOCK_DELAY_MS 200 | 32 | #define RADEON_RECLOCK_DELAY_MS 200 |
29 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 | 33 | #define RADEON_WAIT_VBLANK_TIMEOUT 200 |
30 | #define RADEON_WAIT_IDLE_TIMEOUT 200 | 34 | #define RADEON_WAIT_IDLE_TIMEOUT 200 |
31 | 35 | ||
32 | static void radeon_pm_idle_work_handler(struct work_struct *work); | 36 | static void radeon_dynpm_idle_work_handler(struct work_struct *work); |
33 | static int radeon_debugfs_pm_init(struct radeon_device *rdev); | 37 | static int radeon_debugfs_pm_init(struct radeon_device *rdev); |
38 | static bool radeon_pm_in_vbl(struct radeon_device *rdev); | ||
39 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); | ||
40 | static void radeon_pm_update_profile(struct radeon_device *rdev); | ||
41 | static void radeon_pm_set_clocks(struct radeon_device *rdev); | ||
42 | |||
43 | #define ACPI_AC_CLASS "ac_adapter" | ||
44 | |||
45 | #ifdef CONFIG_ACPI | ||
46 | static int radeon_acpi_event(struct notifier_block *nb, | ||
47 | unsigned long val, | ||
48 | void *data) | ||
49 | { | ||
50 | struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb); | ||
51 | struct acpi_bus_event *entry = (struct acpi_bus_event *)data; | ||
52 | |||
53 | if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { | ||
54 | if (power_supply_is_system_supplied() > 0) | ||
55 | DRM_INFO("pm: AC\n"); | ||
56 | else | ||
57 | DRM_INFO("pm: DC\n"); | ||
58 | |||
59 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | ||
60 | if (rdev->pm.profile == PM_PROFILE_AUTO) { | ||
61 | mutex_lock(&rdev->pm.mutex); | ||
62 | radeon_pm_update_profile(rdev); | ||
63 | radeon_pm_set_clocks(rdev); | ||
64 | mutex_unlock(&rdev->pm.mutex); | ||
65 | } | ||
66 | } | ||
67 | } | ||
68 | |||
69 | return NOTIFY_OK; | ||
70 | } | ||
71 | #endif | ||
72 | |||
73 | static void radeon_pm_update_profile(struct radeon_device *rdev) | ||
74 | { | ||
75 | switch (rdev->pm.profile) { | ||
76 | case PM_PROFILE_DEFAULT: | ||
77 | rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; | ||
78 | break; | ||
79 | case PM_PROFILE_AUTO: | ||
80 | if (power_supply_is_system_supplied() > 0) { | ||
81 | if (rdev->pm.active_crtc_count > 1) | ||
82 | rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; | ||
83 | else | ||
84 | rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; | ||
85 | } else { | ||
86 | if (rdev->pm.active_crtc_count > 1) | ||
87 | rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; | ||
88 | else | ||
89 | rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; | ||
90 | } | ||
91 | break; | ||
92 | case PM_PROFILE_LOW: | ||
93 | if (rdev->pm.active_crtc_count > 1) | ||
94 | rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; | ||
95 | else | ||
96 | rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; | ||
97 | break; | ||
98 | case PM_PROFILE_HIGH: | ||
99 | if (rdev->pm.active_crtc_count > 1) | ||
100 | rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; | ||
101 | else | ||
102 | rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; | ||
103 | break; | ||
104 | } | ||
105 | |||
106 | if (rdev->pm.active_crtc_count == 0) { | ||
107 | rdev->pm.requested_power_state_index = | ||
108 | rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; | ||
109 | rdev->pm.requested_clock_mode_index = | ||
110 | rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; | ||
111 | } else { | ||
112 | rdev->pm.requested_power_state_index = | ||
113 | rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; | ||
114 | rdev->pm.requested_clock_mode_index = | ||
115 | rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; | ||
116 | } | ||
117 | } | ||
34 | 118 | ||
35 | static void radeon_unmap_vram_bos(struct radeon_device *rdev) | 119 | static void radeon_unmap_vram_bos(struct radeon_device *rdev) |
36 | { | 120 | { |
@@ -54,12 +138,93 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev) | |||
54 | ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo); | 138 | ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo); |
55 | } | 139 | } |
56 | 140 | ||
57 | static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch) | 141 | static void radeon_sync_with_vblank(struct radeon_device *rdev) |
58 | { | 142 | { |
59 | int i; | 143 | if (rdev->pm.active_crtcs) { |
144 | rdev->pm.vblank_sync = false; | ||
145 | wait_event_timeout( | ||
146 | rdev->irq.vblank_queue, rdev->pm.vblank_sync, | ||
147 | msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); | ||
148 | } | ||
149 | } | ||
150 | |||
151 | static void radeon_set_power_state(struct radeon_device *rdev) | ||
152 | { | ||
153 | u32 sclk, mclk; | ||
154 | |||
155 | if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && | ||
156 | (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) | ||
157 | return; | ||
158 | |||
159 | if (radeon_gui_idle(rdev)) { | ||
160 | sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
161 | clock_info[rdev->pm.requested_clock_mode_index].sclk; | ||
162 | if (sclk > rdev->clock.default_sclk) | ||
163 | sclk = rdev->clock.default_sclk; | ||
164 | |||
165 | mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. | ||
166 | clock_info[rdev->pm.requested_clock_mode_index].mclk; | ||
167 | if (mclk > rdev->clock.default_mclk) | ||
168 | mclk = rdev->clock.default_mclk; | ||
169 | |||
170 | /* voltage, pcie lanes, etc.*/ | ||
171 | radeon_pm_misc(rdev); | ||
172 | |||
173 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | ||
174 | radeon_sync_with_vblank(rdev); | ||
175 | |||
176 | if (!radeon_pm_in_vbl(rdev)) | ||
177 | return; | ||
178 | |||
179 | radeon_pm_prepare(rdev); | ||
180 | /* set engine clock */ | ||
181 | if (sclk != rdev->pm.current_sclk) { | ||
182 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
183 | radeon_set_engine_clock(rdev, sclk); | ||
184 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
185 | rdev->pm.current_sclk = sclk; | ||
186 | DRM_INFO("Setting: e: %d\n", sclk); | ||
187 | } | ||
188 | |||
189 | /* set memory clock */ | ||
190 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | ||
191 | radeon_pm_debug_check_in_vbl(rdev, false); | ||
192 | radeon_set_memory_clock(rdev, mclk); | ||
193 | radeon_pm_debug_check_in_vbl(rdev, true); | ||
194 | rdev->pm.current_mclk = mclk; | ||
195 | DRM_INFO("Setting: m: %d\n", mclk); | ||
196 | } | ||
197 | radeon_pm_finish(rdev); | ||
198 | } else { | ||
199 | /* set engine clock */ | ||
200 | if (sclk != rdev->pm.current_sclk) { | ||
201 | radeon_sync_with_vblank(rdev); | ||
202 | radeon_pm_prepare(rdev); | ||
203 | radeon_set_engine_clock(rdev, sclk); | ||
204 | radeon_pm_finish(rdev); | ||
205 | rdev->pm.current_sclk = sclk; | ||
206 | DRM_INFO("Setting: e: %d\n", sclk); | ||
207 | } | ||
208 | /* set memory clock */ | ||
209 | if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) { | ||
210 | radeon_sync_with_vblank(rdev); | ||
211 | radeon_pm_prepare(rdev); | ||
212 | radeon_set_memory_clock(rdev, mclk); | ||
213 | radeon_pm_finish(rdev); | ||
214 | rdev->pm.current_mclk = mclk; | ||
215 | DRM_INFO("Setting: m: %d\n", mclk); | ||
216 | } | ||
217 | } | ||
60 | 218 | ||
61 | if (rdev->pm.state != PM_STATE_DISABLED) | 219 | rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; |
62 | radeon_get_power_state(rdev, rdev->pm.planned_action); | 220 | rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; |
221 | } else | ||
222 | DRM_INFO("pm: GUI not idle!!!\n"); | ||
223 | } | ||
224 | |||
225 | static void radeon_pm_set_clocks(struct radeon_device *rdev) | ||
226 | { | ||
227 | int i; | ||
63 | 228 | ||
64 | mutex_lock(&rdev->ddev->struct_mutex); | 229 | mutex_lock(&rdev->ddev->struct_mutex); |
65 | mutex_lock(&rdev->vram_mutex); | 230 | mutex_lock(&rdev->vram_mutex); |
@@ -67,27 +232,31 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch) | |||
67 | 232 | ||
68 | /* gui idle int has issues on older chips it seems */ | 233 | /* gui idle int has issues on older chips it seems */ |
69 | if (rdev->family >= CHIP_R600) { | 234 | if (rdev->family >= CHIP_R600) { |
70 | /* wait for GPU idle */ | 235 | if (rdev->irq.installed) { |
71 | rdev->pm.gui_idle = false; | 236 | /* wait for GPU idle */ |
72 | rdev->irq.gui_idle = true; | 237 | rdev->pm.gui_idle = false; |
73 | radeon_irq_set(rdev); | 238 | rdev->irq.gui_idle = true; |
74 | wait_event_interruptible_timeout( | 239 | radeon_irq_set(rdev); |
75 | rdev->irq.idle_queue, rdev->pm.gui_idle, | 240 | wait_event_interruptible_timeout( |
76 | msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); | 241 | rdev->irq.idle_queue, rdev->pm.gui_idle, |
77 | rdev->irq.gui_idle = false; | 242 | msecs_to_jiffies(RADEON_WAIT_IDLE_TIMEOUT)); |
78 | radeon_irq_set(rdev); | 243 | rdev->irq.gui_idle = false; |
244 | radeon_irq_set(rdev); | ||
245 | } | ||
79 | } else { | 246 | } else { |
80 | struct radeon_fence *fence; | 247 | if (rdev->cp.ready) { |
81 | radeon_ring_alloc(rdev, 64); | 248 | struct radeon_fence *fence; |
82 | radeon_fence_create(rdev, &fence); | 249 | radeon_ring_alloc(rdev, 64); |
83 | radeon_fence_emit(rdev, fence); | 250 | radeon_fence_create(rdev, &fence); |
84 | radeon_ring_commit(rdev); | 251 | radeon_fence_emit(rdev, fence); |
85 | radeon_fence_wait(fence, false); | 252 | radeon_ring_commit(rdev); |
86 | radeon_fence_unref(&fence); | 253 | radeon_fence_wait(fence, false); |
254 | radeon_fence_unref(&fence); | ||
255 | } | ||
87 | } | 256 | } |
88 | radeon_unmap_vram_bos(rdev); | 257 | radeon_unmap_vram_bos(rdev); |
89 | 258 | ||
90 | if (!static_switch) { | 259 | if (rdev->irq.installed) { |
91 | for (i = 0; i < rdev->num_crtc; i++) { | 260 | for (i = 0; i < rdev->num_crtc; i++) { |
92 | if (rdev->pm.active_crtcs & (1 << i)) { | 261 | if (rdev->pm.active_crtcs & (1 << i)) { |
93 | rdev->pm.req_vblank |= (1 << i); | 262 | rdev->pm.req_vblank |= (1 << i); |
@@ -96,9 +265,9 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch) | |||
96 | } | 265 | } |
97 | } | 266 | } |
98 | 267 | ||
99 | radeon_set_power_state(rdev, static_switch); | 268 | radeon_set_power_state(rdev); |
100 | 269 | ||
101 | if (!static_switch) { | 270 | if (rdev->irq.installed) { |
102 | for (i = 0; i < rdev->num_crtc; i++) { | 271 | for (i = 0; i < rdev->num_crtc; i++) { |
103 | if (rdev->pm.req_vblank & (1 << i)) { | 272 | if (rdev->pm.req_vblank & (1 << i)) { |
104 | rdev->pm.req_vblank &= ~(1 << i); | 273 | rdev->pm.req_vblank &= ~(1 << i); |
@@ -112,230 +281,195 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch) | |||
112 | if (rdev->pm.active_crtc_count) | 281 | if (rdev->pm.active_crtc_count) |
113 | radeon_bandwidth_update(rdev); | 282 | radeon_bandwidth_update(rdev); |
114 | 283 | ||
115 | rdev->pm.planned_action = PM_ACTION_NONE; | 284 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
116 | 285 | ||
117 | mutex_unlock(&rdev->cp.mutex); | 286 | mutex_unlock(&rdev->cp.mutex); |
118 | mutex_unlock(&rdev->vram_mutex); | 287 | mutex_unlock(&rdev->vram_mutex); |
119 | mutex_unlock(&rdev->ddev->struct_mutex); | 288 | mutex_unlock(&rdev->ddev->struct_mutex); |
120 | } | 289 | } |
121 | 290 | ||
122 | static ssize_t radeon_get_power_state_static(struct device *dev, | 291 | static ssize_t radeon_get_pm_profile(struct device *dev, |
123 | struct device_attribute *attr, | 292 | struct device_attribute *attr, |
124 | char *buf) | 293 | char *buf) |
125 | { | 294 | { |
126 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 295 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); |
127 | struct radeon_device *rdev = ddev->dev_private; | 296 | struct radeon_device *rdev = ddev->dev_private; |
297 | int cp = rdev->pm.profile; | ||
128 | 298 | ||
129 | return snprintf(buf, PAGE_SIZE, "%d.%d\n", rdev->pm.current_power_state_index, | 299 | return snprintf(buf, PAGE_SIZE, "%s\n", |
130 | rdev->pm.current_clock_mode_index); | 300 | (cp == PM_PROFILE_AUTO) ? "auto" : |
301 | (cp == PM_PROFILE_LOW) ? "low" : | ||
302 | (cp == PM_PROFILE_HIGH) ? "high" : "default"); | ||
131 | } | 303 | } |
132 | 304 | ||
133 | static ssize_t radeon_set_power_state_static(struct device *dev, | 305 | static ssize_t radeon_set_pm_profile(struct device *dev, |
134 | struct device_attribute *attr, | 306 | struct device_attribute *attr, |
135 | const char *buf, | 307 | const char *buf, |
136 | size_t count) | 308 | size_t count) |
137 | { | 309 | { |
138 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 310 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); |
139 | struct radeon_device *rdev = ddev->dev_private; | 311 | struct radeon_device *rdev = ddev->dev_private; |
140 | int ps, cm; | ||
141 | |||
142 | if (sscanf(buf, "%u.%u", &ps, &cm) != 2) { | ||
143 | DRM_ERROR("Invalid power state!\n"); | ||
144 | return count; | ||
145 | } | ||
146 | 312 | ||
147 | mutex_lock(&rdev->pm.mutex); | 313 | mutex_lock(&rdev->pm.mutex); |
148 | if ((ps >= 0) && (ps < rdev->pm.num_power_states) && | 314 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
149 | (cm >= 0) && (cm < rdev->pm.power_state[ps].num_clock_modes)) { | 315 | if (strncmp("default", buf, strlen("default")) == 0) |
150 | if ((rdev->pm.active_crtc_count > 0) && | 316 | rdev->pm.profile = PM_PROFILE_DEFAULT; |
151 | (rdev->pm.power_state[ps].clock_info[cm].flags & RADEON_PM_MODE_NO_DISPLAY)) { | 317 | else if (strncmp("auto", buf, strlen("auto")) == 0) |
152 | DRM_ERROR("Invalid power state for display: %d.%d\n", ps, cm); | 318 | rdev->pm.profile = PM_PROFILE_AUTO; |
153 | } else if ((rdev->pm.active_crtc_count > 1) && | 319 | else if (strncmp("low", buf, strlen("low")) == 0) |
154 | (rdev->pm.power_state[ps].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)) { | 320 | rdev->pm.profile = PM_PROFILE_LOW; |
155 | DRM_ERROR("Invalid power state for multi-head: %d.%d\n", ps, cm); | 321 | else if (strncmp("high", buf, strlen("high")) == 0) |
156 | } else { | 322 | rdev->pm.profile = PM_PROFILE_HIGH; |
157 | /* disable dynpm */ | 323 | else { |
158 | rdev->pm.state = PM_STATE_DISABLED; | 324 | DRM_ERROR("invalid power profile!\n"); |
159 | rdev->pm.planned_action = PM_ACTION_NONE; | 325 | goto fail; |
160 | rdev->pm.requested_power_state_index = ps; | ||
161 | rdev->pm.requested_clock_mode_index = cm; | ||
162 | radeon_pm_set_clocks(rdev, true); | ||
163 | } | 326 | } |
164 | } else | 327 | radeon_pm_update_profile(rdev); |
165 | DRM_ERROR("Invalid power state: %d.%d\n\n", ps, cm); | 328 | radeon_pm_set_clocks(rdev); |
329 | } | ||
330 | fail: | ||
166 | mutex_unlock(&rdev->pm.mutex); | 331 | mutex_unlock(&rdev->pm.mutex); |
167 | 332 | ||
168 | return count; | 333 | return count; |
169 | } | 334 | } |
170 | 335 | ||
171 | static ssize_t radeon_get_dynpm(struct device *dev, | 336 | static ssize_t radeon_get_pm_method(struct device *dev, |
172 | struct device_attribute *attr, | 337 | struct device_attribute *attr, |
173 | char *buf) | 338 | char *buf) |
174 | { | 339 | { |
175 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 340 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); |
176 | struct radeon_device *rdev = ddev->dev_private; | 341 | struct radeon_device *rdev = ddev->dev_private; |
342 | int pm = rdev->pm.pm_method; | ||
177 | 343 | ||
178 | return snprintf(buf, PAGE_SIZE, "%s\n", | 344 | return snprintf(buf, PAGE_SIZE, "%s\n", |
179 | (rdev->pm.state == PM_STATE_DISABLED) ? "disabled" : "enabled"); | 345 | (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile"); |
180 | } | 346 | } |
181 | 347 | ||
182 | static ssize_t radeon_set_dynpm(struct device *dev, | 348 | static ssize_t radeon_set_pm_method(struct device *dev, |
183 | struct device_attribute *attr, | 349 | struct device_attribute *attr, |
184 | const char *buf, | 350 | const char *buf, |
185 | size_t count) | 351 | size_t count) |
186 | { | 352 | { |
187 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); | 353 | struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); |
188 | struct radeon_device *rdev = ddev->dev_private; | 354 | struct radeon_device *rdev = ddev->dev_private; |
189 | int tmp = simple_strtoul(buf, NULL, 10); | ||
190 | 355 | ||
191 | if (tmp == 0) { | 356 | |
192 | /* update power mode info */ | 357 | if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { |
193 | radeon_pm_compute_clocks(rdev); | ||
194 | /* disable dynpm */ | ||
195 | mutex_lock(&rdev->pm.mutex); | 358 | mutex_lock(&rdev->pm.mutex); |
196 | rdev->pm.state = PM_STATE_DISABLED; | 359 | rdev->pm.pm_method = PM_METHOD_DYNPM; |
197 | rdev->pm.planned_action = PM_ACTION_NONE; | 360 | rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; |
361 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | ||
198 | mutex_unlock(&rdev->pm.mutex); | 362 | mutex_unlock(&rdev->pm.mutex); |
199 | DRM_INFO("radeon: dynamic power management disabled\n"); | 363 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { |
200 | } else if (tmp == 1) { | 364 | mutex_lock(&rdev->pm.mutex); |
201 | if (rdev->pm.num_power_states > 1) { | 365 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
202 | /* enable dynpm */ | 366 | /* disable dynpm */ |
203 | mutex_lock(&rdev->pm.mutex); | 367 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
204 | rdev->pm.state = PM_STATE_PAUSED; | 368 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
205 | rdev->pm.planned_action = PM_ACTION_DEFAULT; | 369 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
206 | radeon_get_power_state(rdev, rdev->pm.planned_action); | 370 | mutex_unlock(&rdev->pm.mutex); |
207 | mutex_unlock(&rdev->pm.mutex); | 371 | } else { |
208 | /* update power mode info */ | 372 | DRM_ERROR("invalid power method!\n"); |
209 | radeon_pm_compute_clocks(rdev); | 373 | goto fail; |
210 | DRM_INFO("radeon: dynamic power management enabled\n"); | 374 | } |
211 | } else | 375 | radeon_pm_compute_clocks(rdev); |
212 | DRM_ERROR("dynpm not valid on this system\n"); | 376 | fail: |
213 | } else | ||
214 | DRM_ERROR("Invalid setting: %d\n", tmp); | ||
215 | |||
216 | return count; | 377 | return count; |
217 | } | 378 | } |
218 | 379 | ||
219 | static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, radeon_get_power_state_static, radeon_set_power_state_static); | 380 | static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); |
220 | static DEVICE_ATTR(dynpm, S_IRUGO | S_IWUSR, radeon_get_dynpm, radeon_set_dynpm); | 381 | static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); |
221 | |||
222 | 382 | ||
223 | static const char *pm_state_names[4] = { | 383 | void radeon_pm_suspend(struct radeon_device *rdev) |
224 | "PM_STATE_DISABLED", | ||
225 | "PM_STATE_MINIMUM", | ||
226 | "PM_STATE_PAUSED", | ||
227 | "PM_STATE_ACTIVE" | ||
228 | }; | ||
229 | |||
230 | static const char *pm_state_types[5] = { | ||
231 | "", | ||
232 | "Powersave", | ||
233 | "Battery", | ||
234 | "Balanced", | ||
235 | "Performance", | ||
236 | }; | ||
237 | |||
238 | static void radeon_print_power_mode_info(struct radeon_device *rdev) | ||
239 | { | 384 | { |
240 | int i, j; | 385 | mutex_lock(&rdev->pm.mutex); |
241 | bool is_default; | 386 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
242 | 387 | rdev->pm.current_power_state_index = -1; | |
243 | DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); | 388 | rdev->pm.current_clock_mode_index = -1; |
244 | for (i = 0; i < rdev->pm.num_power_states; i++) { | 389 | rdev->pm.current_sclk = 0; |
245 | if (rdev->pm.default_power_state_index == i) | 390 | rdev->pm.current_mclk = 0; |
246 | is_default = true; | 391 | mutex_unlock(&rdev->pm.mutex); |
247 | else | ||
248 | is_default = false; | ||
249 | DRM_INFO("State %d %s %s\n", i, | ||
250 | pm_state_types[rdev->pm.power_state[i].type], | ||
251 | is_default ? "(default)" : ""); | ||
252 | if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) | ||
253 | DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].pcie_lanes); | ||
254 | if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) | ||
255 | DRM_INFO("\tSingle display only\n"); | ||
256 | DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes); | ||
257 | for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) { | ||
258 | if (rdev->flags & RADEON_IS_IGP) | ||
259 | DRM_INFO("\t\t%d engine: %d\n", | ||
260 | j, | ||
261 | rdev->pm.power_state[i].clock_info[j].sclk * 10); | ||
262 | else | ||
263 | DRM_INFO("\t\t%d engine/memory: %d/%d\n", | ||
264 | j, | ||
265 | rdev->pm.power_state[i].clock_info[j].sclk * 10, | ||
266 | rdev->pm.power_state[i].clock_info[j].mclk * 10); | ||
267 | if (rdev->pm.power_state[i].clock_info[j].flags & RADEON_PM_MODE_NO_DISPLAY) | ||
268 | DRM_INFO("\t\tNo display only\n"); | ||
269 | } | ||
270 | } | ||
271 | } | 392 | } |
272 | 393 | ||
273 | void radeon_sync_with_vblank(struct radeon_device *rdev) | 394 | void radeon_pm_resume(struct radeon_device *rdev) |
274 | { | 395 | { |
275 | if (rdev->pm.active_crtcs) { | 396 | radeon_pm_compute_clocks(rdev); |
276 | rdev->pm.vblank_sync = false; | ||
277 | wait_event_timeout( | ||
278 | rdev->irq.vblank_queue, rdev->pm.vblank_sync, | ||
279 | msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); | ||
280 | } | ||
281 | } | 397 | } |
282 | 398 | ||
283 | int radeon_pm_init(struct radeon_device *rdev) | 399 | int radeon_pm_init(struct radeon_device *rdev) |
284 | { | 400 | { |
285 | rdev->pm.state = PM_STATE_DISABLED; | 401 | /* default to profile method */ |
286 | rdev->pm.planned_action = PM_ACTION_NONE; | 402 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
287 | rdev->pm.can_upclock = true; | 403 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
288 | rdev->pm.can_downclock = true; | 404 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
405 | rdev->pm.dynpm_can_upclock = true; | ||
406 | rdev->pm.dynpm_can_downclock = true; | ||
407 | rdev->pm.current_sclk = 0; | ||
408 | rdev->pm.current_mclk = 0; | ||
289 | 409 | ||
290 | if (rdev->bios) { | 410 | if (rdev->bios) { |
291 | if (rdev->is_atom_bios) | 411 | if (rdev->is_atom_bios) |
292 | radeon_atombios_get_power_modes(rdev); | 412 | radeon_atombios_get_power_modes(rdev); |
293 | else | 413 | else |
294 | radeon_combios_get_power_modes(rdev); | 414 | radeon_combios_get_power_modes(rdev); |
295 | radeon_print_power_mode_info(rdev); | 415 | radeon_pm_init_profile(rdev); |
416 | rdev->pm.current_power_state_index = -1; | ||
417 | rdev->pm.current_clock_mode_index = -1; | ||
296 | } | 418 | } |
297 | 419 | ||
298 | if (radeon_debugfs_pm_init(rdev)) { | 420 | if (rdev->pm.num_power_states > 1) { |
299 | DRM_ERROR("Failed to register debugfs file for PM!\n"); | 421 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
300 | } | 422 | mutex_lock(&rdev->pm.mutex); |
423 | rdev->pm.profile = PM_PROFILE_DEFAULT; | ||
424 | radeon_pm_update_profile(rdev); | ||
425 | radeon_pm_set_clocks(rdev); | ||
426 | mutex_unlock(&rdev->pm.mutex); | ||
427 | } | ||
301 | 428 | ||
302 | /* where's the best place to put this? */ | 429 | /* where's the best place to put these? */ |
303 | device_create_file(rdev->dev, &dev_attr_power_state); | 430 | device_create_file(rdev->dev, &dev_attr_power_profile); |
304 | device_create_file(rdev->dev, &dev_attr_dynpm); | 431 | device_create_file(rdev->dev, &dev_attr_power_method); |
305 | 432 | ||
306 | INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler); | 433 | #ifdef CONFIG_ACPI |
434 | rdev->acpi_nb.notifier_call = radeon_acpi_event; | ||
435 | register_acpi_notifier(&rdev->acpi_nb); | ||
436 | #endif | ||
437 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | ||
307 | 438 | ||
308 | if ((radeon_dynpm != -1 && radeon_dynpm) && (rdev->pm.num_power_states > 1)) { | 439 | if (radeon_debugfs_pm_init(rdev)) { |
309 | rdev->pm.state = PM_STATE_PAUSED; | 440 | DRM_ERROR("Failed to register debugfs file for PM!\n"); |
310 | DRM_INFO("radeon: dynamic power management enabled\n"); | 441 | } |
311 | } | ||
312 | 442 | ||
313 | DRM_INFO("radeon: power management initialized\n"); | 443 | DRM_INFO("radeon: power management initialized\n"); |
444 | } | ||
314 | 445 | ||
315 | return 0; | 446 | return 0; |
316 | } | 447 | } |
317 | 448 | ||
318 | void radeon_pm_fini(struct radeon_device *rdev) | 449 | void radeon_pm_fini(struct radeon_device *rdev) |
319 | { | 450 | { |
320 | if (rdev->pm.state != PM_STATE_DISABLED) { | 451 | if (rdev->pm.num_power_states > 1) { |
321 | /* cancel work */ | ||
322 | cancel_delayed_work_sync(&rdev->pm.idle_work); | ||
323 | /* reset default clocks */ | ||
324 | rdev->pm.state = PM_STATE_DISABLED; | ||
325 | rdev->pm.planned_action = PM_ACTION_DEFAULT; | ||
326 | radeon_pm_set_clocks(rdev, true); | ||
327 | } else if ((rdev->pm.current_power_state_index != | ||
328 | rdev->pm.default_power_state_index) || | ||
329 | (rdev->pm.current_clock_mode_index != 0)) { | ||
330 | rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; | ||
331 | rdev->pm.requested_clock_mode_index = 0; | ||
332 | mutex_lock(&rdev->pm.mutex); | 452 | mutex_lock(&rdev->pm.mutex); |
333 | radeon_pm_set_clocks(rdev, true); | 453 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
454 | rdev->pm.profile = PM_PROFILE_DEFAULT; | ||
455 | radeon_pm_update_profile(rdev); | ||
456 | radeon_pm_set_clocks(rdev); | ||
457 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | ||
458 | /* cancel work */ | ||
459 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); | ||
460 | /* reset default clocks */ | ||
461 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | ||
462 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | ||
463 | radeon_pm_set_clocks(rdev); | ||
464 | } | ||
334 | mutex_unlock(&rdev->pm.mutex); | 465 | mutex_unlock(&rdev->pm.mutex); |
335 | } | ||
336 | 466 | ||
337 | device_remove_file(rdev->dev, &dev_attr_power_state); | 467 | device_remove_file(rdev->dev, &dev_attr_power_profile); |
338 | device_remove_file(rdev->dev, &dev_attr_dynpm); | 468 | device_remove_file(rdev->dev, &dev_attr_power_method); |
469 | #ifdef CONFIG_ACPI | ||
470 | unregister_acpi_notifier(&rdev->acpi_nb); | ||
471 | #endif | ||
472 | } | ||
339 | 473 | ||
340 | if (rdev->pm.i2c_bus) | 474 | if (rdev->pm.i2c_bus) |
341 | radeon_i2c_destroy(rdev->pm.i2c_bus); | 475 | radeon_i2c_destroy(rdev->pm.i2c_bus); |
@@ -347,6 +481,9 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
347 | struct drm_crtc *crtc; | 481 | struct drm_crtc *crtc; |
348 | struct radeon_crtc *radeon_crtc; | 482 | struct radeon_crtc *radeon_crtc; |
349 | 483 | ||
484 | if (rdev->pm.num_power_states < 2) | ||
485 | return; | ||
486 | |||
350 | mutex_lock(&rdev->pm.mutex); | 487 | mutex_lock(&rdev->pm.mutex); |
351 | 488 | ||
352 | rdev->pm.active_crtcs = 0; | 489 | rdev->pm.active_crtcs = 0; |
@@ -360,55 +497,56 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
360 | } | 497 | } |
361 | } | 498 | } |
362 | 499 | ||
363 | if (rdev->pm.state == PM_STATE_DISABLED) { | 500 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
364 | mutex_unlock(&rdev->pm.mutex); | 501 | radeon_pm_update_profile(rdev); |
365 | return; | 502 | radeon_pm_set_clocks(rdev); |
366 | } | 503 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
367 | 504 | if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { | |
368 | /* Note, radeon_pm_set_clocks is called with static_switch set | 505 | if (rdev->pm.active_crtc_count > 1) { |
369 | * to true since we always want to statically set the clocks, | 506 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { |
370 | * not wait for vbl. | 507 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
371 | */ | 508 | |
372 | if (rdev->pm.active_crtc_count > 1) { | 509 | rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; |
373 | if (rdev->pm.state == PM_STATE_ACTIVE) { | 510 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
374 | cancel_delayed_work(&rdev->pm.idle_work); | 511 | radeon_pm_get_dynpm_state(rdev); |
375 | 512 | radeon_pm_set_clocks(rdev); | |
376 | rdev->pm.state = PM_STATE_PAUSED; | 513 | |
377 | rdev->pm.planned_action = PM_ACTION_DEFAULT; | 514 | DRM_DEBUG("radeon: dynamic power management deactivated\n"); |
378 | radeon_pm_set_clocks(rdev, true); | 515 | } |
379 | 516 | } else if (rdev->pm.active_crtc_count == 1) { | |
380 | DRM_DEBUG("radeon: dynamic power management deactivated\n"); | 517 | /* TODO: Increase clocks if needed for current mode */ |
381 | } | 518 | |
382 | } else if (rdev->pm.active_crtc_count == 1) { | 519 | if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { |
383 | /* TODO: Increase clocks if needed for current mode */ | 520 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
384 | 521 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; | |
385 | if (rdev->pm.state == PM_STATE_MINIMUM) { | 522 | radeon_pm_get_dynpm_state(rdev); |
386 | rdev->pm.state = PM_STATE_ACTIVE; | 523 | radeon_pm_set_clocks(rdev); |
387 | rdev->pm.planned_action = PM_ACTION_UPCLOCK; | 524 | |
388 | radeon_pm_set_clocks(rdev, true); | 525 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, |
389 | 526 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | |
390 | queue_delayed_work(rdev->wq, &rdev->pm.idle_work, | 527 | } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { |
391 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 528 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
392 | } else if (rdev->pm.state == PM_STATE_PAUSED) { | 529 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, |
393 | rdev->pm.state = PM_STATE_ACTIVE; | 530 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
394 | queue_delayed_work(rdev->wq, &rdev->pm.idle_work, | 531 | DRM_DEBUG("radeon: dynamic power management activated\n"); |
395 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 532 | } |
396 | DRM_DEBUG("radeon: dynamic power management activated\n"); | 533 | } else { /* count == 0 */ |
397 | } | 534 | if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { |
398 | } else { /* count == 0 */ | 535 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); |
399 | if (rdev->pm.state != PM_STATE_MINIMUM) { | 536 | |
400 | cancel_delayed_work(&rdev->pm.idle_work); | 537 | rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; |
401 | 538 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; | |
402 | rdev->pm.state = PM_STATE_MINIMUM; | 539 | radeon_pm_get_dynpm_state(rdev); |
403 | rdev->pm.planned_action = PM_ACTION_MINIMUM; | 540 | radeon_pm_set_clocks(rdev); |
404 | radeon_pm_set_clocks(rdev, true); | 541 | } |
542 | } | ||
405 | } | 543 | } |
406 | } | 544 | } |
407 | 545 | ||
408 | mutex_unlock(&rdev->pm.mutex); | 546 | mutex_unlock(&rdev->pm.mutex); |
409 | } | 547 | } |
410 | 548 | ||
411 | bool radeon_pm_in_vbl(struct radeon_device *rdev) | 549 | static bool radeon_pm_in_vbl(struct radeon_device *rdev) |
412 | { | 550 | { |
413 | u32 stat_crtc = 0, vbl = 0, position = 0; | 551 | u32 stat_crtc = 0, vbl = 0, position = 0; |
414 | bool in_vbl = true; | 552 | bool in_vbl = true; |
@@ -480,7 +618,7 @@ bool radeon_pm_in_vbl(struct radeon_device *rdev) | |||
480 | return in_vbl; | 618 | return in_vbl; |
481 | } | 619 | } |
482 | 620 | ||
483 | bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) | 621 | static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) |
484 | { | 622 | { |
485 | u32 stat_crtc = 0; | 623 | u32 stat_crtc = 0; |
486 | bool in_vbl = radeon_pm_in_vbl(rdev); | 624 | bool in_vbl = radeon_pm_in_vbl(rdev); |
@@ -491,16 +629,16 @@ bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) | |||
491 | return in_vbl; | 629 | return in_vbl; |
492 | } | 630 | } |
493 | 631 | ||
494 | static void radeon_pm_idle_work_handler(struct work_struct *work) | 632 | static void radeon_dynpm_idle_work_handler(struct work_struct *work) |
495 | { | 633 | { |
496 | struct radeon_device *rdev; | 634 | struct radeon_device *rdev; |
497 | int resched; | 635 | int resched; |
498 | rdev = container_of(work, struct radeon_device, | 636 | rdev = container_of(work, struct radeon_device, |
499 | pm.idle_work.work); | 637 | pm.dynpm_idle_work.work); |
500 | 638 | ||
501 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | 639 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
502 | mutex_lock(&rdev->pm.mutex); | 640 | mutex_lock(&rdev->pm.mutex); |
503 | if (rdev->pm.state == PM_STATE_ACTIVE) { | 641 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { |
504 | unsigned long irq_flags; | 642 | unsigned long irq_flags; |
505 | int not_processed = 0; | 643 | int not_processed = 0; |
506 | 644 | ||
@@ -516,23 +654,23 @@ static void radeon_pm_idle_work_handler(struct work_struct *work) | |||
516 | read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); | 654 | read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
517 | 655 | ||
518 | if (not_processed >= 3) { /* should upclock */ | 656 | if (not_processed >= 3) { /* should upclock */ |
519 | if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { | 657 | if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { |
520 | rdev->pm.planned_action = PM_ACTION_NONE; | 658 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
521 | } else if (rdev->pm.planned_action == PM_ACTION_NONE && | 659 | } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && |
522 | rdev->pm.can_upclock) { | 660 | rdev->pm.dynpm_can_upclock) { |
523 | rdev->pm.planned_action = | 661 | rdev->pm.dynpm_planned_action = |
524 | PM_ACTION_UPCLOCK; | 662 | DYNPM_ACTION_UPCLOCK; |
525 | rdev->pm.action_timeout = jiffies + | 663 | rdev->pm.dynpm_action_timeout = jiffies + |
526 | msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); | 664 | msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); |
527 | } | 665 | } |
528 | } else if (not_processed == 0) { /* should downclock */ | 666 | } else if (not_processed == 0) { /* should downclock */ |
529 | if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { | 667 | if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { |
530 | rdev->pm.planned_action = PM_ACTION_NONE; | 668 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
531 | } else if (rdev->pm.planned_action == PM_ACTION_NONE && | 669 | } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && |
532 | rdev->pm.can_downclock) { | 670 | rdev->pm.dynpm_can_downclock) { |
533 | rdev->pm.planned_action = | 671 | rdev->pm.dynpm_planned_action = |
534 | PM_ACTION_DOWNCLOCK; | 672 | DYNPM_ACTION_DOWNCLOCK; |
535 | rdev->pm.action_timeout = jiffies + | 673 | rdev->pm.dynpm_action_timeout = jiffies + |
536 | msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); | 674 | msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); |
537 | } | 675 | } |
538 | } | 676 | } |
@@ -540,15 +678,16 @@ static void radeon_pm_idle_work_handler(struct work_struct *work) | |||
540 | /* Note, radeon_pm_set_clocks is called with static_switch set | 678 | /* Note, radeon_pm_set_clocks is called with static_switch set |
541 | * to false since we want to wait for vbl to avoid flicker. | 679 | * to false since we want to wait for vbl to avoid flicker. |
542 | */ | 680 | */ |
543 | if (rdev->pm.planned_action != PM_ACTION_NONE && | 681 | if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && |
544 | jiffies > rdev->pm.action_timeout) { | 682 | jiffies > rdev->pm.dynpm_action_timeout) { |
545 | radeon_pm_set_clocks(rdev, false); | 683 | radeon_pm_get_dynpm_state(rdev); |
684 | radeon_pm_set_clocks(rdev); | ||
546 | } | 685 | } |
547 | } | 686 | } |
548 | mutex_unlock(&rdev->pm.mutex); | 687 | mutex_unlock(&rdev->pm.mutex); |
549 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 688 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
550 | 689 | ||
551 | queue_delayed_work(rdev->wq, &rdev->pm.idle_work, | 690 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, |
552 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 691 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
553 | } | 692 | } |
554 | 693 | ||
@@ -563,7 +702,6 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
563 | struct drm_device *dev = node->minor->dev; | 702 | struct drm_device *dev = node->minor->dev; |
564 | struct radeon_device *rdev = dev->dev_private; | 703 | struct radeon_device *rdev = dev->dev_private; |
565 | 704 | ||
566 | seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]); | ||
567 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); | 705 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); |
568 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); | 706 | seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
569 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); | 707 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); |