aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/radeon/radeon_pm.c
diff options
context:
space:
mode:
authorMatthew Garrett <mjg@redhat.com>2010-04-26 15:57:01 -0400
committerDave Airlie <airlied@redhat.com>2010-05-18 04:21:19 -0400
commit8f5b5e632cd55d9acf10ba498b858fd996bd1a39 (patch)
tree576ccaf0fad16bb5ff9032f17214ca60c1374a69 /drivers/gpu/drm/radeon/radeon_pm.c
parent5876dd249e8e47c730cac090bf6edd88e5f04327 (diff)
radeon: Take drm struct_mutex over reclocking
We need to block the drm core from doing anything that may touch our vram during reclock, so take the drm mutex for the duration.

Signed-off-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_pm.c')
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index da35bd7f38d..c9390ea56f9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -131,6 +131,7 @@ static ssize_t radeon_set_power_state_static(struct device *dev,
131 return count; 131 return count;
132 } 132 }
133 133
134 mutex_lock(&rdev->ddev->struct_mutex);
134 mutex_lock(&rdev->pm.mutex); 135 mutex_lock(&rdev->pm.mutex);
135 if ((ps >= 0) && (ps < rdev->pm.num_power_states) && 136 if ((ps >= 0) && (ps < rdev->pm.num_power_states) &&
136 (cm >= 0) && (cm < rdev->pm.power_state[ps].num_clock_modes)) { 137 (cm >= 0) && (cm < rdev->pm.power_state[ps].num_clock_modes)) {
@@ -148,6 +149,7 @@ static ssize_t radeon_set_power_state_static(struct device *dev,
148 } else 149 } else
149 DRM_ERROR("Invalid power state: %d.%d\n\n", ps, cm); 150 DRM_ERROR("Invalid power state: %d.%d\n\n", ps, cm);
150 mutex_unlock(&rdev->pm.mutex); 151 mutex_unlock(&rdev->pm.mutex);
152 mutex_unlock(&rdev->ddev->struct_mutex);
151 153
152 return count; 154 return count;
153} 155}
@@ -184,11 +186,13 @@ static ssize_t radeon_set_dynpm(struct device *dev,
184 } else if (tmp == 1) { 186 } else if (tmp == 1) {
185 if (rdev->pm.num_power_states > 1) { 187 if (rdev->pm.num_power_states > 1) {
186 /* enable dynpm */ 188 /* enable dynpm */
189 mutex_lock(&rdev->ddev->struct_mutex);
187 mutex_lock(&rdev->pm.mutex); 190 mutex_lock(&rdev->pm.mutex);
188 rdev->pm.state = PM_STATE_PAUSED; 191 rdev->pm.state = PM_STATE_PAUSED;
189 rdev->pm.planned_action = PM_ACTION_DEFAULT; 192 rdev->pm.planned_action = PM_ACTION_DEFAULT;
190 radeon_get_power_state(rdev, rdev->pm.planned_action); 193 radeon_get_power_state(rdev, rdev->pm.planned_action);
191 mutex_unlock(&rdev->pm.mutex); 194 mutex_unlock(&rdev->pm.mutex);
195 mutex_unlock(&rdev->ddev->struct_mutex);
192 /* update power mode info */ 196 /* update power mode info */
193 radeon_pm_compute_clocks(rdev); 197 radeon_pm_compute_clocks(rdev);
194 DRM_INFO("radeon: dynamic power management enabled\n"); 198 DRM_INFO("radeon: dynamic power management enabled\n");
@@ -311,9 +315,11 @@ void radeon_pm_fini(struct radeon_device *rdev)
311 (rdev->pm.current_clock_mode_index != 0)) { 315 (rdev->pm.current_clock_mode_index != 0)) {
312 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index; 316 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
313 rdev->pm.requested_clock_mode_index = 0; 317 rdev->pm.requested_clock_mode_index = 0;
318 mutex_lock(&rdev->ddev->struct_mutex);
314 mutex_lock(&rdev->pm.mutex); 319 mutex_lock(&rdev->pm.mutex);
315 radeon_pm_set_clocks(rdev, true); 320 radeon_pm_set_clocks(rdev, true);
316 mutex_unlock(&rdev->pm.mutex); 321 mutex_unlock(&rdev->pm.mutex);
322 mutex_unlock(&rdev->ddev->struct_mutex);
317 } 323 }
318 324
319 device_remove_file(rdev->dev, &dev_attr_power_state); 325 device_remove_file(rdev->dev, &dev_attr_power_state);
@@ -332,6 +338,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
332 if (rdev->pm.state == PM_STATE_DISABLED) 338 if (rdev->pm.state == PM_STATE_DISABLED)
333 return; 339 return;
334 340
341 mutex_lock(&rdev->ddev->struct_mutex);
335 mutex_lock(&rdev->pm.mutex); 342 mutex_lock(&rdev->pm.mutex);
336 343
337 rdev->pm.active_crtcs = 0; 344 rdev->pm.active_crtcs = 0;
@@ -382,6 +389,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
382 } 389 }
383 390
384 mutex_unlock(&rdev->pm.mutex); 391 mutex_unlock(&rdev->pm.mutex);
392 mutex_unlock(&rdev->ddev->struct_mutex);
385} 393}
386 394
387bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) 395bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
@@ -455,6 +463,7 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
455 rdev = container_of(work, struct radeon_device, 463 rdev = container_of(work, struct radeon_device,
456 pm.idle_work.work); 464 pm.idle_work.work);
457 465
466 mutex_lock(&rdev->ddev->struct_mutex);
458 mutex_lock(&rdev->pm.mutex); 467 mutex_lock(&rdev->pm.mutex);
459 if (rdev->pm.state == PM_STATE_ACTIVE) { 468 if (rdev->pm.state == PM_STATE_ACTIVE) {
460 unsigned long irq_flags; 469 unsigned long irq_flags;
@@ -499,6 +508,7 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
499 } 508 }
500 } 509 }
501 mutex_unlock(&rdev->pm.mutex); 510 mutex_unlock(&rdev->pm.mutex);
511 mutex_unlock(&rdev->ddev->struct_mutex);
502 512
503 queue_delayed_work(rdev->wq, &rdev->pm.idle_work, 513 queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
504 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 514 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));