author     Matthew Garrett <mjg@redhat.com>    2010-04-27 17:16:58 -0400
committer  Dave Airlie <airlied@redhat.com>    2010-05-18 04:21:27 -0400
commit     612e06ce9c78840c3a1a207dfbe489a059d87c28 (patch)
tree       0e208e70737362a77173826962240bef975133e0 /drivers/gpu
parent     c37d230af450472183e70947f8e2aa8101a96603 (diff)
radeon: Fix locking in power management paths
The ttm code could take vram_mutex followed by cp_mutex, while the
reclocking code would do the reverse. Hilarity could ensue.

Signed-off-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
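For illustration only (not part of the commit): a minimal userspace sketch of the lock inversion described above, assuming nothing beyond POSIX threads. Two pthread mutexes stand in for vram_mutex and cp.mutex, and the function names (ttm_path, reclock_path) are hypothetical stand-ins. The bug is one path taking A then B while another takes B then A; the fix, as in the hunks below, is to make every path acquire the locks in one agreed order.

/* Build with: cc -pthread abba_sketch.c */
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for rdev->vram_mutex and rdev->cp.mutex. */
static pthread_mutex_t vram_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cp_mutex   = PTHREAD_MUTEX_INITIALIZER;

/* "ttm path": takes vram_mutex, then cp_mutex. */
static void *ttm_path(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&vram_mutex);
        pthread_mutex_lock(&cp_mutex);
        puts("ttm path: holds vram_mutex + cp_mutex");
        pthread_mutex_unlock(&cp_mutex);
        pthread_mutex_unlock(&vram_mutex);
        return NULL;
}

/* "reclock path": after the fix it uses the same order as the ttm path.
 * The buggy version took cp_mutex first, so the two threads could each
 * hold one lock and wait forever for the other (ABBA deadlock). */
static void *reclock_path(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&vram_mutex);
        pthread_mutex_lock(&cp_mutex);
        puts("reclock path: holds vram_mutex + cp_mutex");
        pthread_mutex_unlock(&cp_mutex);
        pthread_mutex_unlock(&vram_mutex);
        return NULL;
}

int main(void)
{
        pthread_t a, b;
        pthread_create(&a, NULL, ttm_path, NULL);
        pthread_create(&b, NULL, reclock_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}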
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 18
1 file changed, 4 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 7cc54c804cb0..134b19537d11 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -61,6 +61,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
         if (!static_switch)
                 radeon_get_power_state(rdev, rdev->pm.planned_action);
 
+        mutex_lock(&rdev->ddev->struct_mutex);
+        mutex_lock(&rdev->vram_mutex);
         mutex_lock(&rdev->cp.mutex);
 
         /* wait for GPU idle */
@@ -73,8 +75,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
         rdev->irq.gui_idle = false;
         radeon_irq_set(rdev);
 
-        mutex_lock(&rdev->vram_mutex);
-
         radeon_unmap_vram_bos(rdev);
 
         if (!static_switch) {
@@ -97,8 +97,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
                 }
         }
 
-        mutex_unlock(&rdev->vram_mutex);
-
         /* update display watermarks based on new power state */
         radeon_update_bandwidth_info(rdev);
         if (rdev->pm.active_crtc_count)
@@ -107,6 +105,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
         rdev->pm.planned_action = PM_ACTION_NONE;
 
         mutex_unlock(&rdev->cp.mutex);
+        mutex_unlock(&rdev->vram_mutex);
+        mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
 static ssize_t radeon_get_power_state_static(struct device *dev,
@@ -134,7 +134,6 @@ static ssize_t radeon_set_power_state_static(struct device *dev,
                 return count;
         }
 
-        mutex_lock(&rdev->ddev->struct_mutex);
         mutex_lock(&rdev->pm.mutex);
         if ((ps >= 0) && (ps < rdev->pm.num_power_states) &&
             (cm >= 0) && (cm < rdev->pm.power_state[ps].num_clock_modes)) {
@@ -152,7 +151,6 @@ static ssize_t radeon_set_power_state_static(struct device *dev,
         } else
                 DRM_ERROR("Invalid power state: %d.%d\n\n", ps, cm);
         mutex_unlock(&rdev->pm.mutex);
-        mutex_unlock(&rdev->ddev->struct_mutex);
 
         return count;
 }
@@ -189,13 +187,11 @@ static ssize_t radeon_set_dynpm(struct device *dev,
         } else if (tmp == 1) {
                 if (rdev->pm.num_power_states > 1) {
                         /* enable dynpm */
-                        mutex_lock(&rdev->ddev->struct_mutex);
                         mutex_lock(&rdev->pm.mutex);
                         rdev->pm.state = PM_STATE_PAUSED;
                         rdev->pm.planned_action = PM_ACTION_DEFAULT;
                         radeon_get_power_state(rdev, rdev->pm.planned_action);
                         mutex_unlock(&rdev->pm.mutex);
-                        mutex_unlock(&rdev->ddev->struct_mutex);
                         /* update power mode info */
                         radeon_pm_compute_clocks(rdev);
                         DRM_INFO("radeon: dynamic power management enabled\n");
@@ -318,11 +314,9 @@ void radeon_pm_fini(struct radeon_device *rdev)
             (rdev->pm.current_clock_mode_index != 0)) {
                 rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
                 rdev->pm.requested_clock_mode_index = 0;
-                mutex_lock(&rdev->ddev->struct_mutex);
                 mutex_lock(&rdev->pm.mutex);
                 radeon_pm_set_clocks(rdev, true);
                 mutex_unlock(&rdev->pm.mutex);
-                mutex_unlock(&rdev->ddev->struct_mutex);
         }
 
         device_remove_file(rdev->dev, &dev_attr_power_state);
@@ -341,7 +335,6 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
         if (rdev->pm.state == PM_STATE_DISABLED)
                 return;
 
-        mutex_lock(&rdev->ddev->struct_mutex);
         mutex_lock(&rdev->pm.mutex);
 
         rdev->pm.active_crtcs = 0;
@@ -392,7 +385,6 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
         }
 
         mutex_unlock(&rdev->pm.mutex);
-        mutex_unlock(&rdev->ddev->struct_mutex);
 }
 
 bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
@@ -468,7 +460,6 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
                                 pm.idle_work.work);
 
         resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
-        mutex_lock(&rdev->ddev->struct_mutex);
         mutex_lock(&rdev->pm.mutex);
         if (rdev->pm.state == PM_STATE_ACTIVE) {
                 unsigned long irq_flags;
@@ -513,7 +504,6 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
                 }
         }
         mutex_unlock(&rdev->pm.mutex);
-        mutex_unlock(&rdev->ddev->struct_mutex);
         ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 
         queue_delayed_work(rdev->wq, &rdev->pm.idle_work,