author     Rafał Miłecki <zajec5@gmail.com>   2010-01-07 18:22:47 -0500
committer  Dave Airlie <airlied@redhat.com>   2010-02-08 18:32:33 -0500
commit     73a6d3fc104827db574e4bd206a025299fef0bb1 (patch)
tree       c5f3b9f63bf1bf10b307dcedaa77024237a267b0 /drivers/gpu/drm
parent     20d6c346f69ec68f3f4956c726d830c978f911a8 (diff)
drm/radeon/kms: use wait queue (events) for VBLANK sync
This already simplifies the code significantly and makes it maintainable
in case memory reclocking plus voltage changing are added in the future.
Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
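
For readers unfamiliar with the mechanism, the sketch below illustrates the generic kernel wait-queue pattern this patch switches to: the interrupt handler calls wake_up() on a wait_queue_head_t, and the reclocking path sleeps on it with wait_event_interruptible_timeout() instead of having the IRQ handler queue a work item. It is a minimal sketch of the wait-queue API only, not radeon code; the names demo_device, demo_irq_handler, demo_wait_for_vblank and the vblank_seen flag are hypothetical, and unlike this sketch the patch itself passes a literal 0 as the wait condition and simply bounds the wait with RADEON_WAIT_VBLANK_TIMEOUT.

/*
 * Minimal sketch of the wait-queue pattern, with an explicit condition
 * flag for clarity.  All names here are hypothetical, not radeon code.
 */
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

#define DEMO_WAIT_VBLANK_TIMEOUT 200	/* ms */

struct demo_device {
	wait_queue_head_t vblank_queue;
	bool vblank_seen;
};

static void demo_device_init(struct demo_device *ddev)
{
	/* Initialise once at device init time. */
	init_waitqueue_head(&ddev->vblank_queue);
	ddev->vblank_seen = false;
}

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	struct demo_device *ddev = data;

	/* Interrupt context: record the event and wake any sleeper. */
	ddev->vblank_seen = true;
	wake_up(&ddev->vblank_queue);
	return IRQ_HANDLED;
}

static void demo_wait_for_vblank(struct demo_device *ddev)
{
	/*
	 * Process context: sleep until the handler signals a vblank or
	 * the timeout expires, instead of deferring the work to a
	 * work item queued from the interrupt handler.
	 */
	ddev->vblank_seen = false;
	wait_event_interruptible_timeout(ddev->vblank_queue,
					 ddev->vblank_seen,
					 msecs_to_jiffies(DEMO_WAIT_VBLANK_TIMEOUT));
}

In the patch itself, radeon_pm_set_clocks() now does this waiting in process context (under rdev->pm.mutex and rdev->cp.mutex), while the interrupt handlers merely wake rdev->irq.vblank_queue.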
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/radeon/r100.c           |  6
-rw-r--r--  drivers/gpu/drm/radeon/r600.c           |  6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h         |  3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c  |  1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c      | 93
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c          |  6
6 files changed, 41 insertions(+), 74 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 05502bf042b9..346ae3d7e0d4 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -312,13 +312,11 @@ int r100_irq_process(struct radeon_device *rdev)
 		/* Vertical blank interrupts */
 		if (status & RADEON_CRTC_VBLANK_STAT) {
 			drm_handle_vblank(rdev->ddev, 0);
-			if (rdev->pm.vblank_callback)
-				queue_work(rdev->wq, &rdev->pm.reclock_work);
+			wake_up(&rdev->irq.vblank_queue);
 		}
 		if (status & RADEON_CRTC2_VBLANK_STAT) {
 			drm_handle_vblank(rdev->ddev, 1);
-			if (rdev->pm.vblank_callback)
-				queue_work(rdev->wq, &rdev->pm.reclock_work);
+			wake_up(&rdev->irq.vblank_queue);
 		}
 		if (status & RADEON_FP_DETECT_STAT) {
 			queue_hotplug = true;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 65daf55af2d9..05769fa77a21 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2744,8 +2744,7 @@ restart_ih:
 		case 0: /* D1 vblank */
 			if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 0);
-				if (rdev->pm.vblank_callback)
-					queue_work(rdev->wq, &rdev->pm.reclock_work);
+				wake_up(&rdev->irq.vblank_queue);
 				disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D1 vblank\n");
 			}
@@ -2766,8 +2765,7 @@ restart_ih:
 		case 0: /* D2 vblank */
 			if (disp_int & LB_D2_VBLANK_INTERRUPT) {
 				drm_handle_vblank(rdev->ddev, 1);
-				if (rdev->pm.vblank_callback)
-					queue_work(rdev->wq, &rdev->pm.reclock_work);
+				wake_up(&rdev->irq.vblank_queue);
 				disp_int &= ~LB_D2_VBLANK_INTERRUPT;
 				DRM_DEBUG("IH: D2 vblank\n");
 			}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index da1177375976..3f353131bb38 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -351,6 +351,7 @@ struct radeon_irq {
 	bool sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
 	bool crtc_vblank_int[2];
+	wait_queue_head_t vblank_queue;
 	/* FIXME: use defines for max hpd/dacs */
 	bool hpd[6];
 	spinlock_t sw_lock;
@@ -657,13 +658,11 @@ struct radeon_power_state {
 
 struct radeon_pm {
 	struct mutex mutex;
-	struct work_struct reclock_work;
 	struct delayed_work idle_work;
 	enum radeon_pm_state state;
 	enum radeon_pm_action planned_action;
 	unsigned long action_timeout;
 	bool downclocked;
-	bool vblank_callback;
 	int active_crtcs;
 	int req_vblank;
 	fixed20_12 max_bandwidth;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a1c937d03845..c90f8d370266 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -645,6 +645,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	mutex_init(&rdev->pm.mutex);
 	rwlock_init(&rdev->fence_drv.lock);
 	INIT_LIST_HEAD(&rdev->gem.objects);
+	init_waitqueue_head(&rdev->irq.vblank_queue);
 
 	/* setup workqueue */
 	rdev->wq = create_workqueue("radeon");
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 1cecd7346ab9..a8e151ec1351 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -25,10 +25,10 @@
 
 #define RADEON_IDLE_LOOP_MS 100
 #define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
 
 static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_reclock_work_handler(struct work_struct *work);
 static void radeon_pm_idle_work_handler(struct work_struct *work);
 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
 
@@ -214,7 +214,6 @@ int radeon_pm_init(struct radeon_device *rdev)
 	rdev->pm.state = PM_STATE_DISABLED;
 	rdev->pm.planned_action = PM_ACTION_NONE;
 	rdev->pm.downclocked = false;
-	rdev->pm.vblank_callback = false;
 
 	if (rdev->bios) {
 		if (rdev->is_atom_bios)
@@ -228,7 +227,6 @@ int radeon_pm_init(struct radeon_device *rdev)
 			DRM_ERROR("Failed to register debugfs file for PM!\n");
 	}
 
-	INIT_WORK(&rdev->pm.reclock_work, radeon_pm_reclock_work_handler);
 	INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
 
 	if (radeon_dynpm != -1 && radeon_dynpm) {
@@ -266,26 +264,14 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
 
 	if (count > 1) {
 		if (rdev->pm.state == PM_STATE_ACTIVE) {
-			wait_queue_head_t wait;
-			init_waitqueue_head(&wait);
-
 			cancel_delayed_work(&rdev->pm.idle_work);
 
 			rdev->pm.state = PM_STATE_PAUSED;
 			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
-			radeon_get_power_state(rdev, rdev->pm.planned_action);
-			rdev->pm.vblank_callback = true;
-
-			mutex_unlock(&rdev->pm.mutex);
-
-			wait_event_timeout(wait, !rdev->pm.downclocked,
-					   msecs_to_jiffies(300));
-			if (!rdev->pm.downclocked)
+			if (rdev->pm.downclocked)
 				radeon_pm_set_clocks(rdev);
 
 			DRM_DEBUG("radeon: dynamic power management deactivated\n");
-		} else {
-			mutex_unlock(&rdev->pm.mutex);
 		}
 	} else if (count == 1) {
 		/* TODO: Increase clocks if needed for current mode */
@@ -293,8 +279,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
 		if (rdev->pm.state == PM_STATE_MINIMUM) {
 			rdev->pm.state = PM_STATE_ACTIVE;
 			rdev->pm.planned_action = PM_ACTION_UPCLOCK;
-			radeon_get_power_state(rdev, rdev->pm.planned_action);
-			radeon_pm_set_clocks_locked(rdev);
+			radeon_pm_set_clocks(rdev);
 
 			queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
 					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
@@ -305,8 +290,6 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
 					   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 			DRM_DEBUG("radeon: dynamic power management activated\n");
 		}
-
-		mutex_unlock(&rdev->pm.mutex);
 	}
 	else { /* count == 0 */
 		if (rdev->pm.state != PM_STATE_MINIMUM) {
@@ -314,12 +297,11 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
 
 			rdev->pm.state = PM_STATE_MINIMUM;
 			rdev->pm.planned_action = PM_ACTION_MINIMUM;
-			radeon_get_power_state(rdev, rdev->pm.planned_action);
-			radeon_pm_set_clocks_locked(rdev);
+			radeon_pm_set_clocks(rdev);
 		}
-
-		mutex_unlock(&rdev->pm.mutex);
 	}
+
+	mutex_unlock(&rdev->pm.mutex);
 }
 
 static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
@@ -344,31 +326,32 @@ static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
 
 static void radeon_pm_set_clocks(struct radeon_device *rdev)
 {
-	mutex_lock(&rdev->pm.mutex);
-	/* new VBLANK irq may come before handling previous one */
-	if (rdev->pm.vblank_callback) {
-		mutex_lock(&rdev->cp.mutex);
-		if (rdev->pm.req_vblank & (1 << 0)) {
-			rdev->pm.req_vblank &= ~(1 << 0);
-			drm_vblank_put(rdev->ddev, 0);
-		}
-		if (rdev->pm.req_vblank & (1 << 1)) {
-			rdev->pm.req_vblank &= ~(1 << 1);
-			drm_vblank_put(rdev->ddev, 1);
-		}
-		rdev->pm.vblank_callback = false;
-		radeon_pm_set_clocks_locked(rdev);
-		mutex_unlock(&rdev->cp.mutex);
+	radeon_get_power_state(rdev, rdev->pm.planned_action);
+	mutex_lock(&rdev->cp.mutex);
+
+	if (rdev->pm.active_crtcs & (1 << 0)) {
+		rdev->pm.req_vblank |= (1 << 0);
+		drm_vblank_get(rdev->ddev, 0);
+	}
+	if (rdev->pm.active_crtcs & (1 << 1)) {
+		rdev->pm.req_vblank |= (1 << 1);
+		drm_vblank_get(rdev->ddev, 1);
+	}
+	if (rdev->pm.active_crtcs)
+		wait_event_interruptible_timeout(
+			rdev->irq.vblank_queue, 0,
+			msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+	if (rdev->pm.req_vblank & (1 << 0)) {
+		rdev->pm.req_vblank &= ~(1 << 0);
+		drm_vblank_put(rdev->ddev, 0);
+	}
+	if (rdev->pm.req_vblank & (1 << 1)) {
+		rdev->pm.req_vblank &= ~(1 << 1);
+		drm_vblank_put(rdev->ddev, 1);
 	}
-	mutex_unlock(&rdev->pm.mutex);
-}
 
-static void radeon_pm_reclock_work_handler(struct work_struct *work)
-{
-	struct radeon_device *rdev;
-	rdev = container_of(work, struct radeon_device,
-			    pm.reclock_work);
-	radeon_pm_set_clocks(rdev);
+	radeon_pm_set_clocks_locked(rdev);
+	mutex_unlock(&rdev->cp.mutex);
 }
 
 static void radeon_pm_idle_work_handler(struct work_struct *work)
@@ -378,8 +361,7 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
 			       pm.idle_work.work);
 
 	mutex_lock(&rdev->pm.mutex);
-	if (rdev->pm.state == PM_STATE_ACTIVE &&
-	    !rdev->pm.vblank_callback) {
+	if (rdev->pm.state == PM_STATE_ACTIVE) {
 		unsigned long irq_flags;
 		int not_processed = 0;
 
@@ -417,17 +399,8 @@ static void radeon_pm_idle_work_handler(struct work_struct *work)
 		}
 
 		if (rdev->pm.planned_action != PM_ACTION_NONE &&
 		    jiffies > rdev->pm.action_timeout) {
-			if (rdev->pm.active_crtcs & (1 << 0)) {
-				rdev->pm.req_vblank |= (1 << 0);
-				drm_vblank_get(rdev->ddev, 0);
-			}
-			if (rdev->pm.active_crtcs & (1 << 1)) {
-				rdev->pm.req_vblank |= (1 << 1);
-				drm_vblank_get(rdev->ddev, 1);
-			}
-			radeon_get_power_state(rdev, rdev->pm.planned_action);
-			rdev->pm.vblank_callback = true;
+			radeon_pm_set_clocks(rdev);
 		}
 	}
 	mutex_unlock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index a581fdead4dd..979b00034de9 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -408,13 +408,11 @@ int rs600_irq_process(struct radeon_device *rdev)
 		/* Vertical blank interrupts */
 		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
 			drm_handle_vblank(rdev->ddev, 0);
-			if (rdev->pm.vblank_callback)
-				queue_work(rdev->wq, &rdev->pm.reclock_work);
+			wake_up(&rdev->irq.vblank_queue);
 		}
 		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
 			drm_handle_vblank(rdev->ddev, 1);
-			if (rdev->pm.vblank_callback)
-				queue_work(rdev->wq, &rdev->pm.reclock_work);
+			wake_up(&rdev->irq.vblank_queue);
 		}
 		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
 			queue_hotplug = true;