diff options
author | Tejun Heo <tj@kernel.org> | 2011-01-03 08:49:32 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2011-01-05 23:32:16 -0500 |
commit | 32c87fca2fac490e34a9fa900b45f2fbb4faacf9 (patch) | |
tree | 181e9c1d1493124f0a0f5a8a65be4bf29053b77e /drivers/gpu/drm/radeon/radeon_pm.c | |
parent | af5dd83b873efd4e1477f2265b6fa15a825aff26 (diff) |
drm/radeon: use system_wq instead of dev_priv->wq
With cmwq, there's no reason for radeon to use a dedicated workqueue.
Drop dev_priv->wq and use system_wq instead.
Because radeon_driver_irq_uninstall_kms() may be called from
unsleepable context, the work items can't be flushed from there.
Instead, init and flush from radeon_irq_kms_init/fini().
While at it, simplify canceling/flushing of rdev->pm.dynpm_idle_work.
Always initialize and sync cancel instead of being unnecessarily smart
about it.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Alex Deucher <alexdeucher@gmail.com>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_pm.c')
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_pm.c | 47 |
1 file changed, 16 insertions(+), 31 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 4de7776bd1c5..0afd26ccccfa 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -405,20 +405,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, | |||
405 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 405 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
406 | mutex_unlock(&rdev->pm.mutex); | 406 | mutex_unlock(&rdev->pm.mutex); |
407 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { | 407 | } else if (strncmp("profile", buf, strlen("profile")) == 0) { |
408 | bool flush_wq = false; | ||
409 | |||
410 | mutex_lock(&rdev->pm.mutex); | 408 | mutex_lock(&rdev->pm.mutex); |
411 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | ||
412 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
413 | flush_wq = true; | ||
414 | } | ||
415 | /* disable dynpm */ | 409 | /* disable dynpm */ |
416 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 410 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
417 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; | 411 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; |
418 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 412 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
419 | mutex_unlock(&rdev->pm.mutex); | 413 | mutex_unlock(&rdev->pm.mutex); |
420 | if (flush_wq) | 414 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
421 | flush_workqueue(rdev->wq); | ||
422 | } else { | 415 | } else { |
423 | DRM_ERROR("invalid power method!\n"); | 416 | DRM_ERROR("invalid power method!\n"); |
424 | goto fail; | 417 | goto fail; |
@@ -524,18 +517,14 @@ static void radeon_hwmon_fini(struct radeon_device *rdev) | |||
524 | 517 | ||
525 | void radeon_pm_suspend(struct radeon_device *rdev) | 518 | void radeon_pm_suspend(struct radeon_device *rdev) |
526 | { | 519 | { |
527 | bool flush_wq = false; | ||
528 | |||
529 | mutex_lock(&rdev->pm.mutex); | 520 | mutex_lock(&rdev->pm.mutex); |
530 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 521 | if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
531 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
532 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) | 522 | if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) |
533 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; | 523 | rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; |
534 | flush_wq = true; | ||
535 | } | 524 | } |
536 | mutex_unlock(&rdev->pm.mutex); | 525 | mutex_unlock(&rdev->pm.mutex); |
537 | if (flush_wq) | 526 | |
538 | flush_workqueue(rdev->wq); | 527 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
539 | } | 528 | } |
540 | 529 | ||
541 | void radeon_pm_resume(struct radeon_device *rdev) | 530 | void radeon_pm_resume(struct radeon_device *rdev) |
@@ -550,8 +539,8 @@ void radeon_pm_resume(struct radeon_device *rdev) | |||
550 | if (rdev->pm.pm_method == PM_METHOD_DYNPM | 539 | if (rdev->pm.pm_method == PM_METHOD_DYNPM |
551 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { | 540 | && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { |
552 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 541 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
553 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 542 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
554 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 543 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
555 | } | 544 | } |
556 | mutex_unlock(&rdev->pm.mutex); | 545 | mutex_unlock(&rdev->pm.mutex); |
557 | radeon_pm_compute_clocks(rdev); | 546 | radeon_pm_compute_clocks(rdev); |
@@ -585,6 +574,9 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
585 | ret = radeon_hwmon_init(rdev); | 574 | ret = radeon_hwmon_init(rdev); |
586 | if (ret) | 575 | if (ret) |
587 | return ret; | 576 | return ret; |
577 | |||
578 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | ||
579 | |||
588 | if (rdev->pm.num_power_states > 1) { | 580 | if (rdev->pm.num_power_states > 1) { |
589 | /* where's the best place to put these? */ | 581 | /* where's the best place to put these? */ |
590 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); | 582 | ret = device_create_file(rdev->dev, &dev_attr_power_profile); |
@@ -598,8 +590,6 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
598 | rdev->acpi_nb.notifier_call = radeon_acpi_event; | 590 | rdev->acpi_nb.notifier_call = radeon_acpi_event; |
599 | register_acpi_notifier(&rdev->acpi_nb); | 591 | register_acpi_notifier(&rdev->acpi_nb); |
600 | #endif | 592 | #endif |
601 | INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); | ||
602 | |||
603 | if (radeon_debugfs_pm_init(rdev)) { | 593 | if (radeon_debugfs_pm_init(rdev)) { |
604 | DRM_ERROR("Failed to register debugfs file for PM!\n"); | 594 | DRM_ERROR("Failed to register debugfs file for PM!\n"); |
605 | } | 595 | } |
@@ -613,25 +603,20 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
613 | void radeon_pm_fini(struct radeon_device *rdev) | 603 | void radeon_pm_fini(struct radeon_device *rdev) |
614 | { | 604 | { |
615 | if (rdev->pm.num_power_states > 1) { | 605 | if (rdev->pm.num_power_states > 1) { |
616 | bool flush_wq = false; | ||
617 | |||
618 | mutex_lock(&rdev->pm.mutex); | 606 | mutex_lock(&rdev->pm.mutex); |
619 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { | 607 | if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
620 | rdev->pm.profile = PM_PROFILE_DEFAULT; | 608 | rdev->pm.profile = PM_PROFILE_DEFAULT; |
621 | radeon_pm_update_profile(rdev); | 609 | radeon_pm_update_profile(rdev); |
622 | radeon_pm_set_clocks(rdev); | 610 | radeon_pm_set_clocks(rdev); |
623 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { | 611 | } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { |
624 | /* cancel work */ | ||
625 | cancel_delayed_work(&rdev->pm.dynpm_idle_work); | ||
626 | flush_wq = true; | ||
627 | /* reset default clocks */ | 612 | /* reset default clocks */ |
628 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; | 613 | rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; |
629 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; | 614 | rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; |
630 | radeon_pm_set_clocks(rdev); | 615 | radeon_pm_set_clocks(rdev); |
631 | } | 616 | } |
632 | mutex_unlock(&rdev->pm.mutex); | 617 | mutex_unlock(&rdev->pm.mutex); |
633 | if (flush_wq) | 618 | |
634 | flush_workqueue(rdev->wq); | 619 | cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); |
635 | 620 | ||
636 | device_remove_file(rdev->dev, &dev_attr_power_profile); | 621 | device_remove_file(rdev->dev, &dev_attr_power_profile); |
637 | device_remove_file(rdev->dev, &dev_attr_power_method); | 622 | device_remove_file(rdev->dev, &dev_attr_power_method); |
@@ -690,12 +675,12 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) | |||
690 | radeon_pm_get_dynpm_state(rdev); | 675 | radeon_pm_get_dynpm_state(rdev); |
691 | radeon_pm_set_clocks(rdev); | 676 | radeon_pm_set_clocks(rdev); |
692 | 677 | ||
693 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 678 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
694 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 679 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
695 | } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { | 680 | } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { |
696 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; | 681 | rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; |
697 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 682 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
698 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 683 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
699 | DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); | 684 | DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); |
700 | } | 685 | } |
701 | } else { /* count == 0 */ | 686 | } else { /* count == 0 */ |
@@ -800,8 +785,8 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work) | |||
800 | radeon_pm_set_clocks(rdev); | 785 | radeon_pm_set_clocks(rdev); |
801 | } | 786 | } |
802 | 787 | ||
803 | queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, | 788 | schedule_delayed_work(&rdev->pm.dynpm_idle_work, |
804 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); | 789 | msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); |
805 | } | 790 | } |
806 | mutex_unlock(&rdev->pm.mutex); | 791 | mutex_unlock(&rdev->pm.mutex); |
807 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 792 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |