Diffstat (limited to 'drivers/gpu/drm/drm_crtc_helper.c')
-rw-r--r--	drivers/gpu/drm/drm_crtc_helper.c	41
1 file changed, 12 insertions, 29 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7b1eb2fcf616..7e31d4348340 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -831,17 +831,14 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
-static struct slow_work_ops output_poll_ops;
-
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void output_poll_execute(struct slow_work *work)
+static void output_poll_execute(struct work_struct *work)
 {
-	struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
-	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
 	struct drm_connector *connector;
 	enum drm_connector_status old_status, status;
 	bool repoll = false, changed = false;
-	int ret;
 
 	mutex_lock(&dev->mode_config.mutex);
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -876,18 +873,15 @@ static void output_poll_execute(struct slow_work *work)
 		dev->mode_config.funcs->output_poll_changed(dev);
 	}
 
-	if (repoll) {
-		ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
-		if (ret)
-			DRM_ERROR("delayed enqueue failed %d\n", ret);
-	}
+	if (repoll)
+		queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
 	if (!dev->mode_config.poll_enabled)
 		return;
-	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+	cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
@@ -895,26 +889,20 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
 	bool poll = false;
 	struct drm_connector *connector;
-	int ret;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->polled)
 			poll = true;
 	}
 
-	if (poll) {
-		ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
-		if (ret)
-			DRM_ERROR("delayed enqueue failed %d\n", ret);
-	}
+	if (poll)
+		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
 void drm_kms_helper_poll_init(struct drm_device *dev)
 {
-	slow_work_register_user(THIS_MODULE);
-	delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
-			       &output_poll_ops);
+	INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
 	dev->mode_config.poll_enabled = true;
 
 	drm_kms_helper_poll_enable(dev);
@@ -924,7 +912,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
 	drm_kms_helper_poll_disable(dev);
-	slow_work_unregister_user(THIS_MODULE);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
@@ -932,12 +919,8 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
 	if (!dev->mode_config.poll_enabled)
 		return;
-	delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
-	/* schedule a slow work asap */
-	delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+	/* kill timer and schedule immediate execution, this doesn't block */
+	cancel_delayed_work(&dev->mode_config.output_poll_work);
+	queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-static struct slow_work_ops output_poll_ops = {
-	.execute = output_poll_execute,
-};
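
Note: the patch drops the old slow_work machinery (per-user registration and a slow_work_ops table) in favor of a plain delayed_work item queued on system_nrt_wq. The sketch below shows that general pattern in isolation; "poll_ctx", "poll_execute", "poll_init" and "poll_fini" are hypothetical names used only for illustration — in the patch itself the work item is embedded in drm_mode_config and the callback is output_poll_execute.

/*
 * Illustrative sketch only: a delayed_work poller in the style this
 * patch adopts.  All names here are hypothetical stand-ins.
 */
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define POLL_PERIOD (10*HZ)		/* mirrors DRM_OUTPUT_POLL_PERIOD */

struct poll_ctx {
	struct delayed_work poll_work;	/* replaces delayed_slow_work */
	bool repoll;
};

static void poll_execute(struct work_struct *work)
{
	/* recover the embedding object from the work_struct pointer */
	struct delayed_work *dwork = to_delayed_work(work);
	struct poll_ctx *ctx = container_of(dwork, struct poll_ctx, poll_work);

	/* ... do the actual polling, decide whether to poll again ... */

	if (ctx->repoll)
		/* system_nrt_wq as in the patch; later kernels use system_wq */
		queue_delayed_work(system_nrt_wq, dwork, POLL_PERIOD);
}

static void poll_init(struct poll_ctx *ctx)
{
	/* no slow_work_register_user() or slow_work_ops table needed */
	INIT_DELAYED_WORK(&ctx->poll_work, poll_execute);
	queue_delayed_work(system_nrt_wq, &ctx->poll_work, POLL_PERIOD);
}

static void poll_fini(struct poll_ctx *ctx)
{
	/* blocks until a running callback finishes, like poll_disable() */
	cancel_delayed_work_sync(&ctx->poll_work);
}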