diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-07 15:42:58 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-07 15:42:58 -0400 |
commit | 3b7433b8a8a83c87972065b1852b7dcae691e464 (patch) | |
tree | 93fa2c003f8baef5ab0733b53bac77961ed5240c /drivers/gpu | |
parent | 4a386c3e177ca2fbc70c9283d0b46537844763a0 (diff) | |
parent | 6ee0578b4daaea01c96b172c6aacca43fd9807a6 (diff) |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (55 commits)
workqueue: mark init_workqueues() as early_initcall()
workqueue: explain for_each_*cwq_cpu() iterators
fscache: fix build on !CONFIG_SYSCTL
slow-work: kill it
gfs2: use workqueue instead of slow-work
drm: use workqueue instead of slow-work
cifs: use workqueue instead of slow-work
fscache: drop references to slow-work
fscache: convert operation to use workqueue instead of slow-work
fscache: convert object to use workqueue instead of slow-work
workqueue: fix how cpu number is stored in work->data
workqueue: fix mayday_mask handling on UP
workqueue: fix build problem on !CONFIG_SMP
workqueue: fix locking in retry path of maybe_create_worker()
async: use workqueue for worker pool
workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
workqueue: implement unbound workqueue
workqueue: prepare for WQ_UNBOUND implementation
libata: take advantage of cmwq and remove concurrency limitations
workqueue: fix worker management invocation without pending works
...
Fixed up conflicts in fs/cifs/* as per Tejun. Other trivial conflicts in
include/linux/workqueue.h, kernel/trace/Kconfig and kernel/workqueue.c
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/drm_crtc_helper.c | 29 |
1 file changed, 10 insertions, 19 deletions
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 11fe9c870d17..45981304feb8 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -831,13 +831,11 @@ int drm_helper_resume_force_mode(struct drm_device *dev) | |||
831 | } | 831 | } |
832 | EXPORT_SYMBOL(drm_helper_resume_force_mode); | 832 | EXPORT_SYMBOL(drm_helper_resume_force_mode); |
833 | 833 | ||
834 | static struct slow_work_ops output_poll_ops; | ||
835 | |||
836 | #define DRM_OUTPUT_POLL_PERIOD (10*HZ) | 834 | #define DRM_OUTPUT_POLL_PERIOD (10*HZ) |
837 | static void output_poll_execute(struct slow_work *work) | 835 | static void output_poll_execute(struct work_struct *work) |
838 | { | 836 | { |
839 | struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work); | 837 | struct delayed_work *delayed_work = to_delayed_work(work); |
840 | struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work); | 838 | struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work); |
841 | struct drm_connector *connector; | 839 | struct drm_connector *connector; |
842 | enum drm_connector_status old_status, status; | 840 | enum drm_connector_status old_status, status; |
843 | bool repoll = false, changed = false; | 841 | bool repoll = false, changed = false; |
@@ -877,7 +875,7 @@ static void output_poll_execute(struct slow_work *work) | |||
877 | } | 875 | } |
878 | 876 | ||
879 | if (repoll) { | 877 | if (repoll) { |
880 | ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD); | 878 | ret = queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD); |
881 | if (ret) | 879 | if (ret) |
882 | DRM_ERROR("delayed enqueue failed %d\n", ret); | 880 | DRM_ERROR("delayed enqueue failed %d\n", ret); |
883 | } | 881 | } |
@@ -887,7 +885,7 @@ void drm_kms_helper_poll_disable(struct drm_device *dev) | |||
887 | { | 885 | { |
888 | if (!dev->mode_config.poll_enabled) | 886 | if (!dev->mode_config.poll_enabled) |
889 | return; | 887 | return; |
890 | delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); | 888 | cancel_delayed_work_sync(&dev->mode_config.output_poll_work); |
891 | } | 889 | } |
892 | EXPORT_SYMBOL(drm_kms_helper_poll_disable); | 890 | EXPORT_SYMBOL(drm_kms_helper_poll_disable); |
893 | 891 | ||
@@ -903,7 +901,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev) | |||
903 | } | 901 | } |
904 | 902 | ||
905 | if (poll) { | 903 | if (poll) { |
906 | ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD); | 904 | ret = queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); |
907 | if (ret) | 905 | if (ret) |
908 | DRM_ERROR("delayed enqueue failed %d\n", ret); | 906 | DRM_ERROR("delayed enqueue failed %d\n", ret); |
909 | } | 907 | } |
@@ -912,9 +910,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_enable); | |||
912 | 910 | ||
913 | void drm_kms_helper_poll_init(struct drm_device *dev) | 911 | void drm_kms_helper_poll_init(struct drm_device *dev) |
914 | { | 912 | { |
915 | slow_work_register_user(THIS_MODULE); | 913 | INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute); |
916 | delayed_slow_work_init(&dev->mode_config.output_poll_slow_work, | ||
917 | &output_poll_ops); | ||
918 | dev->mode_config.poll_enabled = true; | 914 | dev->mode_config.poll_enabled = true; |
919 | 915 | ||
920 | drm_kms_helper_poll_enable(dev); | 916 | drm_kms_helper_poll_enable(dev); |
@@ -924,7 +920,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init); | |||
924 | void drm_kms_helper_poll_fini(struct drm_device *dev) | 920 | void drm_kms_helper_poll_fini(struct drm_device *dev) |
925 | { | 921 | { |
926 | drm_kms_helper_poll_disable(dev); | 922 | drm_kms_helper_poll_disable(dev); |
927 | slow_work_unregister_user(THIS_MODULE); | ||
928 | } | 923 | } |
929 | EXPORT_SYMBOL(drm_kms_helper_poll_fini); | 924 | EXPORT_SYMBOL(drm_kms_helper_poll_fini); |
930 | 925 | ||
@@ -932,12 +927,8 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) | |||
932 | { | 927 | { |
933 | if (!dev->mode_config.poll_enabled) | 928 | if (!dev->mode_config.poll_enabled) |
934 | return; | 929 | return; |
935 | delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work); | 930 | /* kill timer and schedule immediate execution, this doesn't block */ |
936 | /* schedule a slow work asap */ | 931 | cancel_delayed_work(&dev->mode_config.output_poll_work); |
937 | delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0); | 932 | queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0); |
938 | } | 933 | } |
939 | EXPORT_SYMBOL(drm_helper_hpd_irq_event); | 934 | EXPORT_SYMBOL(drm_helper_hpd_irq_event); |
940 | |||
941 | static struct slow_work_ops output_poll_ops = { | ||
942 | .execute = output_poll_execute, | ||
943 | }; | ||