Diffstat (limited to 'drivers')
-rw-r--r--   drivers/acpi/osl.c                       40
-rw-r--r--   drivers/ata/libata-core.c                20
-rw-r--r--   drivers/ata/libata-eh.c                   4
-rw-r--r--   drivers/ata/libata-scsi.c                10
-rw-r--r--   drivers/ata/libata-sff.c                  9
-rw-r--r--   drivers/ata/libata.h                      1
-rw-r--r--   drivers/gpu/drm/drm_crtc_helper.c        29
-rw-r--r--   drivers/media/video/ivtv/ivtv-driver.c   26
-rw-r--r--   drivers/media/video/ivtv/ivtv-driver.h    8
-rw-r--r--   drivers/media/video/ivtv/ivtv-irq.c      15
-rw-r--r--   drivers/media/video/ivtv/ivtv-irq.h       2
11 files changed, 59 insertions, 105 deletions
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 78418ce4fc78..46cce391fa46 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -191,36 +191,11 @@ acpi_status __init acpi_os_initialize(void)
         return AE_OK;
 }
 
-static void bind_to_cpu0(struct work_struct *work)
-{
-        set_cpus_allowed_ptr(current, cpumask_of(0));
-        kfree(work);
-}
-
-static void bind_workqueue(struct workqueue_struct *wq)
-{
-        struct work_struct *work;
-
-        work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
-        INIT_WORK(work, bind_to_cpu0);
-        queue_work(wq, work);
-}
-
 acpi_status acpi_os_initialize1(void)
 {
-        /*
-         * On some machines, a software-initiated SMI causes corruption unless
-         * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
-         * typically it's done in GPE-related methods that are run via
-         * workqueues, so we can avoid the known corruption cases by binding
-         * the workqueues to CPU 0.
-         */
-        kacpid_wq = create_singlethread_workqueue("kacpid");
-        bind_workqueue(kacpid_wq);
-        kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
-        bind_workqueue(kacpi_notify_wq);
-        kacpi_hotplug_wq = create_singlethread_workqueue("kacpi_hotplug");
-        bind_workqueue(kacpi_hotplug_wq);
+        kacpid_wq = create_workqueue("kacpid");
+        kacpi_notify_wq = create_workqueue("kacpi_notify");
+        kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
         BUG_ON(!kacpid_wq);
         BUG_ON(!kacpi_notify_wq);
         BUG_ON(!kacpi_hotplug_wq);
@@ -766,7 +741,14 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
         else
                 INIT_WORK(&dpc->work, acpi_os_execute_deferred);
 
-        ret = queue_work(queue, &dpc->work);
+        /*
+         * On some machines, a software-initiated SMI causes corruption unless
+         * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
+         * typically it's done in GPE-related methods that are run via
+         * workqueues, so we can avoid the known corruption cases by always
+         * queueing on CPU 0.
+         */
+        ret = queue_work_on(0, queue, &dpc->work);
 
         if (!ret) {
                 printk(KERN_ERR PREFIX
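The removed bind_workqueue() trick queued a dummy work item whose only job was to call set_cpus_allowed_ptr() and pin the single worker thread to CPU 0; with concurrency-managed workqueues there is no dedicated per-queue thread to pin, so the CPU is chosen per work item with queue_work_on() instead. A minimal sketch of that pattern as a stand-alone module, not the ACPI code itself (the smi_safe_* names and the use of system_wq are illustrative assumptions, not from this patch):

#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Illustrative work item; the real patch queues ACPI's dpc->work instead. */
static void smi_safe_fn(struct work_struct *work)
{
        /* Executes on CPU 0, avoiding the SMI corruption described above. */
        pr_info("deferred work ran on CPU %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(smi_safe_work, smi_safe_fn);

static int __init smi_safe_init(void)
{
        /* Pin this particular work item to CPU 0; no bound worker needed. */
        queue_work_on(0, system_wq, &smi_safe_work);
        return 0;
}

static void __exit smi_safe_exit(void)
{
        flush_work(&smi_safe_work);
}

module_init(smi_safe_init);
module_exit(smi_safe_exit);
MODULE_LICENSE("GPL");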
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a0a4d6968400..4972fdf4bd31 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -98,8 +98,6 @@ static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
 
 unsigned int ata_print_id = 1;
 
-struct workqueue_struct *ata_aux_wq;
-
 struct ata_force_param {
         const char      *name;
         unsigned int    cbl;
@@ -5594,6 +5592,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
         ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
 #endif
 
+        mutex_init(&ap->scsi_scan_mutex);
         INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
         INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
         INIT_LIST_HEAD(&ap->eh_done_q);
@@ -6532,29 +6531,20 @@ static int __init ata_init(void)
 
         ata_parse_force_param();
 
-        ata_aux_wq = create_singlethread_workqueue("ata_aux");
-        if (!ata_aux_wq)
-                goto fail;
-
         rc = ata_sff_init();
-        if (rc)
-                goto fail;
+        if (rc) {
+                kfree(ata_force_tbl);
+                return rc;
+        }
 
         printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
         return 0;
-
-fail:
-        kfree(ata_force_tbl);
-        if (ata_aux_wq)
-                destroy_workqueue(ata_aux_wq);
-        return rc;
 }
 
 static void __exit ata_exit(void)
 {
         ata_sff_exit();
         kfree(ata_force_tbl);
-        destroy_workqueue(ata_aux_wq);
 }
 
 subsys_initcall(ata_init);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 697474b625b7..c9ae299b8342 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -727,7 +727,7 @@ void ata_scsi_error(struct Scsi_Host *host)
         if (ap->pflags & ATA_PFLAG_LOADING)
                 ap->pflags &= ~ATA_PFLAG_LOADING;
         else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
-                queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);
+                schedule_delayed_work(&ap->hotplug_task, 0);
 
         if (ap->pflags & ATA_PFLAG_RECOVERED)
                 ata_port_printk(ap, KERN_INFO, "EH complete\n");
@@ -2945,7 +2945,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
                         ehc->i.flags |= ATA_EHI_SETMODE;
 
                         /* schedule the scsi_rescan_device() here */
-                        queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
+                        schedule_work(&(ap->scsi_rescan_task));
                 } else if (dev->class == ATA_DEV_UNKNOWN &&
                            ehc->tries[dev->devno] &&
                            ata_class_enabled(ehc->classes[dev->devno])) {
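With ata_aux_wq gone, the EH paths simply hand their items to the shared system workqueue via schedule_work()/schedule_delayed_work(); the private single-threaded queue only provided mutual exclusion, which the libata-scsi.c hunks below replace with ap->scsi_scan_mutex. A tiny illustrative module showing the two calls used here (the demo_* names are mine, not libata's):

#include <linux/module.h>
#include <linux/workqueue.h>

/* Illustrative work items; libata queues ap->scsi_rescan_task and
 * ap->hotplug_task the same way. */
static void demo_rescan_fn(struct work_struct *work)
{
        pr_info("rescan work ran\n");
}

static void demo_hotplug_fn(struct work_struct *work)
{
        pr_info("hotplug work ran\n");
}

static DECLARE_WORK(demo_rescan_work, demo_rescan_fn);
static DECLARE_DELAYED_WORK(demo_hotplug_work, demo_hotplug_fn);

static int __init demo_init(void)
{
        schedule_work(&demo_rescan_work);               /* system workqueue, now */
        schedule_delayed_work(&demo_hotplug_work, 0);   /* delay of 0 jiffies */
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_work_sync(&demo_rescan_work);
        cancel_delayed_work_sync(&demo_hotplug_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");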
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a54273d2c3c6..d75c9c479d1a 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3435,7 +3435,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
                          " switching to async\n");
         }
 
-        queue_delayed_work(ata_aux_wq, &ap->hotplug_task,
+        queue_delayed_work(system_long_wq, &ap->hotplug_task,
                            round_jiffies_relative(HZ));
 }
 
@@ -3582,6 +3582,7 @@ void ata_scsi_hotplug(struct work_struct *work)
         }
 
         DPRINTK("ENTER\n");
+        mutex_lock(&ap->scsi_scan_mutex);
 
         /* Unplug detached devices. We cannot use link iterator here
          * because PMP links have to be scanned even if PMP is
@@ -3595,6 +3596,7 @@ void ata_scsi_hotplug(struct work_struct *work)
         /* scan for new ones */
         ata_scsi_scan_host(ap, 0);
 
+        mutex_unlock(&ap->scsi_scan_mutex);
         DPRINTK("EXIT\n");
 }
 
@@ -3673,9 +3675,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
  * @work: Pointer to ATA port to perform scsi_rescan_device()
  *
  * After ATA pass thru (SAT) commands are executed successfully,
- * libata need to propagate the changes to SCSI layer.  This
- * function must be executed from ata_aux_wq such that sdev
- * attach/detach don't race with rescan.
+ * libata need to propagate the changes to SCSI layer.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
@@ -3688,6 +3688,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
         struct ata_device *dev;
         unsigned long flags;
 
+        mutex_lock(&ap->scsi_scan_mutex);
         spin_lock_irqsave(ap->lock, flags);
 
         ata_for_each_link(link, ap, EDGE) {
@@ -3707,6 +3708,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
         }
 
         spin_unlock_irqrestore(ap->lock, flags);
+        mutex_unlock(&ap->scsi_scan_mutex);
 }
 
 /**
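Before this change, ata_scsi_hotplug() and ata_scsi_dev_rescan() relied on ata_aux_wq being single-threaded to keep sdev attach/detach from racing with rescan; now that both run on shared workqueues, the new ap->scsi_scan_mutex makes that exclusion explicit. A minimal sketch of the same pattern with a hypothetical scan_dev structure (not libata's types):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Hypothetical device with two work items that must not run concurrently. */
struct scan_dev {
        struct mutex            scan_mutex;     /* serializes hotplug vs. rescan */
        struct delayed_work     hotplug_work;
        struct work_struct      rescan_work;
};

static void scan_hotplug(struct work_struct *work)
{
        struct scan_dev *sd = container_of(to_delayed_work(work),
                                           struct scan_dev, hotplug_work);

        mutex_lock(&sd->scan_mutex);
        /* attach/detach devices here */
        mutex_unlock(&sd->scan_mutex);
}

static void scan_rescan(struct work_struct *work)
{
        struct scan_dev *sd = container_of(work, struct scan_dev, rescan_work);

        mutex_lock(&sd->scan_mutex);
        /* rescan existing devices here */
        mutex_unlock(&sd->scan_mutex);
}

static void scan_dev_init(struct scan_dev *sd)
{
        mutex_init(&sd->scan_mutex);
        INIT_DELAYED_WORK(&sd->hotplug_work, scan_hotplug);
        INIT_WORK(&sd->rescan_work, scan_rescan);
}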
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index efa4a18cfb9d..674c1436491f 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -3318,14 +3318,7 @@ void ata_sff_port_init(struct ata_port *ap)
 
 int __init ata_sff_init(void)
 {
-        /*
-         * FIXME: In UP case, there is only one workqueue thread and if you
-         * have more than one PIO device, latency is bloody awful, with
-         * occasional multi-second "hiccups" as one PIO device waits for
-         * another.  It's an ugly wart that users DO occasionally complain
-         * about; luckily most users have at most one PIO polled device.
-         */
-        ata_sff_wq = create_workqueue("ata_sff");
+        ata_sff_wq = alloc_workqueue("ata_sff", WQ_RESCUER, WQ_MAX_ACTIVE);
 if (!ata_sff_wq)
                 return -ENOMEM;
 
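alloc_workqueue() with max_active set to WQ_MAX_ACTIVE is what removes the FIXME: PIO polling work for several devices can now run concurrently instead of serializing behind one thread, and WQ_RESCUER keeps a rescuer thread around so the queue still makes progress under memory pressure (later kernels request this with WQ_MEM_RECLAIM). A hedged sketch of the allocation pattern, using an illustrative queue name rather than libata's:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *pio_wq; /* illustrative name, not libata's */

static int __init pio_wq_init(void)
{
        /*
         * WQ_RESCUER guarantees a rescuer thread so queued work can still
         * run under memory pressure; WQ_MAX_ACTIVE lifts the old
         * one-item-at-a-time limit of a single-threaded queue.
         */
        pio_wq = alloc_workqueue("pio_demo", WQ_RESCUER, WQ_MAX_ACTIVE);
        if (!pio_wq)
                return -ENOMEM;
        return 0;
}

static void __exit pio_wq_exit(void)
{
        destroy_workqueue(pio_wq);
}

module_init(pio_wq_init);
module_exit(pio_wq_exit);
MODULE_LICENSE("GPL");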
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 4b84ed60324a..9ce1ecc63e39 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -54,7 +54,6 @@ enum {
 };
 
 extern unsigned int ata_print_id;
-extern struct workqueue_struct *ata_aux_wq;
 extern int atapi_passthru16;
 extern int libata_fua;
 extern int libata_noacpi;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 11fe9c870d17..45981304feb8 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -831,13 +831,11 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
-static struct slow_work_ops output_poll_ops;
-
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void output_poll_execute(struct slow_work *work)
+static void output_poll_execute(struct work_struct *work)
 {
-        struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
-        struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+        struct delayed_work *delayed_work = to_delayed_work(work);
+        struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
         struct drm_connector *connector;
         enum drm_connector_status old_status, status;
         bool repoll = false, changed = false;
@@ -877,7 +875,7 @@ static void output_poll_execute(struct slow_work *work)
         }
 
         if (repoll) {
-                ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
+                ret = queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
                 if (ret)
                         DRM_ERROR("delayed enqueue failed %d\n", ret);
         }
@@ -887,7 +885,7 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
         if (!dev->mode_config.poll_enabled)
                 return;
-        delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+        cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
@@ -903,7 +901,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
         }
 
         if (poll) {
-                ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
+                ret = queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
                 if (ret)
                         DRM_ERROR("delayed enqueue failed %d\n", ret);
         }
@@ -912,9 +910,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
 void drm_kms_helper_poll_init(struct drm_device *dev)
 {
-        slow_work_register_user(THIS_MODULE);
-        delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
-                               &output_poll_ops);
+        INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
         dev->mode_config.poll_enabled = true;
 
         drm_kms_helper_poll_enable(dev);
@@ -924,7 +920,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
 void drm_kms_helper_poll_fini(struct drm_device *dev)
 {
         drm_kms_helper_poll_disable(dev);
-        slow_work_unregister_user(THIS_MODULE);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_fini);
 
@@ -932,12 +927,8 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 {
         if (!dev->mode_config.poll_enabled)
                 return;
-        delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
-        /* schedule a slow work asap */
-        delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+        /* kill timer and schedule immediate execution, this doesn't block */
+        cancel_delayed_work(&dev->mode_config.output_poll_work);
+        queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-static struct slow_work_ops output_poll_ops = {
-        .execute = output_poll_execute,
-};
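Compared with slow-work, which needed slow_work_register_user(), an ops table and its own init/enqueue/cancel calls, a delayed_work needs only INIT_DELAYED_WORK() plus queue/cancel, and system_nrt_wq (the non-reentrant system workqueue in this kernel series) preserves the property that the poll function never runs on two CPUs at once. A minimal sketch of the resulting self-rearming poll loop, using a hypothetical poller struct rather than drm_mode_config:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define POLL_PERIOD (10 * HZ)

/* Hypothetical polling object; stands in for drm_mode_config here. */
struct poller {
        struct delayed_work     poll_work;
        bool                    enabled;
};

static void poll_execute(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct poller *p = container_of(dwork, struct poller, poll_work);

        /* ... detect changes ... */

        /* re-arm, like output_poll_execute() does when repoll is set */
        queue_delayed_work(system_nrt_wq, &p->poll_work, POLL_PERIOD);
}

static void poller_init(struct poller *p)
{
        INIT_DELAYED_WORK(&p->poll_work, poll_execute);
        p->enabled = true;
        queue_delayed_work(system_nrt_wq, &p->poll_work, POLL_PERIOD);
}

static void poller_fini(struct poller *p)
{
        /* must not be called from poll_execute() itself */
        cancel_delayed_work_sync(&p->poll_work);
}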
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 90daa6e751d8..07c5c18a25cb 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -705,6 +705,8 @@ done:
  */
 static int __devinit ivtv_init_struct1(struct ivtv *itv)
 {
+        struct sched_param param = { .sched_priority = 99 };
+
         itv->base_addr = pci_resource_start(itv->pdev, 0);
         itv->enc_mbox.max_mbox = 2;   /* the encoder has 3 mailboxes (0-2) */
         itv->dec_mbox.max_mbox = 1;   /* the decoder has 2 mailboxes (0-1) */
@@ -716,13 +718,17 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
         spin_lock_init(&itv->lock);
         spin_lock_init(&itv->dma_reg_lock);
 
-        itv->irq_work_queues = create_singlethread_workqueue(itv->v4l2_dev.name);
-        if (itv->irq_work_queues == NULL) {
-                IVTV_ERR("Could not create ivtv workqueue\n");
+        init_kthread_worker(&itv->irq_worker);
+        itv->irq_worker_task = kthread_run(kthread_worker_fn, &itv->irq_worker,
+                                           itv->v4l2_dev.name);
+        if (IS_ERR(itv->irq_worker_task)) {
+                IVTV_ERR("Could not create ivtv task\n");
                 return -1;
         }
+        /* must use the FIFO scheduler as it is realtime sensitive */
+        sched_setscheduler(itv->irq_worker_task, SCHED_FIFO, &param);
 
-        INIT_WORK(&itv->irq_work_queue, ivtv_irq_work_handler);
+        init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
 
         /* start counting open_id at 1 */
         itv->open_id = 1;
@@ -1006,7 +1012,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
         /* PCI Device Setup */
         retval = ivtv_setup_pci(itv, pdev, pci_id);
         if (retval == -EIO)
-                goto free_workqueue;
+                goto free_worker;
         if (retval == -ENXIO)
                 goto free_mem;
 
@@ -1218,8 +1224,8 @@ free_mem:
         release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
         if (itv->has_cx23415)
                 release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
-free_workqueue:
-        destroy_workqueue(itv->irq_work_queues);
+free_worker:
+        kthread_stop(itv->irq_worker_task);
 err:
         if (retval == 0)
                 retval = -ENODEV;
@@ -1363,9 +1369,9 @@ static void ivtv_remove(struct pci_dev *pdev)
         ivtv_set_irq_mask(itv, 0xffffffff);
         del_timer_sync(&itv->dma_timer);
 
-        /* Stop all Work Queues */
-        flush_workqueue(itv->irq_work_queues);
-        destroy_workqueue(itv->irq_work_queues);
+        /* Kill irq worker */
+        flush_kthread_worker(&itv->irq_worker);
+        kthread_stop(itv->irq_worker_task);
 
         ivtv_streams_cleanup(itv, 1);
         ivtv_udma_free(itv);
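The ivtv deferred-IRQ work has to run with SCHED_FIFO priority, which it can no longer get by calling sched_setscheduler() on a shared workqueue worker; a kthread_worker gives the driver a dedicated thread whose scheduling policy it can set once at init time. A condensed, hedged sketch of the lifecycle used above (my_dev and the "my_dev_irq" thread name are stand-ins, and error handling is trimmed):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Condensed from the ivtv changes above; my_dev stands in for struct ivtv. */
struct my_dev {
        struct kthread_worker   irq_worker;
        struct task_struct      *irq_worker_task;
        struct kthread_work     irq_work;
};

static void my_irq_work_handler(struct kthread_work *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, irq_work);
        /* PIO/YUV/VBI style deferred processing would go here */
        (void)dev;
}

static int my_dev_init_worker(struct my_dev *dev)
{
        struct sched_param param = { .sched_priority = 99 };

        init_kthread_worker(&dev->irq_worker);
        dev->irq_worker_task = kthread_run(kthread_worker_fn, &dev->irq_worker,
                                           "my_dev_irq");
        if (IS_ERR(dev->irq_worker_task))
                return PTR_ERR(dev->irq_worker_task);

        /* the dedicated thread can safely be made SCHED_FIFO */
        sched_setscheduler(dev->irq_worker_task, SCHED_FIFO, &param);
        init_kthread_work(&dev->irq_work, my_irq_work_handler);
        return 0;
}

/* called from the interrupt handler */
static void my_dev_kick(struct my_dev *dev)
{
        queue_kthread_work(&dev->irq_worker, &dev->irq_work);
}

static void my_dev_stop_worker(struct my_dev *dev)
{
        flush_kthread_worker(&dev->irq_worker);
        kthread_stop(dev->irq_worker_task);
}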
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index bd084df4448a..102071246218 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -51,7 +51,7 @@
 #include <linux/unistd.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
-#include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <asm/uaccess.h>
@@ -260,7 +260,6 @@ struct ivtv_mailbox_data {
 #define IVTV_F_I_DEC_PAUSED       20    /* the decoder is paused */
 #define IVTV_F_I_INITED           21    /* set after first open */
 #define IVTV_F_I_FAILED           22    /* set if first open failed */
-#define IVTV_F_I_WORK_INITED      23    /* worker thread was initialized */
 
 /* Event notifications */
 #define IVTV_F_I_EV_DEC_STOPPED   28    /* decoder stopped event */
@@ -666,8 +665,9 @@ struct ivtv {
         /* Interrupts & DMA */
         u32 irqmask;                            /* active interrupts */
         u32 irq_rr_idx;                         /* round-robin stream index */
-        struct workqueue_struct *irq_work_queues;  /* workqueue for PIO/YUV/VBI actions */
-        struct work_struct irq_work_queue;         /* work entry */
+        struct kthread_worker irq_worker;       /* kthread worker for PIO/YUV/VBI actions */
+        struct task_struct *irq_worker_task;    /* task for irq_worker */
+        struct kthread_work irq_work;           /* kthread work entry */
         spinlock_t dma_reg_lock;                /* lock access to DMA engine registers */
         int cur_dma_stream;                     /* index of current stream doing DMA (-1 if none) */
         int cur_pio_stream;                     /* index of current stream doing PIO (-1 if none) */
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index fea1ec33b0df..9b4faf009196 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -71,19 +71,10 @@ static void ivtv_pio_work_handler(struct ivtv *itv)
         write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
 }
 
-void ivtv_irq_work_handler(struct work_struct *work)
+void ivtv_irq_work_handler(struct kthread_work *work)
 {
-        struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
+        struct ivtv *itv = container_of(work, struct ivtv, irq_work);
 
-        DEFINE_WAIT(wait);
-
-        if (test_and_clear_bit(IVTV_F_I_WORK_INITED, &itv->i_flags)) {
-                struct sched_param param = { .sched_priority = 99 };
-
-                /* This thread must use the FIFO scheduler as it
-                   is realtime sensitive. */
-                sched_setscheduler(current, SCHED_FIFO, &param);
-        }
         if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
                 ivtv_pio_work_handler(itv);
 
@@ -975,7 +966,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
         }
 
         if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
-                queue_work(itv->irq_work_queues, &itv->irq_work_queue);
+                queue_kthread_work(&itv->irq_worker, &itv->irq_work);
         }
 
         spin_unlock(&itv->dma_reg_lock);
diff --git a/drivers/media/video/ivtv/ivtv-irq.h b/drivers/media/video/ivtv/ivtv-irq.h
index f879a5822e71..1e84433737cc 100644
--- a/drivers/media/video/ivtv/ivtv-irq.h
+++ b/drivers/media/video/ivtv/ivtv-irq.h
@@ -46,7 +46,7 @@
 
 irqreturn_t ivtv_irq_handler(int irq, void *dev_id);
 
-void ivtv_irq_work_handler(struct work_struct *work);
+void ivtv_irq_work_handler(struct kthread_work *work);
 void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock);
 void ivtv_unfinished_dma(unsigned long arg);
 