author     Linus Torvalds <torvalds@linux-foundation.org>   2011-03-16 11:20:19 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-03-16 11:20:19 -0400
commit     bd2895eeade5f11f3e5906283c630bbdb4b57454 (patch)
tree       4d98f4fcd80c7d062afce28823d08aee53e66f82
parent     016aa2ed1cc9cf704cf76d8df07751b6daa9750f (diff)
parent     24d51add7438f9696a7205927bf9de3c5c787a58 (diff)
Merge branch 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'for-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: fix build failure introduced by s/freezeable/freezable/
workqueue: add system_freezeable_wq
rds/ib: use system_wq instead of rds_ib_fmr_wq
net/9p: replace p9_poll_task with a work
net/9p: use system_wq instead of p9_mux_wq
xfs: convert to alloc_workqueue()
reiserfs: make commit_wq use the default concurrency level
ocfs2: use system_wq instead of ocfs2_quota_wq
ext4: convert to alloc_workqueue()
scsi/scsi_tgt_lib: scsi_tgtd isn't used in memory reclaim path
scsi/be2iscsi,qla2xxx: convert to alloc_workqueue()
misc/iwmc3200top: use system_wq instead of dedicated workqueues
i2o: use alloc_workqueue() instead of create_workqueue()
acpi: kacpi*_wq don't need WQ_MEM_RECLAIM
fs/aio: aio_wq isn't used in memory reclaim path
input/tps6507x-ts: use system_wq instead of dedicated workqueue
cpufreq: use system_wq instead of dedicated workqueues
wireless/ipw2x00: use system_wq instead of dedicated workqueues
arm/omap: use system_wq in mailbox
workqueue: use WQ_MEM_RECLAIM instead of WQ_RESCUER
32 files changed, 190 insertions(+), 339 deletions(-)
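
Nearly every patch in this merge applies one of two conversions: a dedicated create_workqueue() becomes an alloc_workqueue() call with explicit flags, or the private queue is dropped entirely in favor of the shared system_wq, reached through the schedule_work() family of helpers. A minimal sketch of the pattern, assuming a hypothetical driver; my_wq, my_work and my_handler are illustrative names, not taken from this diff:

	#include <linux/workqueue.h>

	static void my_handler(struct work_struct *work)
	{
		/* deferred processing goes here */
	}

	static DECLARE_WORK(my_work, my_handler);
	static struct workqueue_struct *my_wq;

	/* Old style: a dedicated queue with its own kernel thread(s). */
	static int my_init_old(void)
	{
		my_wq = create_workqueue("my_wq");
		if (!my_wq)
			return -ENOMEM;
		queue_work(my_wq, &my_work);
		return 0;
	}

	/* Conversion 1: keep a private queue but state its requirements.
	 * WQ_MEM_RECLAIM guarantees a rescuer thread so the queue makes
	 * progress under memory pressure; the last argument caps the
	 * number of in-flight work items per CPU. */
	static int my_init_alloc(void)
	{
		my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 1);
		if (!my_wq)
			return -ENOMEM;
		queue_work(my_wq, &my_work);
		return 0;
	}

	/* Conversion 2: no private queue at all. Appropriate only when
	 * the work is not part of a memory-reclaim path and needs no
	 * special execution guarantees. */
	static int my_init_system(void)
	{
		schedule_work(&my_work); /* == queue_work(system_wq, &my_work) */
		return 0;
	}
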
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 49d3208793e5..69ddc9f76c13 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -32,7 +32,6 @@
 
 #include <plat/mailbox.h>
 
-static struct workqueue_struct *mboxd;
 static struct omap_mbox **mboxes;
 
 static int mbox_configured;
@@ -197,7 +196,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
 	/* no more messages in the fifo. clear IRQ source. */
 	ack_mbox_irq(mbox, IRQ_RX);
 nomem:
-	queue_work(mboxd, &mbox->rxq->work);
+	schedule_work(&mbox->rxq->work);
 }
 
 static irqreturn_t mbox_interrupt(int irq, void *p)
@@ -307,7 +306,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
 	if (!--mbox->use_count) {
 		free_irq(mbox->irq, mbox);
 		tasklet_kill(&mbox->txq->tasklet);
-		flush_work(&mbox->rxq->work);
+		flush_work_sync(&mbox->rxq->work);
 		mbox_queue_free(mbox->txq);
 		mbox_queue_free(mbox->rxq);
 	}
@@ -409,10 +408,6 @@ static int __init omap_mbox_init(void)
 	if (err)
 		return err;
 
-	mboxd = create_workqueue("mboxd");
-	if (!mboxd)
-		return -ENOMEM;
-
 	/* kfifo size sanity check: alignment and minimal size */
 	mbox_kfifo_size = ALIGN(mbox_kfifo_size, sizeof(mbox_msg_t));
 	mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
@@ -424,7 +419,6 @@ subsys_initcall(omap_mbox_init);
 
 static void __exit omap_mbox_exit(void)
 {
-	destroy_workqueue(mboxd);
 	class_unregister(&omap_mbox_class);
 }
 module_exit(omap_mbox_exit);
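
Note what the mailbox conversion above does on the teardown side: destroy_workqueue() implicitly drained the private queue, so once the work runs on system_wq the driver must wait for its own items explicitly, here via flush_work_sync(). A sketch of the before/after shape, reusing the hypothetical names from the earlier example:

	/* Before: destroying the private queue also waited for its work. */
	static void my_exit_old(void)
	{
		destroy_workqueue(my_wq);
	}

	/* After: each item is flushed (or cancelled) individually before
	 * the data it touches goes away. */
	static void my_exit_new(void)
	{
		flush_work_sync(&my_work);	/* wait for a running handler */
		/* or: cancel_work_sync(&my_work) to cancel and wait */
	}
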
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index c90c76aa7f8b..4a6753009d79 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1589,9 +1589,9 @@
 
 acpi_status __init acpi_os_initialize1(void)
 {
-	kacpid_wq = create_workqueue("kacpid");
-	kacpi_notify_wq = create_workqueue("kacpi_notify");
-	kacpi_hotplug_wq = create_workqueue("kacpi_hotplug");
+	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
 	BUG_ON(!kacpi_hotplug_wq);
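
The ACPI hunk above is the create_workqueue() equivalence spelled out by hand. At this point in the tree, create_workqueue(name) expands (roughly) to alloc_workqueue(name, WQ_MEM_RECLAIM, 1), so passing 0 for the flags keeps the serializing max_active of 1 while dropping the rescuer thread that the shortlog entry says kacpi*_wq never needed:

	/* before: rescuer + max_active of 1 */
	kacpid_wq = create_workqueue("kacpid");

	/* after: same max_active, no WQ_MEM_RECLAIM rescuer */
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
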
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 526bfbf69611..94284c8473b1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -81,8 +81,6 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
  */
 static DEFINE_MUTEX(dbs_mutex);
 
-static struct workqueue_struct *kconservative_wq;
-
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
@@ -560,7 +558,7 @@ static void do_dbs_timer(struct work_struct *work)
 
 	dbs_check_cpu(dbs_info);
 
-	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
 	mutex_unlock(&dbs_info->timer_mutex);
 }
 
@@ -572,8 +570,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 
 	dbs_info->enable = 1;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
-	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
-			      delay);
+	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -716,25 +713,12 @@ struct cpufreq_governor cpufreq_gov_conservative = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	int err;
-
-	kconservative_wq = create_workqueue("kconservative");
-	if (!kconservative_wq) {
-		printk(KERN_ERR "Creation of kconservative failed\n");
-		return -EFAULT;
-	}
-
-	err = cpufreq_register_governor(&cpufreq_gov_conservative);
-	if (err)
-		destroy_workqueue(kconservative_wq);
-
-	return err;
+	return cpufreq_register_governor(&cpufreq_gov_conservative);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
-	destroy_workqueue(kconservative_wq);
 }
 
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index c631f27a3dcc..58aa85ea5ec6 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -104,8 +104,6 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
  */
 static DEFINE_MUTEX(dbs_mutex);
 
-static struct workqueue_struct *kondemand_wq;
-
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int up_threshold;
@@ -667,7 +665,7 @@ static void do_dbs_timer(struct work_struct *work)
 		__cpufreq_driver_target(dbs_info->cur_policy,
 					dbs_info->freq_lo, CPUFREQ_RELATION_H);
 	}
-	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
 	mutex_unlock(&dbs_info->timer_mutex);
 }
 
@@ -681,8 +679,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
-	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
-			      delay);
+	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -814,7 +811,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	int err;
 	cputime64_t wall;
 	u64 idle_time;
 	int cpu = get_cpu();
@@ -838,22 +834,12 @@ static int __init cpufreq_gov_dbs_init(void)
 			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
 	}
 
-	kondemand_wq = create_workqueue("kondemand");
-	if (!kondemand_wq) {
-		printk(KERN_ERR "Creation of kondemand failed\n");
-		return -EFAULT;
-	}
-	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
-	if (err)
-		destroy_workqueue(kondemand_wq);
-
-	return err;
+	return cpufreq_register_governor(&cpufreq_gov_ondemand);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
-	destroy_workqueue(kondemand_wq);
 }
 
 
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index c8c136cf7bbc..43031492d733 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -43,7 +43,6 @@ struct tps6507x_ts {
 	struct input_dev *input_dev;
 	struct device *dev;
 	char phys[32];
-	struct workqueue_struct *wq;
 	struct delayed_work work;
 	unsigned polling;	/* polling is active */
 	struct ts_event tc;
@@ -220,8 +219,8 @@ done:
 	poll = 1;
 
 	if (poll) {
-		schd = queue_delayed_work(tsc->wq, &tsc->work,
+		schd = schedule_delayed_work(&tsc->work,
					  msecs_to_jiffies(tsc->poll_period));
 		if (schd)
 			tsc->polling = 1;
 		else {
@@ -303,7 +302,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
 	tsc->input_dev = input_dev;
 
 	INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);
-	tsc->wq = create_workqueue("TPS6507x Touchscreen");
 
 	if (init_data) {
 		tsc->poll_period = init_data->poll_period;
@@ -325,8 +323,8 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
 	if (error)
 		goto err2;
 
-	schd = queue_delayed_work(tsc->wq, &tsc->work,
+	schd = schedule_delayed_work(&tsc->work,
 				  msecs_to_jiffies(tsc->poll_period));
 
 	if (schd)
 		tsc->polling = 1;
@@ -341,7 +339,6 @@ static int tps6507x_ts_probe(struct platform_device *pdev)
 
 err2:
 	cancel_delayed_work_sync(&tsc->work);
-	destroy_workqueue(tsc->wq);
 	input_free_device(input_dev);
 err1:
 	kfree(tsc);
@@ -357,7 +354,6 @@ static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
 	struct input_dev *input_dev = tsc->input_dev;
 
 	cancel_delayed_work_sync(&tsc->work);
-	destroy_workqueue(tsc->wq);
 
 	input_unregister_device(input_dev);
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 818313e277e7..d5ad7723b172 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7361,7 +7361,7 @@ static int __init md_init(void)
 {
 	int ret = -ENOMEM;
 
-	md_wq = alloc_workqueue("md", WQ_RESCUER, 0);
+	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
 	if (!md_wq)
 		goto err_wq;
 
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
index a0421efe04ca..8a5b2d8f4daf 100644
--- a/drivers/message/i2o/driver.c
+++ b/drivers/message/i2o/driver.c
@@ -84,7 +84,8 @@ int i2o_driver_register(struct i2o_driver *drv)
 	osm_debug("Register driver %s\n", drv->name);
 
 	if (drv->event) {
-		drv->event_queue = create_workqueue(drv->name);
+		drv->event_queue = alloc_workqueue(drv->name,
+						   WQ_MEM_RECLAIM, 1);
 		if (!drv->event_queue) {
 			osm_err("Could not initialize event queue for driver "
 				"%s\n", drv->name);
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 740ff0738ea8..620973ed8bf9 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -183,9 +183,7 @@ struct iwmct_priv {
 	u32 barker;
 	struct iwmct_dbg dbg;
 
-	/* drivers work queue */
-	struct workqueue_struct *wq;
-	struct workqueue_struct *bus_rescan_wq;
+	/* drivers work items */
 	struct work_struct bus_rescan_worker;
 	struct work_struct isr_worker;
 
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index c73cef2c3c5e..727af07f1fbd 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -89,7 +89,7 @@ static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
 	switch (msg->hdr.opcode) {
 	case OP_OPR_ALIVE:
 		LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
-		queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
+		schedule_work(&priv->bus_rescan_worker);
 		break;
 	default:
 		LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
@@ -360,7 +360,7 @@ static void iwmct_irq(struct sdio_func *func)
 	/* clear the function's interrupt request bit (write 1 to clear) */
 	sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
 
-	queue_work(priv->wq, &priv->isr_worker);
+	schedule_work(&priv->isr_worker);
 
 	LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
 
@@ -506,10 +506,6 @@ static int iwmct_probe(struct sdio_func *func,
 	priv->func = func;
 	sdio_set_drvdata(func, priv);
 
-
-	/* create drivers work queue */
-	priv->wq = create_workqueue(DRV_NAME "_wq");
-	priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
 	INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
 	INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
 
@@ -604,9 +600,9 @@ static void iwmct_remove(struct sdio_func *func)
 	sdio_release_irq(func);
 	sdio_release_host(func);
 
-	/* Safely destroy osc workqueue */
-	destroy_workqueue(priv->bus_rescan_wq);
-	destroy_workqueue(priv->wq);
+	/* Make sure works are finished */
+	flush_work_sync(&priv->bus_rescan_worker);
+	flush_work_sync(&priv->isr_worker);
 
 	sdio_claim_host(func);
 	sdio_disable_func(func);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 61915f371416..471a52a2f8d4 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -706,11 +706,10 @@ static void schedule_reset(struct ipw2100_priv *priv)
 	netif_stop_queue(priv->net_dev);
 	priv->status |= STATUS_RESET_PENDING;
 	if (priv->reset_backoff)
-		queue_delayed_work(priv->workqueue, &priv->reset_work,
+		schedule_delayed_work(&priv->reset_work,
 				   priv->reset_backoff * HZ);
 	else
-		queue_delayed_work(priv->workqueue, &priv->reset_work,
-				   0);
+		schedule_delayed_work(&priv->reset_work, 0);
 
 	if (priv->reset_backoff < MAX_RESET_BACKOFF)
 		priv->reset_backoff++;
@@ -1474,7 +1473,7 @@ static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
 
 	if (priv->stop_hang_check) {
 		priv->stop_hang_check = 0;
-		queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+		schedule_delayed_work(&priv->hang_check, HZ / 2);
 	}
 
 fail_up:
@@ -1808,8 +1807,8 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
 
 			if (priv->stop_rf_kill) {
 				priv->stop_rf_kill = 0;
-				queue_delayed_work(priv->workqueue, &priv->rf_kill,
+				schedule_delayed_work(&priv->rf_kill,
 						   round_jiffies_relative(HZ));
 			}
 
 			deferred = 1;
@@ -2086,7 +2085,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
 	priv->status |= STATUS_ASSOCIATING;
 	priv->connect_start = get_seconds();
 
-	queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
+	schedule_delayed_work(&priv->wx_event_work, HZ / 10);
 }
 
 static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
@@ -2166,9 +2165,9 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
 		return;
 
 	if (priv->status & STATUS_SECURITY_UPDATED)
-		queue_delayed_work(priv->workqueue, &priv->security_work, 0);
+		schedule_delayed_work(&priv->security_work, 0);
 
-	queue_delayed_work(priv->workqueue, &priv->wx_event_work, 0);
+	schedule_delayed_work(&priv->wx_event_work, 0);
 }
 
 static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
@@ -2183,8 +2182,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
 	/* Make sure the RF Kill check timer is running */
 	priv->stop_rf_kill = 0;
 	cancel_delayed_work(&priv->rf_kill);
-	queue_delayed_work(priv->workqueue, &priv->rf_kill,
-			   round_jiffies_relative(HZ));
+	schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
 }
 
 static void send_scan_event(void *data)
@@ -2219,13 +2217,12 @@ static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
 	/* Only userspace-requested scan completion events go out immediately */
 	if (!priv->user_requested_scan) {
 		if (!delayed_work_pending(&priv->scan_event_later))
-			queue_delayed_work(priv->workqueue,
-					   &priv->scan_event_later,
-					   round_jiffies_relative(msecs_to_jiffies(4000)));
+			schedule_delayed_work(&priv->scan_event_later,
+					round_jiffies_relative(msecs_to_jiffies(4000)));
 	} else {
 		priv->user_requested_scan = 0;
 		cancel_delayed_work(&priv->scan_event_later);
-		queue_work(priv->workqueue, &priv->scan_event_now);
+		schedule_work(&priv->scan_event_now);
 	}
 }
 
@@ -4329,8 +4326,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
 		/* Make sure the RF_KILL check timer is running */
 		priv->stop_rf_kill = 0;
 		cancel_delayed_work(&priv->rf_kill);
-		queue_delayed_work(priv->workqueue, &priv->rf_kill,
+		schedule_delayed_work(&priv->rf_kill,
 				      round_jiffies_relative(HZ));
 	} else
 		schedule_reset(priv);
 }
@@ -4461,20 +4458,17 @@ static void bd_queue_initialize(struct ipw2100_priv *priv,
 	IPW_DEBUG_INFO("exit\n");
 }
 
-static void ipw2100_kill_workqueue(struct ipw2100_priv *priv)
+static void ipw2100_kill_works(struct ipw2100_priv *priv)
 {
-	if (priv->workqueue) {
-		priv->stop_rf_kill = 1;
-		priv->stop_hang_check = 1;
-		cancel_delayed_work(&priv->reset_work);
-		cancel_delayed_work(&priv->security_work);
-		cancel_delayed_work(&priv->wx_event_work);
-		cancel_delayed_work(&priv->hang_check);
-		cancel_delayed_work(&priv->rf_kill);
-		cancel_delayed_work(&priv->scan_event_later);
-		destroy_workqueue(priv->workqueue);
-		priv->workqueue = NULL;
-	}
+	priv->stop_rf_kill = 1;
+	priv->stop_hang_check = 1;
+	cancel_delayed_work_sync(&priv->reset_work);
+	cancel_delayed_work_sync(&priv->security_work);
+	cancel_delayed_work_sync(&priv->wx_event_work);
+	cancel_delayed_work_sync(&priv->hang_check);
+	cancel_delayed_work_sync(&priv->rf_kill);
+	cancel_work_sync(&priv->scan_event_now);
+	cancel_delayed_work_sync(&priv->scan_event_later);
 }
 
 static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
@@ -6046,7 +6040,7 @@ static void ipw2100_hang_check(struct work_struct *work)
 	priv->last_rtc = rtc;
 
 	if (!priv->stop_hang_check)
-		queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+		schedule_delayed_work(&priv->hang_check, HZ / 2);
 
 	spin_unlock_irqrestore(&priv->low_lock, flags);
 }
@@ -6062,8 +6056,8 @@ static void ipw2100_rf_kill(struct work_struct *work)
 	if (rf_kill_active(priv)) {
 		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
 		if (!priv->stop_rf_kill)
-			queue_delayed_work(priv->workqueue, &priv->rf_kill,
+			schedule_delayed_work(&priv->rf_kill,
 					   round_jiffies_relative(HZ));
 		goto exit_unlock;
 	}
 
@@ -6209,8 +6203,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
 	INIT_LIST_HEAD(&priv->fw_pend_list);
 	INIT_STAT(&priv->fw_pend_stat);
 
-	priv->workqueue = create_workqueue(DRV_NAME);
-
 	INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
 	INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
 	INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
@@ -6410,7 +6402,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
 	if (dev->irq)
 		free_irq(dev->irq, priv);
 
-	ipw2100_kill_workqueue(priv);
+	ipw2100_kill_works(priv);
 
 	/* These are safe to call even if they weren't allocated */
 	ipw2100_queues_free(priv);
@@ -6460,9 +6452,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
 	 * first, then close() will crash. */
 	unregister_netdev(dev);
 
-	/* ipw2100_down will ensure that there is no more pending work
-	 * in the workqueue's, so we can safely remove them now. */
-	ipw2100_kill_workqueue(priv);
+	ipw2100_kill_works(priv);
 
 	ipw2100_queues_free(priv);
 
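
The ipw2100 teardown above also shows the cancel side of the conversion: cancel_delayed_work() only removes a not-yet-started item and does not wait for a handler that is already executing, which used to be acceptable because destroy_workqueue() did the waiting. With the queue gone, the _sync variants supply that missing guarantee:

	cancel_delayed_work(&priv->rf_kill);		/* remove if pending; may still be running */
	cancel_delayed_work_sync(&priv->rf_kill);	/* remove and wait until it has finished */
	cancel_work_sync(&priv->scan_event_now);	/* same, for a plain work_struct */
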
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 838002b4881e..99cba968aa58 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -580,7 +580,6 @@ struct ipw2100_priv {
 
 	struct tasklet_struct irq_tasklet;
 
-	struct workqueue_struct *workqueue;
 	struct delayed_work reset_work;
 	struct delayed_work security_work;
 	struct delayed_work wx_event_work;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index ae438ed80c2f..160881f234cc 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -894,9 +894,8 @@ static void ipw_led_link_on(struct ipw_priv *priv)
 
 		/* If we aren't associated, schedule turning the LED off */
 		if (!(priv->status & STATUS_ASSOCIATED))
-			queue_delayed_work(priv->workqueue,
-					   &priv->led_link_off,
-					   LD_TIME_LINK_ON);
+			schedule_delayed_work(&priv->led_link_off,
+					      LD_TIME_LINK_ON);
 	}
 
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -939,8 +938,8 @@ static void ipw_led_link_off(struct ipw_priv *priv)
 	 * turning the LED on (blink while unassociated) */
 	if (!(priv->status & STATUS_RF_KILL_MASK) &&
 	    !(priv->status & STATUS_ASSOCIATED))
-		queue_delayed_work(priv->workqueue, &priv->led_link_on,
+		schedule_delayed_work(&priv->led_link_on,
 				   LD_TIME_LINK_OFF);
 
 }
 
@@ -980,13 +979,11 @@ static void __ipw_led_activity_on(struct ipw_priv *priv)
 		priv->status |= STATUS_LED_ACT_ON;
 
 		cancel_delayed_work(&priv->led_act_off);
-		queue_delayed_work(priv->workqueue, &priv->led_act_off,
-				   LD_TIME_ACT_ON);
+		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
 	} else {
 		/* Reschedule LED off for full time period */
 		cancel_delayed_work(&priv->led_act_off);
-		queue_delayed_work(priv->workqueue, &priv->led_act_off,
-				   LD_TIME_ACT_ON);
+		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
 	}
 }
 
@@ -1795,13 +1792,11 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
 	if (disable_radio) {
 		priv->status |= STATUS_RF_KILL_SW;
 
-		if (priv->workqueue) {
-			cancel_delayed_work(&priv->request_scan);
-			cancel_delayed_work(&priv->request_direct_scan);
-			cancel_delayed_work(&priv->request_passive_scan);
-			cancel_delayed_work(&priv->scan_event);
-		}
-		queue_work(priv->workqueue, &priv->down);
+		cancel_delayed_work(&priv->request_scan);
+		cancel_delayed_work(&priv->request_direct_scan);
+		cancel_delayed_work(&priv->request_passive_scan);
+		cancel_delayed_work(&priv->scan_event);
+		schedule_work(&priv->down);
 	} else {
 		priv->status &= ~STATUS_RF_KILL_SW;
 		if (rf_kill_active(priv)) {
@@ -1809,10 +1804,10 @@ static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
 				  "disabled by HW switch\n");
 			/* Make sure the RF_KILL check timer is running */
 			cancel_delayed_work(&priv->rf_kill);
-			queue_delayed_work(priv->workqueue, &priv->rf_kill,
+			schedule_delayed_work(&priv->rf_kill,
 					   round_jiffies_relative(2 * HZ));
 		} else
-			queue_work(priv->workqueue, &priv->up);
+			schedule_work(&priv->up);
 	}
 
 	return 1;
@@ -2063,7 +2058,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
 		cancel_delayed_work(&priv->request_passive_scan);
 		cancel_delayed_work(&priv->scan_event);
 		schedule_work(&priv->link_down);
-		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
+		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
 		handled |= IPW_INTA_BIT_RF_KILL_DONE;
 	}
 
@@ -2103,7 +2098,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
 		priv->status &= ~STATUS_HCMD_ACTIVE;
 		wake_up_interruptible(&priv->wait_command_queue);
 
-		queue_work(priv->workqueue, &priv->adapter_restart);
+		schedule_work(&priv->adapter_restart);
 		handled |= IPW_INTA_BIT_FATAL_ERROR;
 	}
 
@@ -2323,11 +2318,6 @@ static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
 	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
 }
 
-/*
- * NOTE: This must be executed from our workqueue as it results in udelay
- * being called which may corrupt the keyboard if executed on default
- * workqueue
- */
 static void ipw_adapter_restart(void *adapter)
 {
 	struct ipw_priv *priv = adapter;
@@ -2368,13 +2358,13 @@ static void ipw_scan_check(void *data)
 		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
 			       "adapter after (%dms).\n",
 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
-		queue_work(priv->workqueue, &priv->adapter_restart);
+		schedule_work(&priv->adapter_restart);
 	} else if (priv->status & STATUS_SCANNING) {
 		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
 			       "after (%dms).\n",
 			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
 		ipw_abort_scan(priv);
-		queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
+		schedule_delayed_work(&priv->scan_check, HZ);
 	}
 }
 
@@ -3943,7 +3933,7 @@ static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
 
 	if (priv->status & STATUS_ASSOCIATING) {
 		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
-		queue_work(priv->workqueue, &priv->disassociate);
+		schedule_work(&priv->disassociate);
 		return;
 	}
 
@@ -4360,8 +4350,7 @@ static void ipw_gather_stats(struct ipw_priv *priv)
 
 	priv->quality = quality;
 
-	queue_delayed_work(priv->workqueue, &priv->gather_stats,
-			   IPW_STATS_INTERVAL);
+	schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
 }
 
 static void ipw_bg_gather_stats(struct work_struct *work)
@@ -4396,10 +4385,10 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
 			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
 				  IPW_DL_STATE,
 				  "Aborting scan with missed beacon.\n");
-			queue_work(priv->workqueue, &priv->abort_scan);
+			schedule_work(&priv->abort_scan);
 		}
 
-		queue_work(priv->workqueue, &priv->disassociate);
+		schedule_work(&priv->disassociate);
 		return;
 	}
 
@@ -4425,8 +4414,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
 		if (!(priv->status & STATUS_ROAMING)) {
 			priv->status |= STATUS_ROAMING;
 			if (!(priv->status & STATUS_SCANNING))
-				queue_delayed_work(priv->workqueue,
-						   &priv->request_scan, 0);
+				schedule_delayed_work(&priv->request_scan, 0);
 		}
 		return;
 	}
@@ -4439,7 +4427,7 @@ static void ipw_handle_missed_beacon(struct ipw_priv *priv,
 		 * channels..) */
 		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
 			  "Aborting scan with missed beacon.\n");
-		queue_work(priv->workqueue, &priv->abort_scan);
+		schedule_work(&priv->abort_scan);
 	}
 
 	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
@@ -4462,8 +4450,8 @@ static void handle_scan_event(struct ipw_priv *priv)
 	/* Only userspace-requested scan completion events go out immediately */
 	if (!priv->user_requested_scan) {
 		if (!delayed_work_pending(&priv->scan_event))
-			queue_delayed_work(priv->workqueue, &priv->scan_event,
+			schedule_delayed_work(&priv->scan_event,
 					round_jiffies_relative(msecs_to_jiffies(4000)));
 	} else {
 		union iwreq_data wrqu;
 
@@ -4516,20 +4504,17 @@ static void ipw_rx_notification(struct ipw_priv *priv,
 
 				IPW_DEBUG_ASSOC
 				    ("queueing adhoc check\n");
-				queue_delayed_work(priv->
-						   workqueue,
-						   &priv->
-						   adhoc_check,
-						   le16_to_cpu(priv->
-							       assoc_request.
-							       beacon_interval));
+				schedule_delayed_work(
+					&priv->adhoc_check,
+					le16_to_cpu(priv->
+						    assoc_request.
+						    beacon_interval));
 				break;
 			}
 
 			priv->status &= ~STATUS_ASSOCIATING;
 			priv->status |= STATUS_ASSOCIATED;
-			queue_work(priv->workqueue,
-				   &priv->system_config);
+			schedule_work(&priv->system_config);
 
 #ifdef CONFIG_IPW2200_QOS
 #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
@@ -4792,43 +4777,37 @@ static void ipw_rx_notification(struct ipw_priv *priv,
 #ifdef CONFIG_IPW2200_MONITOR
 			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
 				priv->status |= STATUS_SCAN_FORCED;
-				queue_delayed_work(priv->workqueue,
-						   &priv->request_scan, 0);
+				schedule_delayed_work(&priv->request_scan, 0);
 				break;
 			}
 			priv->status &= ~STATUS_SCAN_FORCED;
 #endif				/* CONFIG_IPW2200_MONITOR */
 
 			/* Do queued direct scans first */
-			if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
-				queue_delayed_work(priv->workqueue,
-						   &priv->request_direct_scan, 0);
-			}
+			if (priv->status & STATUS_DIRECT_SCAN_PENDING)
+				schedule_delayed_work(&priv->request_direct_scan, 0);
 
 			if (!(priv->status & (STATUS_ASSOCIATED |
 					      STATUS_ASSOCIATING |
 					      STATUS_ROAMING |
 					      STATUS_DISASSOCIATING)))
-				queue_work(priv->workqueue, &priv->associate);
+				schedule_work(&priv->associate);
 			else if (priv->status & STATUS_ROAMING) {
 				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
 					/* If a scan completed and we are in roam mode, then
 					 * the scan that completed was the one requested as a
 					 * result of entering roam... so, schedule the
 					 * roam work */
-					queue_work(priv->workqueue,
-						   &priv->roam);
+					schedule_work(&priv->roam);
 				else
 					/* Don't schedule if we aborted the scan */
 					priv->status &= ~STATUS_ROAMING;
 			} else if (priv->status & STATUS_SCAN_PENDING)
-				queue_delayed_work(priv->workqueue,
-						   &priv->request_scan, 0);
+				schedule_delayed_work(&priv->request_scan, 0);
 			else if (priv->config & CFG_BACKGROUND_SCAN
 				 && priv->status & STATUS_ASSOCIATED)
-				queue_delayed_work(priv->workqueue,
-						   &priv->request_scan,
-						   round_jiffies_relative(HZ));
+				schedule_delayed_work(&priv->request_scan,
+						      round_jiffies_relative(HZ));
 
 			/* Send an empty event to user space.
 			 * We don't send the received data on the event because
@@ -5192,7 +5171,7 @@ static void ipw_rx_queue_restock(struct ipw_priv *priv)
 	/* If the pre-allocated buffer pool is dropping low, schedule to
 	 * refill it */
 	if (rxq->free_count <= RX_LOW_WATERMARK)
-		queue_work(priv->workqueue, &priv->rx_replenish);
+		schedule_work(&priv->rx_replenish);
 
 	/* If we've added more space for the firmware to place data, tell it */
 	if (write != rxq->write)
@@ -6133,8 +6112,8 @@ static void ipw_adhoc_check(void *data)
 		return;
 	}
 
-	queue_delayed_work(priv->workqueue, &priv->adhoc_check,
+	schedule_delayed_work(&priv->adhoc_check,
 			le16_to_cpu(priv->assoc_request.beacon_interval));
 }
 
 static void ipw_bg_adhoc_check(struct work_struct *work)
@@ -6523,8 +6502,7 @@ send_request:
 	} else
 		priv->status &= ~STATUS_SCAN_PENDING;
 
-	queue_delayed_work(priv->workqueue, &priv->scan_check,
-			   IPW_SCAN_CHECK_WATCHDOG);
+	schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
done:
 	mutex_unlock(&priv->mutex);
 	return err;
@@ -6994,8 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
 		    !memcmp(network->ssid,
 			    priv->assoc_network->ssid,
 			    network->ssid_len)) {
-			queue_work(priv->workqueue,
-				   &priv->merge_networks);
+			schedule_work(&priv->merge_networks);
 		}
 	}
 
@@ -7663,7 +7640,7 @@ static int ipw_associate(void *data)
 	if (priv->status & STATUS_DISASSOCIATING) {
 		IPW_DEBUG_ASSOC("Not attempting association (in "
 				"disassociating)\n ");
-		queue_work(priv->workqueue, &priv->associate);
+		schedule_work(&priv->associate);
 		return 0;
 	}
 
@@ -7731,12 +7708,10 @@ static int ipw_associate(void *data)
 
 	if (!(priv->status & STATUS_SCANNING)) {
 		if (!(priv->config & CFG_SPEED_SCAN))
-			queue_delayed_work(priv->workqueue,
-					   &priv->request_scan,
-					   SCAN_INTERVAL);
+			schedule_delayed_work(&priv->request_scan,
+					      SCAN_INTERVAL);
 		else
-			queue_delayed_work(priv->workqueue,
-					   &priv->request_scan, 0);
+			schedule_delayed_work(&priv->request_scan, 0);
 	}
 
 	return 0;
@@ -8899,7 +8874,7 @@ static int ipw_wx_set_mode(struct net_device *dev,
 
 	priv->ieee->iw_mode = wrqu->mode;
 
-	queue_work(priv->workqueue, &priv->adapter_restart);
+	schedule_work(&priv->adapter_restart);
 	mutex_unlock(&priv->mutex);
 	return err;
 }
@@ -9598,7 +9573,7 @@ static int ipw_wx_set_scan(struct net_device *dev,
 
 	IPW_DEBUG_WX("Start scan\n");
 
-	queue_delayed_work(priv->workqueue, work, 0);
+	schedule_delayed_work(work, 0);
 
 	return 0;
 }
@@ -9937,7 +9912,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
 #else
 			priv->net_dev->type = ARPHRD_IEEE80211;
 #endif
-			queue_work(priv->workqueue, &priv->adapter_restart);
+			schedule_work(&priv->adapter_restart);
 		}
 
 		ipw_set_channel(priv, parms[1]);
@@ -9947,7 +9922,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
 			return 0;
 		}
 		priv->net_dev->type = ARPHRD_ETHER;
-		queue_work(priv->workqueue, &priv->adapter_restart);
+		schedule_work(&priv->adapter_restart);
 	}
 	mutex_unlock(&priv->mutex);
 	return 0;
@@ -9961,7 +9936,7 @@ static int ipw_wx_reset(struct net_device *dev,
 {
 	struct ipw_priv *priv = libipw_priv(dev);
 	IPW_DEBUG_WX("RESET\n");
-	queue_work(priv->workqueue, &priv->adapter_restart);
+	schedule_work(&priv->adapter_restart);
 	return 0;
 }
 
@@ -10551,7 +10526,7 @@ static int ipw_net_set_mac_address(struct net_device *dev, void *p)
 	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
 	printk(KERN_INFO "%s: Setting MAC to %pM\n",
 	       priv->net_dev->name, priv->mac_addr);
-	queue_work(priv->workqueue, &priv->adapter_restart);
+	schedule_work(&priv->adapter_restart);
 	mutex_unlock(&priv->mutex);
 	return 0;
 }
@@ -10684,9 +10659,7 @@ static void ipw_rf_kill(void *adapter)
 
 	if (rf_kill_active(priv)) {
 		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
-		if (priv->workqueue)
-			queue_delayed_work(priv->workqueue,
-					   &priv->rf_kill, 2 * HZ);
+		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
 		goto exit_unlock;
 	}
 
@@ -10697,7 +10670,7 @@ static void ipw_rf_kill(void *adapter)
 			  "device\n");
 
 		/* we can not do an adapter restart while inside an irq lock */
-		queue_work(priv->workqueue, &priv->adapter_restart);
+		schedule_work(&priv->adapter_restart);
 	} else
 		IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
 				  "enabled\n");
@@ -10735,7 +10708,7 @@ static void ipw_link_up(struct ipw_priv *priv)
 	notify_wx_assoc_event(priv);
 
 	if (priv->config & CFG_BACKGROUND_SCAN)
-		queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
+		schedule_delayed_work(&priv->request_scan, HZ);
 }
 
 static void ipw_bg_link_up(struct work_struct *work)
@@ -10764,7 +10737,7 @@ static void ipw_link_down(struct ipw_priv *priv)
 
 	if (!(priv->status & STATUS_EXIT_PENDING)) {
 		/* Queue up another scan... */
-		queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
+		schedule_delayed_work(&priv->request_scan, 0);
 	} else
 		cancel_delayed_work(&priv->scan_event);
 }
@@ -10782,7 +10755,6 @@ static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
 {
 	int ret = 0;
 
-	priv->workqueue = create_workqueue(DRV_NAME);
 	init_waitqueue_head(&priv->wait_command_queue);
 	init_waitqueue_head(&priv->wait_state);
 
@@ -11339,8 +11311,7 @@ static int ipw_up(struct ipw_priv *priv)
 			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
 				    "Kill switch must be turned off for "
 				    "wireless networking to work.\n");
-			queue_delayed_work(priv->workqueue, &priv->rf_kill,
-					   2 * HZ);
+			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
 			return 0;
 		}
 
@@ -11350,8 +11321,7 @@ static int ipw_up(struct ipw_priv *priv)
 
 		/* If configure to try and auto-associate, kick
 		 * off a scan. */
-		queue_delayed_work(priv->workqueue,
-				   &priv->request_scan, 0);
+		schedule_delayed_work(&priv->request_scan, 0);
 
 		return 0;
 	}
@@ -11817,7 +11787,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, | |||
11817 | err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); | 11787 | err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv); |
11818 | if (err) { | 11788 | if (err) { |
11819 | IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); | 11789 | IPW_ERROR("Error allocating IRQ %d\n", pdev->irq); |
11820 | goto out_destroy_workqueue; | 11790 | goto out_iounmap; |
11821 | } | 11791 | } |
11822 | 11792 | ||
11823 | SET_NETDEV_DEV(net_dev, &pdev->dev); | 11793 | SET_NETDEV_DEV(net_dev, &pdev->dev); |
@@ -11885,9 +11855,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, | |||
11885 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); | 11855 | sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group); |
11886 | out_release_irq: | 11856 | out_release_irq: |
11887 | free_irq(pdev->irq, priv); | 11857 | free_irq(pdev->irq, priv); |
11888 | out_destroy_workqueue: | ||
11889 | destroy_workqueue(priv->workqueue); | ||
11890 | priv->workqueue = NULL; | ||
11891 | out_iounmap: | 11858 | out_iounmap: |
11892 | iounmap(priv->hw_base); | 11859 | iounmap(priv->hw_base); |
11893 | out_pci_release_regions: | 11860 | out_pci_release_regions: |
@@ -11930,18 +11897,31 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev) | |||
11930 | kfree(priv->cmdlog); | 11897 | kfree(priv->cmdlog); |
11931 | priv->cmdlog = NULL; | 11898 | priv->cmdlog = NULL; |
11932 | } | 11899 | } |
11933 | /* ipw_down will ensure that there is no more pending work | 11900 | |
11934 | * in the workqueue's, so we can safely remove them now. */ | 11901 | /* make sure all works are inactive */ |
11935 | cancel_delayed_work(&priv->adhoc_check); | 11902 | cancel_delayed_work_sync(&priv->adhoc_check); |
11936 | cancel_delayed_work(&priv->gather_stats); | 11903 | cancel_work_sync(&priv->associate); |
11937 | cancel_delayed_work(&priv->request_scan); | 11904 | cancel_work_sync(&priv->disassociate); |
11938 | cancel_delayed_work(&priv->request_direct_scan); | 11905 | cancel_work_sync(&priv->system_config); |
11939 | cancel_delayed_work(&priv->request_passive_scan); | 11906 | cancel_work_sync(&priv->rx_replenish); |
11940 | cancel_delayed_work(&priv->scan_event); | 11907 | cancel_work_sync(&priv->adapter_restart); |
11941 | cancel_delayed_work(&priv->rf_kill); | 11908 | cancel_delayed_work_sync(&priv->rf_kill); |
11942 | cancel_delayed_work(&priv->scan_check); | 11909 | cancel_work_sync(&priv->up); |
11943 | destroy_workqueue(priv->workqueue); | 11910 | cancel_work_sync(&priv->down); |
11944 | priv->workqueue = NULL; | 11911 | cancel_delayed_work_sync(&priv->request_scan); |
11912 | cancel_delayed_work_sync(&priv->request_direct_scan); | ||
11913 | cancel_delayed_work_sync(&priv->request_passive_scan); | ||
11914 | cancel_delayed_work_sync(&priv->scan_event); | ||
11915 | cancel_delayed_work_sync(&priv->gather_stats); | ||
11916 | cancel_work_sync(&priv->abort_scan); | ||
11917 | cancel_work_sync(&priv->roam); | ||
11918 | cancel_delayed_work_sync(&priv->scan_check); | ||
11919 | cancel_work_sync(&priv->link_up); | ||
11920 | cancel_work_sync(&priv->link_down); | ||
11921 | cancel_delayed_work_sync(&priv->led_link_on); | ||
11922 | cancel_delayed_work_sync(&priv->led_link_off); | ||
11923 | cancel_delayed_work_sync(&priv->led_act_off); | ||
11924 | cancel_work_sync(&priv->merge_networks); | ||
11945 | 11925 | ||
11946 | /* Free MAC hash list for ADHOC */ | 11926 | /* Free MAC hash list for ADHOC */ |
11947 | for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) { | 11927 | for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) { |
@@ -12029,7 +12009,7 @@ static int ipw_pci_resume(struct pci_dev *pdev) | |||
12029 | priv->suspend_time = get_seconds() - priv->suspend_at; | 12009 | priv->suspend_time = get_seconds() - priv->suspend_at; |
12030 | 12010 | ||
12031 | /* Bring the device back up */ | 12011 | /* Bring the device back up */ |
12032 | queue_work(priv->workqueue, &priv->up); | 12012 | schedule_work(&priv->up); |
12033 | 12013 | ||
12034 | return 0; | 12014 | return 0; |
12035 | } | 12015 | } |
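
The ipw2200 conversion above is the template for most driver changes in this series: drop the private workqueue, queue everything through schedule_work()/schedule_delayed_work() onto the shared system workqueue, and, because there is no private queue left to flush or destroy, cancel every work item synchronously at teardown. A minimal sketch of that shape, with illustrative names rather than the driver's own:

#include <linux/workqueue.h>

struct foo_priv {
	struct delayed_work scan_work;
};

static void foo_scan_fn(struct work_struct *work)
{
	struct foo_priv *priv =
		container_of(to_delayed_work(work), struct foo_priv, scan_work);

	/* ... run the scan using priv ... */
}

static void foo_init(struct foo_priv *priv)
{
	/* no create_workqueue(): the work runs on system_wq */
	INIT_DELAYED_WORK(&priv->scan_work, foo_scan_fn);
	schedule_delayed_work(&priv->scan_work, 2 * HZ);
}

static void foo_remove(struct foo_priv *priv)
{
	/*
	 * destroy_workqueue() used to flush everything on the private
	 * queue implicitly; on the shared queue each work must be
	 * cancelled individually, with the _sync variants, so nothing
	 * is still executing when driver state is freed.
	 */
	cancel_delayed_work_sync(&priv->scan_work);
}

This is why ipw_pci_remove() grows the long list of cancel_*_sync() calls: the single destroy_workqueue() it replaces was doing that flushing implicitly.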
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.h b/drivers/net/wireless/ipw2x00/ipw2200.h index d7d049c7a4fa..0441445b8bfa 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/ipw2x00/ipw2200.h | |||
@@ -1299,8 +1299,6 @@ struct ipw_priv { | |||
1299 | u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; | 1299 | u8 direct_scan_ssid[IW_ESSID_MAX_SIZE]; |
1300 | u8 direct_scan_ssid_len; | 1300 | u8 direct_scan_ssid_len; |
1301 | 1301 | ||
1302 | struct workqueue_struct *workqueue; | ||
1303 | |||
1304 | struct delayed_work adhoc_check; | 1302 | struct delayed_work adhoc_check; |
1305 | struct work_struct associate; | 1303 | struct work_struct associate; |
1306 | struct work_struct disassociate; | 1304 | struct work_struct disassociate; |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 79cefbe31367..638c72b7f94a 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -4277,7 +4277,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, | |||
4277 | 4277 | ||
4278 | snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", | 4278 | snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", |
4279 | phba->shost->host_no); | 4279 | phba->shost->host_no); |
4280 | phba->wq = create_workqueue(phba->wq_name); | 4280 | phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); |
4281 | if (!phba->wq) { | 4281 | if (!phba->wq) { |
4282 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" | 4282 | shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" |
4283 | "Failed to allocate work queue\n"); | 4283 | "Failed to allocate work queue\n"); |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f27724d76cf6..e90f7c16b956 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -349,7 +349,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha) | |||
349 | "Can't create request queue\n"); | 349 | "Can't create request queue\n"); |
350 | goto fail; | 350 | goto fail; |
351 | } | 351 | } |
352 | ha->wq = create_workqueue("qla2xxx_wq"); | 352 | ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 1); |
353 | vha->req = ha->req_q_map[req]; | 353 | vha->req = ha->req_q_map[req]; |
354 | options |= BIT_1; | 354 | options |= BIT_1; |
355 | for (ques = 1; ques < ha->max_rsp_queues; ques++) { | 355 | for (ques = 1; ques < ha->max_rsp_queues; ques++) { |
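
be2iscsi and qla2xxx sit in the I/O completion path, so they keep dedicated queues but state their requirements explicitly: WQ_MEM_RECLAIM guarantees a rescuer thread so the queue keeps making forward progress under memory pressure, and max_active = 1 keeps the effectively-single-worker behaviour these drivers were written for. The conversion is mechanical; a hedged before/after sketch:

/* before: per-CPU worker threads, reclaim behaviour implicit */
wq = create_workqueue("driver_wq");

/* after: only the guarantees actually needed, spelled out */
wq = alloc_workqueue("driver_wq", WQ_MEM_RECLAIM, 1);
if (!wq)
	return -ENOMEM;

destroy_workqueue() on the error and teardown paths is unchanged.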
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index c399be979921..f67282058ba1 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c | |||
@@ -629,7 +629,7 @@ static int __init scsi_tgt_init(void) | |||
629 | if (!scsi_tgt_cmd_cache) | 629 | if (!scsi_tgt_cmd_cache) |
630 | return -ENOMEM; | 630 | return -ENOMEM; |
631 | 631 | ||
632 | scsi_tgtd = create_workqueue("scsi_tgtd"); | 632 | scsi_tgtd = alloc_workqueue("scsi_tgtd", 0, 1); |
633 | if (!scsi_tgtd) { | 633 | if (!scsi_tgtd) { |
634 | err = -ENOMEM; | 634 | err = -ENOMEM; |
635 | goto free_kmemcache; | 635 | goto free_kmemcache; |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -85,7 +85,7 @@ static int __init aio_setup(void) | |||
85 | kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); | 85 | kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
86 | kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); | 86 | kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
87 | 87 | ||
88 | aio_wq = create_workqueue("aio"); | 88 | aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */ |
89 | abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry)); | 89 | abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry)); |
90 | BUG_ON(!aio_wq || !abe_pool); | 90 | BUG_ON(!aio_wq || !abe_pool); |
91 | 91 | ||
@@ -577,7 +577,7 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req) | |||
577 | spin_lock(&fput_lock); | 577 | spin_lock(&fput_lock); |
578 | list_add(&req->ki_list, &fput_head); | 578 | list_add(&req->ki_list, &fput_head); |
579 | spin_unlock(&fput_lock); | 579 | spin_unlock(&fput_lock); |
580 | queue_work(aio_wq, &fput_work); | 580 | schedule_work(&fput_work); |
581 | } else { | 581 | } else { |
582 | req->ki_filp = NULL; | 582 | req->ki_filp = NULL; |
583 | really_put_req(ctx, req); | 583 | really_put_req(ctx, req); |
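
scsi_tgtd and aio_wq show the other half of the decision: neither queue is flushed from the memory-reclaim path, so WQ_MEM_RECLAIM is dropped, but max_active = 1 is kept because, as the new comment in aio_setup() notes, the queue exists to limit concurrency. One caveat when reading these: on a bound workqueue, max_active caps concurrently executing works per CPU, not globally. A sketch under those assumptions:

#include <linux/workqueue.h>

static struct workqueue_struct *limited_wq;

static int __init limited_init(void)
{
	/*
	 * flags = 0: no rescuer; this queue is never depended on
	 * during memory reclaim.
	 * max_active = 1: at most one work from this queue executes
	 * at a time on each CPU, bounding concurrency the way the
	 * old dedicated worker threads did.
	 */
	limited_wq = alloc_workqueue("limited", 0, 1);
	if (!limited_wq)
		return -ENOMEM;
	return 0;
}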
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5977b356a435..203f9e4a70be 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -3511,7 +3511,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3511 | percpu_counter_set(&sbi->s_dirtyblocks_counter, 0); | 3511 | percpu_counter_set(&sbi->s_dirtyblocks_counter, 0); |
3512 | 3512 | ||
3513 | no_journal: | 3513 | no_journal: |
3514 | EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten"); | 3514 | /* |
3515 | * The maximum number of concurrent works can be high and | ||
3516 | * concurrency isn't really necessary. Limit it to 1. | ||
3517 | */ | ||
3518 | EXT4_SB(sb)->dio_unwritten_wq = | ||
3519 | alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM, 1); | ||
3515 | if (!EXT4_SB(sb)->dio_unwritten_wq) { | 3520 | if (!EXT4_SB(sb)->dio_unwritten_wq) { |
3516 | printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); | 3521 | printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); |
3517 | goto failed_mount_wq; | 3522 | goto failed_mount_wq; |
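
The comment ext4 adds here doubles as the decision procedure applied throughout the series; paraphrased as a single checklist, not taken verbatim from any one patch:

/*
 * Converting create_workqueue(name) -- questions each caller answers:
 *  1. Is the queue flushed or depended upon during memory reclaim?
 *       yes -> alloc_workqueue(name, WQ_MEM_RECLAIM, n);
 *  2. Is a dedicated queue needed at all (private flushing, limits)?
 *       no  -> schedule_work() / schedule_delayed_work() on system_wq;
 *  3. How much concurrency is actually useful?
 *       little -> pass a small max_active, typically 1.
 */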
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 2f8e61816d75..01768e5e2c9b 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -1518,7 +1518,7 @@ static int nfsiod_start(void) | |||
1518 | { | 1518 | { |
1519 | struct workqueue_struct *wq; | 1519 | struct workqueue_struct *wq; |
1520 | dprintk("RPC: creating workqueue nfsiod\n"); | 1520 | dprintk("RPC: creating workqueue nfsiod\n"); |
1521 | wq = alloc_workqueue("nfsiod", WQ_RESCUER, 0); | 1521 | wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0); |
1522 | if (wq == NULL) | 1522 | if (wq == NULL) |
1523 | return -ENOMEM; | 1523 | return -ENOMEM; |
1524 | nfsiod_workqueue = wq; | 1524 | nfsiod_workqueue = wq; |
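
The nfsiod hunk (and the matching rpciod one at the end of this diff) is a pure rename: WQ_RESCUER named the mechanism, a dedicated rescuer thread, while WQ_MEM_RECLAIM names the guarantee callers actually want, forward progress under memory pressure. Behaviour is unchanged:

/* old spelling: asks for the mechanism */
wq = alloc_workqueue("nfsiod", WQ_RESCUER, 0);

/* new spelling: asks for the guarantee */
wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0);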
diff --git a/fs/ocfs2/quota.h b/fs/ocfs2/quota.h index 196fcb52d95d..d5ab56cbe5c5 100644 --- a/fs/ocfs2/quota.h +++ b/fs/ocfs2/quota.h | |||
@@ -114,7 +114,4 @@ int ocfs2_local_write_dquot(struct dquot *dquot); | |||
114 | extern const struct dquot_operations ocfs2_quota_operations; | 114 | extern const struct dquot_operations ocfs2_quota_operations; |
115 | extern struct quota_format_type ocfs2_quota_format; | 115 | extern struct quota_format_type ocfs2_quota_format; |
116 | 116 | ||
117 | int ocfs2_quota_setup(void); | ||
118 | void ocfs2_quota_shutdown(void); | ||
119 | |||
120 | #endif /* _OCFS2_QUOTA_H */ | 117 | #endif /* _OCFS2_QUOTA_H */ |
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 4607923eb24c..a73f64166481 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c | |||
@@ -63,8 +63,6 @@ | |||
63 | * write to gf | 63 | * write to gf |
64 | */ | 64 | */ |
65 | 65 | ||
66 | static struct workqueue_struct *ocfs2_quota_wq = NULL; | ||
67 | |||
68 | static void qsync_work_fn(struct work_struct *work); | 66 | static void qsync_work_fn(struct work_struct *work); |
69 | 67 | ||
70 | static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp) | 68 | static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp) |
@@ -400,8 +398,8 @@ int ocfs2_global_read_info(struct super_block *sb, int type) | |||
400 | OCFS2_QBLK_RESERVED_SPACE; | 398 | OCFS2_QBLK_RESERVED_SPACE; |
401 | oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi); | 399 | oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi); |
402 | INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn); | 400 | INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn); |
403 | queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, | 401 | schedule_delayed_work(&oinfo->dqi_sync_work, |
404 | msecs_to_jiffies(oinfo->dqi_syncms)); | 402 | msecs_to_jiffies(oinfo->dqi_syncms)); |
405 | 403 | ||
406 | out_err: | 404 | out_err: |
407 | mlog_exit(status); | 405 | mlog_exit(status); |
@@ -635,8 +633,8 @@ static void qsync_work_fn(struct work_struct *work) | |||
635 | struct super_block *sb = oinfo->dqi_gqinode->i_sb; | 633 | struct super_block *sb = oinfo->dqi_gqinode->i_sb; |
636 | 634 | ||
637 | dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type); | 635 | dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type); |
638 | queue_delayed_work(ocfs2_quota_wq, &oinfo->dqi_sync_work, | 636 | schedule_delayed_work(&oinfo->dqi_sync_work, |
639 | msecs_to_jiffies(oinfo->dqi_syncms)); | 637 | msecs_to_jiffies(oinfo->dqi_syncms)); |
640 | } | 638 | } |
641 | 639 | ||
642 | /* | 640 | /* |
@@ -923,20 +921,3 @@ const struct dquot_operations ocfs2_quota_operations = { | |||
923 | .alloc_dquot = ocfs2_alloc_dquot, | 921 | .alloc_dquot = ocfs2_alloc_dquot, |
924 | .destroy_dquot = ocfs2_destroy_dquot, | 922 | .destroy_dquot = ocfs2_destroy_dquot, |
925 | }; | 923 | }; |
926 | |||
927 | int ocfs2_quota_setup(void) | ||
928 | { | ||
929 | ocfs2_quota_wq = create_workqueue("o2quot"); | ||
930 | if (!ocfs2_quota_wq) | ||
931 | return -ENOMEM; | ||
932 | return 0; | ||
933 | } | ||
934 | |||
935 | void ocfs2_quota_shutdown(void) | ||
936 | { | ||
937 | if (ocfs2_quota_wq) { | ||
938 | flush_workqueue(ocfs2_quota_wq); | ||
939 | destroy_workqueue(ocfs2_quota_wq); | ||
940 | ocfs2_quota_wq = NULL; | ||
941 | } | ||
942 | } | ||
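
ocfs2's quota sync is the self-rearming delayed-work idiom: the handler re-queues itself on the shared queue, so the whole o2quot setup/shutdown pair becomes dead code. A sketch with invented names, not ocfs2's own structures:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void qsync_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(qsync_work, qsync_fn);

static void qsync_fn(struct work_struct *work)
{
	/* ... scan and sync the active dquots ... */

	/* rearm: periodic behaviour with no private queue or timer */
	schedule_delayed_work(&qsync_work, msecs_to_jiffies(10000));
}

Whoever owns such a work must break the chain at teardown with cancel_delayed_work_sync(); here that responsibility falls to the per-filesystem quota teardown, which is why no global shutdown hook remains.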
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 36c423fb0635..236ed1bdca2c 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1657,16 +1657,11 @@ static int __init ocfs2_init(void) | |||
1657 | mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n"); | 1657 | mlog(ML_ERROR, "Unable to create ocfs2 debugfs root.\n"); |
1658 | } | 1658 | } |
1659 | 1659 | ||
1660 | status = ocfs2_quota_setup(); | ||
1661 | if (status) | ||
1662 | goto leave; | ||
1663 | |||
1664 | ocfs2_set_locking_protocol(); | 1660 | ocfs2_set_locking_protocol(); |
1665 | 1661 | ||
1666 | status = register_quota_format(&ocfs2_quota_format); | 1662 | status = register_quota_format(&ocfs2_quota_format); |
1667 | leave: | 1663 | leave: |
1668 | if (status < 0) { | 1664 | if (status < 0) { |
1669 | ocfs2_quota_shutdown(); | ||
1670 | ocfs2_free_mem_caches(); | 1665 | ocfs2_free_mem_caches(); |
1671 | exit_ocfs2_uptodate_cache(); | 1666 | exit_ocfs2_uptodate_cache(); |
1672 | } | 1667 | } |
@@ -1683,8 +1678,6 @@ static void __exit ocfs2_exit(void) | |||
1683 | { | 1678 | { |
1684 | mlog_entry_void(); | 1679 | mlog_entry_void(); |
1685 | 1680 | ||
1686 | ocfs2_quota_shutdown(); | ||
1687 | |||
1688 | if (ocfs2_wq) { | 1681 | if (ocfs2_wq) { |
1689 | flush_workqueue(ocfs2_wq); | 1682 | flush_workqueue(ocfs2_wq); |
1690 | destroy_workqueue(ocfs2_wq); | 1683 | destroy_workqueue(ocfs2_wq); |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 3eea859e6990..c77514bd5776 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -2876,7 +2876,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name, | |||
2876 | reiserfs_mounted_fs_count++; | 2876 | reiserfs_mounted_fs_count++; |
2877 | if (reiserfs_mounted_fs_count <= 1) { | 2877 | if (reiserfs_mounted_fs_count <= 1) { |
2878 | reiserfs_write_unlock(sb); | 2878 | reiserfs_write_unlock(sb); |
2879 | commit_wq = create_workqueue("reiserfs"); | 2879 | commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0); |
2880 | reiserfs_write_lock(sb); | 2880 | reiserfs_write_lock(sb); |
2881 | } | 2881 | } |
2882 | 2882 | ||
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index ac1c7e8378dd..f83a4c830a65 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c | |||
@@ -2022,11 +2022,12 @@ xfs_buf_init(void) | |||
2022 | if (!xfslogd_workqueue) | 2022 | if (!xfslogd_workqueue) |
2023 | goto out_free_buf_zone; | 2023 | goto out_free_buf_zone; |
2024 | 2024 | ||
2025 | xfsdatad_workqueue = create_workqueue("xfsdatad"); | 2025 | xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1); |
2026 | if (!xfsdatad_workqueue) | 2026 | if (!xfsdatad_workqueue) |
2027 | goto out_destroy_xfslogd_workqueue; | 2027 | goto out_destroy_xfslogd_workqueue; |
2028 | 2028 | ||
2029 | xfsconvertd_workqueue = create_workqueue("xfsconvertd"); | 2029 | xfsconvertd_workqueue = alloc_workqueue("xfsconvertd", |
2030 | WQ_MEM_RECLAIM, 1); | ||
2030 | if (!xfsconvertd_workqueue) | 2031 | if (!xfsconvertd_workqueue) |
2031 | goto out_destroy_xfsdatad_workqueue; | 2032 | goto out_destroy_xfsdatad_workqueue; |
2032 | 2033 | ||
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c index edfa178bafb6..4aff56395732 100644 --- a/fs/xfs/xfs_mru_cache.c +++ b/fs/xfs/xfs_mru_cache.c | |||
@@ -309,7 +309,7 @@ xfs_mru_cache_init(void) | |||
309 | if (!xfs_mru_elem_zone) | 309 | if (!xfs_mru_elem_zone) |
310 | goto out; | 310 | goto out; |
311 | 311 | ||
312 | xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache"); | 312 | xfs_mru_reap_wq = alloc_workqueue("xfs_mru_cache", WQ_MEM_RECLAIM, 1); |
313 | if (!xfs_mru_reap_wq) | 313 | if (!xfs_mru_reap_wq) |
314 | goto out_destroy_mru_elem_zone; | 314 | goto out_destroy_mru_elem_zone; |
315 | 315 | ||
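
xfs_mru_cache drops create_singlethread_workqueue(), whose closest explicit spelling in this series is alloc_workqueue(name, WQ_MEM_RECLAIM, 1):

/* old: dedicated single-threaded queue, reclaim-safe by construction */
wq = create_singlethread_workqueue("xfs_mru_cache");

/* new: same reclaim guarantee, one in-flight work */
wq = alloc_workqueue("xfs_mru_cache", WQ_MEM_RECLAIM, 1);

The mapping is not a strict equivalence (max_active = 1 on a bound queue is a per-CPU cap, not global serialization), but the only user here is a single reaper work, so little if anything changes in practice.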
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index f7998a3bf020..f584aba78ca9 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -286,11 +286,15 @@ enum { | |||
286 | * any specific CPU, not concurrency managed, and all queued works are | 286 | * any specific CPU, not concurrency managed, and all queued works are |
287 | * executed immediately as long as max_active limit is not reached and | 287 | * executed immediately as long as max_active limit is not reached and |
288 | * resources are available. | 288 | * resources are available. |
289 | * | ||
290 | * system_freezable_wq is equivalent to system_wq except that it's | ||
291 | * freezable. | ||
289 | */ | 292 | */ |
290 | extern struct workqueue_struct *system_wq; | 293 | extern struct workqueue_struct *system_wq; |
291 | extern struct workqueue_struct *system_long_wq; | 294 | extern struct workqueue_struct *system_long_wq; |
292 | extern struct workqueue_struct *system_nrt_wq; | 295 | extern struct workqueue_struct *system_nrt_wq; |
293 | extern struct workqueue_struct *system_unbound_wq; | 296 | extern struct workqueue_struct *system_unbound_wq; |
297 | extern struct workqueue_struct *system_freezable_wq; | ||
294 | 298 | ||
295 | extern struct workqueue_struct * | 299 | extern struct workqueue_struct * |
296 | __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, | 300 | __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, |
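
system_freezable_wq is the new addition this header documents: it behaves like system_wq except that its works are frozen alongside user space during suspend, which is what drivers whose work must not touch hardware across a suspend/resume cycle need. Illustrative usage, with names invented for the example:

#include <linux/workqueue.h>

static void poll_hw(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_work, poll_hw);

static void poll_hw(struct work_struct *work)
{
	/* ... touch the hardware ... */

	/*
	 * On system_freezable_wq this work is parked across
	 * suspend/resume, so the handler never races with the
	 * device being powered down.
	 */
	queue_delayed_work(system_freezable_wq, &poll_work, HZ);
}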
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index b5fe4c00eb3c..5ca7ce9ce754 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -251,10 +251,12 @@ struct workqueue_struct *system_wq __read_mostly; | |||
251 | struct workqueue_struct *system_long_wq __read_mostly; | 251 | struct workqueue_struct *system_long_wq __read_mostly; |
252 | struct workqueue_struct *system_nrt_wq __read_mostly; | 252 | struct workqueue_struct *system_nrt_wq __read_mostly; |
253 | struct workqueue_struct *system_unbound_wq __read_mostly; | 253 | struct workqueue_struct *system_unbound_wq __read_mostly; |
254 | struct workqueue_struct *system_freezable_wq __read_mostly; | ||
254 | EXPORT_SYMBOL_GPL(system_wq); | 255 | EXPORT_SYMBOL_GPL(system_wq); |
255 | EXPORT_SYMBOL_GPL(system_long_wq); | 256 | EXPORT_SYMBOL_GPL(system_long_wq); |
256 | EXPORT_SYMBOL_GPL(system_nrt_wq); | 257 | EXPORT_SYMBOL_GPL(system_nrt_wq); |
257 | EXPORT_SYMBOL_GPL(system_unbound_wq); | 258 | EXPORT_SYMBOL_GPL(system_unbound_wq); |
259 | EXPORT_SYMBOL_GPL(system_freezable_wq); | ||
258 | 260 | ||
259 | #define CREATE_TRACE_POINTS | 261 | #define CREATE_TRACE_POINTS |
260 | #include <trace/events/workqueue.h> | 262 | #include <trace/events/workqueue.h> |
@@ -3781,8 +3783,10 @@ static int __init init_workqueues(void) | |||
3781 | system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); | 3783 | system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0); |
3782 | system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, | 3784 | system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, |
3783 | WQ_UNBOUND_MAX_ACTIVE); | 3785 | WQ_UNBOUND_MAX_ACTIVE); |
3786 | system_freezable_wq = alloc_workqueue("events_freezable", | ||
3787 | WQ_FREEZABLE, 0); | ||
3784 | BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || | 3788 | BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq || |
3785 | !system_unbound_wq); | 3789 | !system_unbound_wq || !system_freezable_wq); |
3786 | return 0; | 3790 | return 0; |
3787 | } | 3791 | } |
3788 | early_initcall(init_workqueues); | 3792 | early_initcall(init_workqueues); |
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 078eb162d9bf..a30471e51740 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c | |||
@@ -153,10 +153,11 @@ struct p9_conn { | |||
153 | unsigned long wsched; | 153 | unsigned long wsched; |
154 | }; | 154 | }; |
155 | 155 | ||
156 | static void p9_poll_workfn(struct work_struct *work); | ||
157 | |||
156 | static DEFINE_SPINLOCK(p9_poll_lock); | 158 | static DEFINE_SPINLOCK(p9_poll_lock); |
157 | static LIST_HEAD(p9_poll_pending_list); | 159 | static LIST_HEAD(p9_poll_pending_list); |
158 | static struct workqueue_struct *p9_mux_wq; | 160 | static DECLARE_WORK(p9_poll_work, p9_poll_workfn); |
159 | static struct task_struct *p9_poll_task; | ||
160 | 161 | ||
161 | static void p9_mux_poll_stop(struct p9_conn *m) | 162 | static void p9_mux_poll_stop(struct p9_conn *m) |
162 | { | 163 | { |
@@ -384,7 +385,7 @@ static void p9_read_work(struct work_struct *work) | |||
384 | 385 | ||
385 | if (n & POLLIN) { | 386 | if (n & POLLIN) { |
386 | P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); | 387 | P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); |
387 | queue_work(p9_mux_wq, &m->rq); | 388 | schedule_work(&m->rq); |
388 | } else | 389 | } else |
389 | clear_bit(Rworksched, &m->wsched); | 390 | clear_bit(Rworksched, &m->wsched); |
390 | } else | 391 | } else |
@@ -497,7 +498,7 @@ static void p9_write_work(struct work_struct *work) | |||
497 | 498 | ||
498 | if (n & POLLOUT) { | 499 | if (n & POLLOUT) { |
499 | P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); | 500 | P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); |
500 | queue_work(p9_mux_wq, &m->wq); | 501 | schedule_work(&m->wq); |
501 | } else | 502 | } else |
502 | clear_bit(Wworksched, &m->wsched); | 503 | clear_bit(Wworksched, &m->wsched); |
503 | } else | 504 | } else |
@@ -516,15 +517,14 @@ static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) | |||
516 | container_of(wait, struct p9_poll_wait, wait); | 517 | container_of(wait, struct p9_poll_wait, wait); |
517 | struct p9_conn *m = pwait->conn; | 518 | struct p9_conn *m = pwait->conn; |
518 | unsigned long flags; | 519 | unsigned long flags; |
519 | DECLARE_WAITQUEUE(dummy_wait, p9_poll_task); | ||
520 | 520 | ||
521 | spin_lock_irqsave(&p9_poll_lock, flags); | 521 | spin_lock_irqsave(&p9_poll_lock, flags); |
522 | if (list_empty(&m->poll_pending_link)) | 522 | if (list_empty(&m->poll_pending_link)) |
523 | list_add_tail(&m->poll_pending_link, &p9_poll_pending_list); | 523 | list_add_tail(&m->poll_pending_link, &p9_poll_pending_list); |
524 | spin_unlock_irqrestore(&p9_poll_lock, flags); | 524 | spin_unlock_irqrestore(&p9_poll_lock, flags); |
525 | 525 | ||
526 | /* perform the default wake up operation */ | 526 | schedule_work(&p9_poll_work); |
527 | return default_wake_function(&dummy_wait, mode, sync, key); | 527 | return 1; |
528 | } | 528 | } |
529 | 529 | ||
530 | /** | 530 | /** |
@@ -629,7 +629,7 @@ static void p9_poll_mux(struct p9_conn *m) | |||
629 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m); | 629 | P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m); |
630 | if (!test_and_set_bit(Rworksched, &m->wsched)) { | 630 | if (!test_and_set_bit(Rworksched, &m->wsched)) { |
631 | P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); | 631 | P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); |
632 | queue_work(p9_mux_wq, &m->rq); | 632 | schedule_work(&m->rq); |
633 | } | 633 | } |
634 | } | 634 | } |
635 | 635 | ||
@@ -639,7 +639,7 @@ static void p9_poll_mux(struct p9_conn *m) | |||
639 | if ((m->wsize || !list_empty(&m->unsent_req_list)) && | 639 | if ((m->wsize || !list_empty(&m->unsent_req_list)) && |
640 | !test_and_set_bit(Wworksched, &m->wsched)) { | 640 | !test_and_set_bit(Wworksched, &m->wsched)) { |
641 | P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); | 641 | P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); |
642 | queue_work(p9_mux_wq, &m->wq); | 642 | schedule_work(&m->wq); |
643 | } | 643 | } |
644 | } | 644 | } |
645 | } | 645 | } |
@@ -677,7 +677,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req) | |||
677 | n = p9_fd_poll(m->client, NULL); | 677 | n = p9_fd_poll(m->client, NULL); |
678 | 678 | ||
679 | if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) | 679 | if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) |
680 | queue_work(p9_mux_wq, &m->wq); | 680 | schedule_work(&m->wq); |
681 | 681 | ||
682 | return 0; | 682 | return 0; |
683 | } | 683 | } |
@@ -1047,12 +1047,12 @@ static struct p9_trans_module p9_fd_trans = { | |||
1047 | * | 1047 | * |
1048 | */ | 1048 | */ |
1049 | 1049 | ||
1050 | static int p9_poll_proc(void *a) | 1050 | static void p9_poll_workfn(struct work_struct *work) |
1051 | { | 1051 | { |
1052 | unsigned long flags; | 1052 | unsigned long flags; |
1053 | 1053 | ||
1054 | P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current); | 1054 | P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current); |
1055 | repeat: | 1055 | |
1056 | spin_lock_irqsave(&p9_poll_lock, flags); | 1056 | spin_lock_irqsave(&p9_poll_lock, flags); |
1057 | while (!list_empty(&p9_poll_pending_list)) { | 1057 | while (!list_empty(&p9_poll_pending_list)) { |
1058 | struct p9_conn *conn = list_first_entry(&p9_poll_pending_list, | 1058 | struct p9_conn *conn = list_first_entry(&p9_poll_pending_list, |
@@ -1067,35 +1067,11 @@ static int p9_poll_proc(void *a) | |||
1067 | } | 1067 | } |
1068 | spin_unlock_irqrestore(&p9_poll_lock, flags); | 1068 | spin_unlock_irqrestore(&p9_poll_lock, flags); |
1069 | 1069 | ||
1070 | set_current_state(TASK_INTERRUPTIBLE); | ||
1071 | if (list_empty(&p9_poll_pending_list)) { | ||
1072 | P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n"); | ||
1073 | schedule(); | ||
1074 | } | ||
1075 | __set_current_state(TASK_RUNNING); | ||
1076 | |||
1077 | if (!kthread_should_stop()) | ||
1078 | goto repeat; | ||
1079 | |||
1080 | P9_DPRINTK(P9_DEBUG_TRANS, "finish\n"); | 1070 | P9_DPRINTK(P9_DEBUG_TRANS, "finish\n"); |
1081 | return 0; | ||
1082 | } | 1071 | } |
1083 | 1072 | ||
1084 | int p9_trans_fd_init(void) | 1073 | int p9_trans_fd_init(void) |
1085 | { | 1074 | { |
1086 | p9_mux_wq = create_workqueue("v9fs"); | ||
1087 | if (!p9_mux_wq) { | ||
1088 | printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n"); | ||
1089 | return -ENOMEM; | ||
1090 | } | ||
1091 | |||
1092 | p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll"); | ||
1093 | if (IS_ERR(p9_poll_task)) { | ||
1094 | destroy_workqueue(p9_mux_wq); | ||
1095 | printk(KERN_WARNING "v9fs: mux: creating poll task failed\n"); | ||
1096 | return PTR_ERR(p9_poll_task); | ||
1097 | } | ||
1098 | |||
1099 | v9fs_register_trans(&p9_tcp_trans); | 1075 | v9fs_register_trans(&p9_tcp_trans); |
1100 | v9fs_register_trans(&p9_unix_trans); | 1076 | v9fs_register_trans(&p9_unix_trans); |
1101 | v9fs_register_trans(&p9_fd_trans); | 1077 | v9fs_register_trans(&p9_fd_trans); |
@@ -1105,10 +1081,8 @@ int p9_trans_fd_init(void) | |||
1105 | 1081 | ||
1106 | void p9_trans_fd_exit(void) | 1082 | void p9_trans_fd_exit(void) |
1107 | { | 1083 | { |
1108 | kthread_stop(p9_poll_task); | 1084 | flush_work_sync(&p9_poll_work); |
1109 | v9fs_unregister_trans(&p9_tcp_trans); | 1085 | v9fs_unregister_trans(&p9_tcp_trans); |
1110 | v9fs_unregister_trans(&p9_unix_trans); | 1086 | v9fs_unregister_trans(&p9_unix_trans); |
1111 | v9fs_unregister_trans(&p9_fd_trans); | 1087 | v9fs_unregister_trans(&p9_fd_trans); |
1112 | |||
1113 | destroy_workqueue(p9_mux_wq); | ||
1114 | } | 1088 | } |
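
The 9p transport collapses both a private workqueue and a dedicated poll kthread into one statically declared work item: the pollwake callback merely schedules it, the handler drains the pending list once and returns (the workqueue core supplies the sleep/wake loop the kthread hand-rolled), and module exit reduces to a flush. The shape, sketched with generic names:

#include <linux/workqueue.h>

static void poll_workfn(struct work_struct *work)
{
	/* drain everything pending once; no sleep/goto-repeat loop */
}

static DECLARE_WORK(poll_work, poll_workfn);

static void event_source(void)
{
	/* was: wake the poll kthread via default_wake_function() */
	schedule_work(&poll_work);
}

static void transport_exit(void)
{
	/* was: kthread_stop() + destroy_workqueue() */
	flush_work_sync(&poll_work);
}

flush_work_sync() is this era's name; later kernels folded its semantics into plain flush_work().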
diff --git a/net/rds/ib.c b/net/rds/ib.c index 4123967d4d65..cce19f95c624 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -364,7 +364,6 @@ void rds_ib_exit(void) | |||
364 | rds_ib_sysctl_exit(); | 364 | rds_ib_sysctl_exit(); |
365 | rds_ib_recv_exit(); | 365 | rds_ib_recv_exit(); |
366 | rds_trans_unregister(&rds_ib_transport); | 366 | rds_trans_unregister(&rds_ib_transport); |
367 | rds_ib_fmr_exit(); | ||
368 | } | 367 | } |
369 | 368 | ||
370 | struct rds_transport rds_ib_transport = { | 369 | struct rds_transport rds_ib_transport = { |
@@ -400,13 +399,9 @@ int rds_ib_init(void) | |||
400 | 399 | ||
401 | INIT_LIST_HEAD(&rds_ib_devices); | 400 | INIT_LIST_HEAD(&rds_ib_devices); |
402 | 401 | ||
403 | ret = rds_ib_fmr_init(); | ||
404 | if (ret) | ||
405 | goto out; | ||
406 | |||
407 | ret = ib_register_client(&rds_ib_client); | 402 | ret = ib_register_client(&rds_ib_client); |
408 | if (ret) | 403 | if (ret) |
409 | goto out_fmr_exit; | 404 | goto out; |
410 | 405 | ||
411 | ret = rds_ib_sysctl_init(); | 406 | ret = rds_ib_sysctl_init(); |
412 | if (ret) | 407 | if (ret) |
@@ -430,8 +425,6 @@ out_sysctl: | |||
430 | rds_ib_sysctl_exit(); | 425 | rds_ib_sysctl_exit(); |
431 | out_ibreg: | 426 | out_ibreg: |
432 | rds_ib_unregister_client(); | 427 | rds_ib_unregister_client(); |
433 | out_fmr_exit: | ||
434 | rds_ib_fmr_exit(); | ||
435 | out: | 428 | out: |
436 | return ret; | 429 | return ret; |
437 | } | 430 | } |
diff --git a/net/rds/ib.h b/net/rds/ib.h index e34ad032b66d..4297d92788dc 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h | |||
@@ -307,8 +307,6 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents, | |||
307 | void rds_ib_sync_mr(void *trans_private, int dir); | 307 | void rds_ib_sync_mr(void *trans_private, int dir); |
308 | void rds_ib_free_mr(void *trans_private, int invalidate); | 308 | void rds_ib_free_mr(void *trans_private, int invalidate); |
309 | void rds_ib_flush_mrs(void); | 309 | void rds_ib_flush_mrs(void); |
310 | int rds_ib_fmr_init(void); | ||
311 | void rds_ib_fmr_exit(void); | ||
312 | 310 | ||
313 | /* ib_recv.c */ | 311 | /* ib_recv.c */ |
314 | int rds_ib_recv_init(void); | 312 | int rds_ib_recv_init(void); |
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c index 18a833c450c8..819c35a0d9cb 100644 --- a/net/rds/ib_rdma.c +++ b/net/rds/ib_rdma.c | |||
@@ -38,8 +38,6 @@ | |||
38 | #include "ib.h" | 38 | #include "ib.h" |
39 | #include "xlist.h" | 39 | #include "xlist.h" |
40 | 40 | ||
41 | static struct workqueue_struct *rds_ib_fmr_wq; | ||
42 | |||
43 | static DEFINE_PER_CPU(unsigned long, clean_list_grace); | 41 | static DEFINE_PER_CPU(unsigned long, clean_list_grace); |
44 | #define CLEAN_LIST_BUSY_BIT 0 | 42 | #define CLEAN_LIST_BUSY_BIT 0 |
45 | 43 | ||
@@ -307,7 +305,7 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev) | |||
307 | int err = 0, iter = 0; | 305 | int err = 0, iter = 0; |
308 | 306 | ||
309 | if (atomic_read(&pool->dirty_count) >= pool->max_items / 10) | 307 | if (atomic_read(&pool->dirty_count) >= pool->max_items / 10) |
310 | queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10); | 308 | schedule_delayed_work(&pool->flush_worker, 10); |
311 | 309 | ||
312 | while (1) { | 310 | while (1) { |
313 | ibmr = rds_ib_reuse_fmr(pool); | 311 | ibmr = rds_ib_reuse_fmr(pool); |
@@ -696,24 +694,6 @@ out_nolock: | |||
696 | return ret; | 694 | return ret; |
697 | } | 695 | } |
698 | 696 | ||
699 | int rds_ib_fmr_init(void) | ||
700 | { | ||
701 | rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd"); | ||
702 | if (!rds_ib_fmr_wq) | ||
703 | return -ENOMEM; | ||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | /* | ||
708 | * By the time this is called all the IB devices should have been torn down and | ||
709 | * had their pools freed. As each pool is freed its work struct is waited on, | ||
710 | * so the pool flushing work queue should be idle by the time we get here. | ||
711 | */ | ||
712 | void rds_ib_fmr_exit(void) | ||
713 | { | ||
714 | destroy_workqueue(rds_ib_fmr_wq); | ||
715 | } | ||
716 | |||
717 | static void rds_ib_mr_pool_flush_worker(struct work_struct *work) | 697 | static void rds_ib_mr_pool_flush_worker(struct work_struct *work) |
718 | { | 698 | { |
719 | struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work); | 699 | struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work); |
@@ -741,7 +721,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate) | |||
741 | /* If we've pinned too many pages, request a flush */ | 721 | /* If we've pinned too many pages, request a flush */ |
742 | if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || | 722 | if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned || |
743 | atomic_read(&pool->dirty_count) >= pool->max_items / 10) | 723 | atomic_read(&pool->dirty_count) >= pool->max_items / 10) |
744 | queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10); | 724 | schedule_delayed_work(&pool->flush_worker, 10); |
745 | 725 | ||
746 | if (invalidate) { | 726 | if (invalidate) { |
747 | if (likely(!in_interrupt())) { | 727 | if (likely(!in_interrupt())) { |
@@ -749,8 +729,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate) | |||
749 | } else { | 729 | } else { |
750 | /* We get here if the user created a MR marked | 730 | /* We get here if the user created a MR marked |
751 | * as use_once and invalidate at the same time. */ | 731 | * as use_once and invalidate at the same time. */ |
752 | queue_delayed_work(rds_ib_fmr_wq, | 732 | schedule_delayed_work(&pool->flush_worker, 10); |
753 | &pool->flush_worker, 10); | ||
754 | } | 733 | } |
755 | } | 734 | } |
756 | 735 | ||
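
rds_ib gets the same treatment with one nicety: the 10-jiffy delay doubles as a debounce, because queueing an already-pending delayed work is a no-op, so a burst of triggers collapses into one flush. A sketch (rds embeds the work in each pool; the static here is purely illustrative):

#include <linux/workqueue.h>

static void fmr_flush_fn(struct work_struct *work)
{
	/* ... flush the dirty FMR pool ... */
}

static DECLARE_DELAYED_WORK(fmr_flush_work, fmr_flush_fn);

static void maybe_kick_flush(bool over_threshold)
{
	/*
	 * schedule_delayed_work() does nothing if the work is already
	 * pending, so repeated triggers inside the window coalesce
	 * into a single flush run.
	 */
	if (over_threshold)
		schedule_delayed_work(&fmr_flush_work, 10);
}

The removed rds_ib_fmr_exit() comment explains why no flush is needed at module exit either: pool teardown already waits on each pool's work.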
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 59e599498e37..3fc8624fcd17 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -955,7 +955,7 @@ static int rpciod_start(void) | |||
955 | * Create the rpciod thread and wait for it to start. | 955 | * Create the rpciod thread and wait for it to start. |
956 | */ | 956 | */ |
957 | dprintk("RPC: creating workqueue rpciod\n"); | 957 | dprintk("RPC: creating workqueue rpciod\n"); |
958 | wq = alloc_workqueue("rpciod", WQ_RESCUER, 0); | 958 | wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0); |
959 | rpciod_workqueue = wq; | 959 | rpciod_workqueue = wq; |
960 | return rpciod_workqueue != NULL; | 960 | return rpciod_workqueue != NULL; |
961 | } | 961 | } |