 arch/arm/mach-pxa/sharpsl_pm.c | 4
 arch/arm/plat-omap/mailbox.c | 2
 arch/powerpc/platforms/cell/cpufreq_spudemand.c | 2
 arch/sh/drivers/push-switch.c | 2
 block/blk-core.c | 8
 block/blk-throttle.c | 14
 block/genhd.c | 14
 drivers/block/floppy.c | 5
 drivers/block/xen-blkfront.c | 4
 drivers/cdrom/gdrom.c | 2
 drivers/char/sonypi.c | 2
 drivers/char/tpm/tpm.c | 4
 drivers/cpufreq/cpufreq_conservative.c | 2
 drivers/cpufreq/cpufreq_ondemand.c | 2
 drivers/devfreq/devfreq.c | 2
 drivers/edac/edac_mc.c | 17
 drivers/extcon/extcon-adc-jack.c | 2
 drivers/gpu/drm/drm_crtc_helper.c | 6
 drivers/gpu/drm/exynos/exynos_drm_g2d.c | 2
 drivers/gpu/drm/nouveau/nouveau_gpio.c | 2
 drivers/gpu/drm/radeon/radeon_irq_kms.c | 2
 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 2
 drivers/hid/hid-picolcd_fb.c | 2
 drivers/hid/hid-wiimote-ext.c | 2
 drivers/infiniband/core/addr.c | 4
 drivers/infiniband/core/mad.c | 16
 drivers/infiniband/hw/nes/nes_hw.c | 6
 drivers/infiniband/hw/nes/nes_nic.c | 5
 drivers/input/keyboard/qt2160.c | 3
 drivers/input/mouse/synaptics_i2c.c | 7
 drivers/input/touchscreen/wm831x-ts.c | 2
 drivers/isdn/mISDN/hwchannel.c | 2
 drivers/leds/leds-lm3533.c | 6
 drivers/leds/leds-lp8788.c | 2
 drivers/leds/leds-wm8350.c | 2
 drivers/macintosh/ams/ams-core.c | 2
 drivers/md/dm-mpath.c | 2
 drivers/md/dm-raid1.c | 2
 drivers/md/dm-stripe.c | 2
 drivers/media/dvb/dvb-core/dvb_net.c | 4
 drivers/media/dvb/mantis/mantis_evm.c | 2
 drivers/media/dvb/mantis/mantis_uart.c | 2
 drivers/media/video/bt8xx/bttv-driver.c | 2
 drivers/media/video/cx18/cx18-driver.c | 2
 drivers/media/video/cx231xx/cx231xx-cards.c | 2
 drivers/media/video/cx23885/cx23885-input.c | 6
 drivers/media/video/cx88/cx88-mpeg.c | 2
 drivers/media/video/em28xx/em28xx-cards.c | 2
 drivers/media/video/omap24xxcam.c | 6
 drivers/media/video/saa7134/saa7134-core.c | 2
 drivers/media/video/saa7134/saa7134-empress.c | 2
 drivers/media/video/tm6000/tm6000-cards.c | 2
 drivers/mfd/menelaus.c | 4
 drivers/misc/ioc4.c | 2
 drivers/mmc/core/host.c | 4
 drivers/mtd/mtdoops.c | 4
 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2
 drivers/net/ethernet/mellanox/mlx4/sense.c | 2
 drivers/net/ethernet/neterion/vxge/vxge-main.c | 2
 drivers/net/ethernet/sun/cassini.c | 2
 drivers/net/ethernet/sun/niu.c | 2
 drivers/net/virtio_net.c | 12
 drivers/net/wireless/hostap/hostap_ap.c | 4
 drivers/net/wireless/hostap/hostap_hw.c | 10
 drivers/net/wireless/ipw2x00/ipw2100.c | 8
 drivers/net/wireless/zd1211rw/zd_usb.c | 3
 drivers/platform/x86/thinkpad_acpi.c | 20
 drivers/power/ab8500_btemp.c | 2
 drivers/power/ab8500_charger.c | 8
 drivers/power/ab8500_fg.c | 8
 drivers/power/abx500_chargalg.c | 4
 drivers/power/charger-manager.c | 9
 drivers/power/collie_battery.c | 2
 drivers/power/ds2760_battery.c | 9
 drivers/power/jz4740-battery.c | 6
 drivers/power/max17040_battery.c | 2
 drivers/power/tosa_battery.c | 2
 drivers/power/wm97xx_battery.c | 2
 drivers/power/z2_battery.c | 2
 drivers/regulator/core.c | 2
 drivers/scsi/arcmsr/arcmsr_hba.c | 4
 drivers/scsi/ipr.c | 2
 drivers/scsi/pmcraid.c | 2
 drivers/scsi/qla2xxx/qla_target.c | 2
 drivers/staging/ccg/u_ether.c | 2
 drivers/staging/nvec/nvec.c | 4
 drivers/thermal/thermal_sys.c | 15
 drivers/tty/hvc/hvsi.c | 2
 drivers/tty/ipwireless/hardware.c | 2
 drivers/tty/ipwireless/network.c | 4
 drivers/tty/serial/kgdboc.c | 2
 drivers/tty/serial/omap-serial.c | 2
 drivers/tty/tty_ldisc.c | 6
 drivers/usb/atm/speedtch.c | 2
 drivers/usb/atm/ueagle-atm.c | 2
 drivers/usb/gadget/u_ether.c | 2
 drivers/usb/host/ohci-hcd.c | 2
 drivers/usb/otg/isp1301_omap.c | 2
 drivers/video/omap2/displays/panel-taal.c | 6
 drivers/video/omap2/dss/dsi.c | 6
 fs/affs/super.c | 2
 fs/afs/callback.c | 4
 fs/afs/server.c | 10
 fs/afs/vlocation.c | 14
 fs/gfs2/lock_dlm.c | 2
 fs/gfs2/super.c | 2
 fs/hfs/inode.c | 2
 fs/ncpfs/inode.c | 6
 fs/nfs/nfs4renewd.c | 3
 fs/ocfs2/cluster/quorum.c | 2
 fs/xfs/xfs_super.c | 2
 fs/xfs/xfs_sync.c | 2
 include/linux/workqueue.h | 220
 kernel/srcu.c | 4
 kernel/workqueue.c | 1217
 mm/slab.c | 2
 mm/vmstat.c | 2
 net/9p/trans_fd.c | 2
 net/core/dst.c | 4
 net/core/link_watch.c | 21
 net/core/neighbour.c | 2
 net/dsa/dsa.c | 2
 net/ipv4/inetpeer.c | 2
 net/rfkill/input.c | 3
 net/sunrpc/cache.c | 2
 security/keys/gc.c | 8
 security/keys/key.c | 2
 sound/i2c/other/ak4113.c | 2
 sound/i2c/other/ak4114.c | 2
 sound/pci/oxygen/oxygen_lib.c | 8
 sound/soc/codecs/wm8350.c | 2
 sound/soc/codecs/wm8753.c | 2
 sound/soc/soc-core.c | 6
 virt/kvm/eventfd.c | 2
 134 files changed, 988 insertions(+), 995 deletions(-)
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index 9a154bad1984..5a406f794798 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -579,8 +579,8 @@ static int sharpsl_ac_check(void)
 static int sharpsl_pm_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	sharpsl_pm.flags |= SHARPSL_SUSPENDED;
-	flush_delayed_work_sync(&toggle_charger);
-	flush_delayed_work_sync(&sharpsl_bat);
+	flush_delayed_work(&toggle_charger);
+	flush_delayed_work(&sharpsl_bat);
 
 	if (sharpsl_pm.charge_mode == CHRG_ON)
 		sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
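
The conversion above relies on the updated flush_delayed_work() behavior: it now kicks a pending timer so the handler runs and completes before the call returns, which is the guarantee the old _sync variant provided. A minimal sketch of the pattern, with hypothetical names (example_fn, example_work, example_suspend) that are not from this patch:

	static void example_fn(struct work_struct *work)
	{
		/* ... charger/battery housekeeping ... */
	}
	static DECLARE_DELAYED_WORK(example_work, example_fn);

	static int example_suspend(struct platform_device *pdev, pm_message_t state)
	{
		/* Was flush_delayed_work_sync(); the unsuffixed call now
		 * queues a pending item immediately and waits for it. */
		flush_delayed_work(&example_work);
		return 0;
	}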
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 5e13c3884aa4..42377ef9ea3d 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -310,7 +310,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
 	omap_mbox_disable_irq(mbox, IRQ_RX);
 	free_irq(mbox->irq, mbox);
 	tasklet_kill(&mbox->txq->tasklet);
-	flush_work_sync(&mbox->rxq->work);
+	flush_work(&mbox->rxq->work);
 	mbox_queue_free(mbox->txq);
 	mbox_queue_free(mbox->rxq);
 }
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 23bc9db4317e..82607d621aca 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -76,7 +76,7 @@ static void spu_gov_work(struct work_struct *work)
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
 	int delay = usecs_to_jiffies(info->poll_int);
-	INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
+	INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
 	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
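
INIT_DEFERRABLE_WORK() is a one-for-one rename of INIT_DELAYED_WORK_DEFERRABLE(); the item still runs off a deferrable timer, so an idle CPU is not woken just to fire the poll. A hedged sketch with made-up names (my_poll_state, my_poll_fn), not code from this patch:

	struct my_poll_state {
		struct delayed_work work;
		unsigned long interval;		/* in jiffies */
	};

	static void my_poll_fn(struct work_struct *work)
	{
		struct my_poll_state *st = container_of(to_delayed_work(work),
							struct my_poll_state, work);
		/* ... sample load, then re-arm the deferrable poll ... */
		schedule_delayed_work(&st->work, st->interval);
	}

	static void my_poll_init(struct my_poll_state *st)
	{
		INIT_DEFERRABLE_WORK(&st->work, my_poll_fn);	/* renamed macro */
		schedule_delayed_work(&st->work, st->interval);
	}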
diff --git a/arch/sh/drivers/push-switch.c b/arch/sh/drivers/push-switch.c
index 637b79b09657..5bfb341cc5c4 100644
--- a/arch/sh/drivers/push-switch.c
+++ b/arch/sh/drivers/push-switch.c
@@ -107,7 +107,7 @@ static int switch_drv_remove(struct platform_device *pdev)
 	device_remove_file(&pdev->dev, &dev_attr_switch);
 
 	platform_set_drvdata(pdev, NULL);
-	flush_work_sync(&psw->work);
+	flush_work(&psw->work);
 	del_timer_sync(&psw->debounce);
 	free_irq(irq, pdev);
 
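
Most hunks in this patch are this same mechanical substitution: flush_work() now always waits for a running instance of the work item to finish, so the _sync suffix became redundant and was dropped. The teardown ordering these call sites depend on is unchanged; a sketch under assumed names (struct my_dev and its fields are hypothetical):

	static void example_remove(struct my_dev *dev)
	{
		disable_irq(dev->irq);		/* stop new work from being queued */
		flush_work(&dev->work);		/* was flush_work_sync(); still waits */
		kfree(dev->buf);		/* only now safe to free shared state */
	}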
diff --git a/block/blk-core.c b/block/blk-core.c
index ee3cb3a5e278..d2da64170513 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(blk_start_queue);
  **/
 void blk_stop_queue(struct request_queue *q)
 {
-	__cancel_delayed_work(&q->delay_work);
+	cancel_delayed_work(&q->delay_work);
 	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
@@ -319,10 +319,8 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q))) {
-		__cancel_delayed_work(&q->delay_work);
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-	}
+	if (likely(!blk_queue_stopped(q)))
+		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
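
Two related substitutions meet in blk-core: plain cancel_delayed_work() is now usable where the leading-underscore variant was needed, and mod_delayed_work() collapses the open-coded cancel-then-queue pair into one call that either retimes a pending item or queues a fresh one. A hedged sketch of the before/after shape; my_wq, my_dwork, and retime() are hypothetical:

	static struct workqueue_struct *my_wq;	/* assumed created elsewhere */
	static struct delayed_work my_dwork;	/* assumed initialized elsewhere */

	static void retime(unsigned long delay)
	{
		/* Before this patch the call site read:
		 *	__cancel_delayed_work(&my_dwork);
		 *	queue_delayed_work(my_wq, &my_dwork, delay);
		 * After: one call; a pending item has its timer moved to
		 * the new expiry, otherwise the item is queued fresh. */
		mod_delayed_work(my_wq, &my_dwork, delay);
	}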
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index e287c19908c8..a9664fa0b609 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -180,7 +180,7 @@ static inline unsigned int total_nr_queued(struct throtl_data *td)
 
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
  * allocation.
  */
 static void tg_stats_alloc_fn(struct work_struct *work)
@@ -194,8 +194,7 @@ alloc_stats:
 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 		if (!stats_cpu) {
 			/* allocation failed, try again after some time */
-			queue_delayed_work(system_nrt_wq, dwork,
-					   msecs_to_jiffies(10));
+			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 			return;
 		}
 	}
@@ -238,7 +237,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 */
 	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
-	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+	schedule_delayed_work(&tg_stats_alloc_work, 0);
 	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
@@ -930,12 +929,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
 	/* schedule work if limits changed even if no bio is queued */
 	if (total_nr_queued(td) || td->limits_changed) {
-		/*
-		 * We might have a work scheduled to be executed in future.
-		 * Cancel that and schedule a new one.
-		 */
-		__cancel_delayed_work(dwork);
-		queue_delayed_work(kthrotld_workqueue, dwork, delay);
+		mod_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 			   delay, jiffies);
 	}
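
With every workqueue now non-reentrant by default, the dedicated system_nrt_wq (and, in genhd below, system_nrt_freezable_wq) no longer buys anything, so call sites fall back to system_wq via the schedule_*() helpers. A sketch of the equivalence, with a hypothetical my_dwork:

	static struct delayed_work my_dwork;	/* assumed initialized elsewhere */

	static void requeue_later(void)
	{
		/* schedule_delayed_work() is shorthand for
		 * queue_delayed_work(system_wq, ...); former system_nrt_wq
		 * users can take either form with the same semantics. */
		schedule_delayed_work(&my_dwork, msecs_to_jiffies(10));
	}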
diff --git a/block/genhd.c b/block/genhd.c
index d839723303c8..6cace663a80e 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1490,9 +1490,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1534,10 +1534,8 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 
 	spin_lock_irq(&ev->lock);
 	ev->clearing |= mask;
-	if (!ev->block) {
-		cancel_delayed_work(&ev->dwork);
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
-	}
+	if (!ev->block)
+		mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	spin_unlock_irq(&ev->lock);
 }
 
@@ -1573,7 +1571,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+	queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
 
@@ -1610,7 +1608,7 @@ static void disk_events_workfn(struct work_struct *work)
 
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+		queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
 
 	spin_unlock_irq(&ev->lock);
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a7d6347aaa79..17c675c52295 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message)
 
 	if (drive == current_reqD)
 		drive = current_drive;
-	__cancel_delayed_work(&fd_timeout);
 
 	if (drive < 0 || drive >= N_DRIVE) {
 		delay = 20UL * HZ;
@@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message)
 	} else
 		delay = UDP->timeout;
 
-	queue_delayed_work(floppy_wq, &fd_timeout, delay);
+	mod_delayed_work(floppy_wq, &fd_timeout, delay);
 	if (UDP->flags & FD_DEBUG)
 		DPRINT("reschedule timeout %s\n", message);
 	timeout_message = message;
@@ -891,7 +890,7 @@ static void unlock_fdc(void)
 
 	raw_cmd = NULL;
 	command_status = FD_COMMAND_NONE;
-	__cancel_delayed_work(&fd_timeout);
+	cancel_delayed_work(&fd_timeout);
 	do_floppy = NULL;
 	cont = NULL;
 	clear_bit(0, &fdc_busy);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2c2d2e5c1597..007db8986e84 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -670,7 +670,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	spin_unlock_irqrestore(&info->io_lock, flags);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_work_sync(&info->work);
+	flush_work(&info->work);
 
 	del_gendisk(info->gd);
 
@@ -719,7 +719,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	spin_unlock_irq(&info->io_lock);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_work_sync(&info->work);
+	flush_work(&info->work);
 
 	/* Free resources associated with old device channel. */
 	if (info->ring_ref != GRANT_INVALID_REF) {
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 3ceaf006e7f0..75d485afe56c 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -840,7 +840,7 @@ probe_fail_no_mem:
 
 static int __devexit remove_gdrom(struct platform_device *devptr)
 {
-	flush_work_sync(&work);
+	flush_work(&work);
 	blk_cleanup_queue(gd.gdrom_rq);
 	free_irq(HW_EVENT_GDROM_CMD, &gd);
 	free_irq(HW_EVENT_GDROM_DMA, &gd);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index f87780502b41..320debbe32fa 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1433,7 +1433,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
 	sonypi_disable();
 
 	synchronize_irq(sonypi_device.irq);
-	flush_work_sync(&sonypi_device.input_work);
+	flush_work(&sonypi_device.input_work);
 
 	if (useinput) {
 		input_unregister_device(sonypi_device.input_key_dev);
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 817f0ee202b6..3af9f4d1a23f 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -1172,7 +1172,7 @@ int tpm_release(struct inode *inode, struct file *file)
 	struct tpm_chip *chip = file->private_data;
 
 	del_singleshot_timer_sync(&chip->user_read_timer);
-	flush_work_sync(&chip->work);
+	flush_work(&chip->work);
 	file->private_data = NULL;
 	atomic_set(&chip->data_pending, 0);
 	kfree(chip->data_buffer);
@@ -1225,7 +1225,7 @@ ssize_t tpm_read(struct file *file, char __user *buf,
 	int rc;
 
 	del_singleshot_timer_sync(&chip->user_read_timer);
-	flush_work_sync(&chip->work);
+	flush_work(&chip->work);
 	ret_size = atomic_read(&chip->data_pending);
 	atomic_set(&chip->data_pending, 0);
 	if (ret_size > 0) {	/* relay data */
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 235a340e81f2..55f0354864e2 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -466,7 +466,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	delay -= jiffies % delay;
 
 	dbs_info->enable = 1;
-	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+	INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
 	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 836e9b062e5e..14c1af5a264f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -644,7 +644,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 	delay -= jiffies % delay;
 
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+	INIT_DEFERRABLE_WORK(&dbs_info->work, do_dbs_timer);
 	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 70c31d43fff3..b146d76f04cf 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -607,7 +607,7 @@ static int __init devfreq_start_polling(void)
 	mutex_lock(&devfreq_list_lock);
 	polling = false;
 	devfreq_wq = create_freezable_workqueue("devfreq_wq");
-	INIT_DELAYED_WORK_DEFERRABLE(&devfreq_work, devfreq_monitor);
+	INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
 	mutex_unlock(&devfreq_list_lock);
 
 	devfreq_monitor(&devfreq_work.work);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d5dc9da7f99f..90f0b730e9bb 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,7 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
 		return;
 
 	INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-	queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
+	mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
 }
 
 /*
@@ -599,21 +599,6 @@ void edac_mc_reset_delay_period(int value)
 
 	mutex_lock(&mem_ctls_mutex);
 
-	/* scan the list and turn off all workq timers, doing so under lock
-	 */
-	list_for_each(item, &mc_devices) {
-		mci = list_entry(item, struct mem_ctl_info, link);
-
-		if (mci->op_state == OP_RUNNING_POLL)
-			cancel_delayed_work(&mci->work);
-	}
-
-	mutex_unlock(&mem_ctls_mutex);
-
-
-	/* re-walk the list, and reset the poll delay */
-	mutex_lock(&mem_ctls_mutex);
-
 	list_for_each(item, &mc_devices) {
 		mci = list_entry(item, struct mem_ctl_info, link);
 
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 60ac3fbb4cde..725eb5aa8d8c 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -143,7 +143,7 @@ static int __devinit adc_jack_probe(struct platform_device *pdev)
 
 	data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
 
-	INIT_DELAYED_WORK_DEFERRABLE(&data->handler, adc_jack_handler);
+	INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
 
 	platform_set_drvdata(pdev, data);
 
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3252e7067d8b..8fa9d52820d9 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -968,7 +968,7 @@ static void output_poll_execute(struct work_struct *work)
 	}
 
 	if (repoll)
-		queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
 }
 
 void drm_kms_helper_poll_disable(struct drm_device *dev)
@@ -993,7 +993,7 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
 	}
 
 	if (poll)
-		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
@@ -1020,6 +1020,6 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
 	/* kill timer and schedule immediate execution, this doesn't block */
 	cancel_delayed_work(&dev->mode_config.output_poll_work);
 	if (drm_kms_helper_poll)
-		queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+		schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
 }
 EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 1065e90d0919..2526e82bea32 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -878,7 +878,7 @@ static int g2d_suspend(struct device *dev)
 	/* FIXME: good range? */
 	usleep_range(500, 1000);
 
-	flush_work_sync(&g2d->runqueue_work);
+	flush_work(&g2d->runqueue_work);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
index 82c19e82ff02..0fe4e17c461d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gpio.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -302,7 +302,7 @@ nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
 		spin_unlock_irqrestore(&pgpio->lock, flags);
 
 		list_for_each_entry_safe(isr, tmp, &tofree, head) {
-			flush_work_sync(&isr->work);
+			flush_work(&isr->work);
 			kfree(isr);
 		}
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index afaa1727abd2..50b596ec7b7e 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -277,7 +277,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
 		if (rdev->msi_enabled)
 			pci_disable_msi(rdev->pdev);
 	}
-	flush_work_sync(&rdev->hotplug_work);
+	flush_work(&rdev->hotplug_work);
 }
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3c447bf317cb..a32f2e96dd02 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -594,7 +594,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
 	par->dirty.active = false;
 	spin_unlock_irqrestore(&par->dirty.lock, flags);
 
-	flush_delayed_work_sync(&info->deferred_work);
+	flush_delayed_work(&info->deferred_work);
 
 	par->bo_ptr = NULL;
 	ttm_bo_kunmap(&par->map);
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index 0008a512211d..eb003574b634 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -608,7 +608,7 @@ void picolcd_exit_framebuffer(struct picolcd_data *data)
 	/* make sure there is no running update - thus that fbdata->picolcd
 	 * once obtained under lock is guaranteed not to get free() under
 	 * the feet of the deferred work */
-	flush_delayed_work_sync(&info->deferred_work);
+	flush_delayed_work(&info->deferred_work);
 
 	data->fb_info = NULL;
 	unregister_framebuffer(info);
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index bc85bf29062e..38ae87772e96 100644
--- a/drivers/hid/hid-wiimote-ext.c
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -229,7 +229,7 @@ static void wiiext_worker(struct work_struct *work)
 /* schedule work only once, otherwise mark for reschedule */
 static void wiiext_schedule(struct wiimote_ext *ext)
 {
-	queue_work(system_nrt_wq, &ext->worker);
+	schedule_work(&ext->worker);
 }
 
 /*
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 28058ae33d38..eaec8d7a3b73 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -152,13 +152,11 @@ static void set_timeout(unsigned long time)
 {
 	unsigned long delay;
 
-	cancel_delayed_work(&work);
-
 	delay = time - jiffies;
 	if ((long)delay <= 0)
 		delay = 1;
 
-	queue_delayed_work(addr_wq, &work, delay);
+	mod_delayed_work(addr_wq, &work, delay);
 }
 
 static void queue_req(struct addr_req *req)
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index b0d0bc8a6fb6..dc3fd1e8af07 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2004,7 +2004,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 	unsigned long delay;
 
 	if (list_empty(&mad_agent_priv->wait_list)) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
+		cancel_delayed_work(&mad_agent_priv->timed_work);
 	} else {
 		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
 					 struct ib_mad_send_wr_private,
@@ -2013,13 +2013,11 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 		if (time_after(mad_agent_priv->timeout,
 			       mad_send_wr->timeout)) {
 			mad_agent_priv->timeout = mad_send_wr->timeout;
-			__cancel_delayed_work(&mad_agent_priv->timed_work);
 			delay = mad_send_wr->timeout - jiffies;
 			if ((long)delay <= 0)
 				delay = 1;
-			queue_delayed_work(mad_agent_priv->qp_info->
-					   port_priv->wq,
-					   &mad_agent_priv->timed_work, delay);
+			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+					 &mad_agent_priv->timed_work, delay);
 		}
 	}
 }
@@ -2052,11 +2050,9 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 	list_add(&mad_send_wr->agent_list, list_item);
 
 	/* Reschedule a work item if we have a shorter timeout */
-	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		__cancel_delayed_work(&mad_agent_priv->timed_work);
-		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
-				   &mad_agent_priv->timed_work, delay);
-	}
+	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
+		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
+				 &mad_agent_priv->timed_work, delay);
 }
 
 void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index d42c9f435b1b..9e0895b45eb8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2679,11 +2679,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 		}
 	}
 	if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_SFP_D) {
-		if (nesdev->link_recheck)
-			cancel_delayed_work(&nesdev->work);
 		nesdev->link_recheck = 1;
-		schedule_delayed_work(&nesdev->work,
-				      NES_LINK_RECHECK_DELAY);
+		mod_delayed_work(system_wq, &nesdev->work,
+				 NES_LINK_RECHECK_DELAY);
 	}
 }
 
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index f3a3ecf8d09e..e43f6e41a6bd 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -243,10 +243,9 @@ static int nes_netdev_open(struct net_device *netdev)
 
 	spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
 	if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_SFP_D) {
-		if (nesdev->link_recheck)
-			cancel_delayed_work(&nesdev->work);
 		nesdev->link_recheck = 1;
-		schedule_delayed_work(&nesdev->work, NES_LINK_RECHECK_DELAY);
+		mod_delayed_work(system_wq, &nesdev->work,
+				 NES_LINK_RECHECK_DELAY);
 	}
 	spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
 
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index e7a5e36e1203..76b7d430d03a 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -156,8 +156,7 @@ static irqreturn_t qt2160_irq(int irq, void *_qt2160)
 
 	spin_lock_irqsave(&qt2160->lock, flags);
 
-	__cancel_delayed_work(&qt2160->dwork);
-	schedule_delayed_work(&qt2160->dwork, 0);
+	mod_delayed_work(system_wq, &qt2160->dwork, 0);
 
 	spin_unlock_irqrestore(&qt2160->lock, flags);
 
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index f14675702c0f..063a174d3a88 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -376,12 +376,7 @@ static void synaptics_i2c_reschedule_work(struct synaptics_i2c *touch,
 
 	spin_lock_irqsave(&touch->lock, flags);
 
-	/*
-	 * If work is already scheduled then subsequent schedules will not
-	 * change the scheduled time that's why we have to cancel it first.
-	 */
-	__cancel_delayed_work(&touch->dwork);
-	schedule_delayed_work(&touch->dwork, delay);
+	mod_delayed_work(system_wq, &touch->dwork, delay);
 
 	spin_unlock_irqrestore(&touch->lock, flags);
 }
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index e83410721e38..52abb98a8ae5 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -221,7 +221,7 @@ static void wm831x_ts_input_close(struct input_dev *idev)
 	synchronize_irq(wm831x_ts->pd_irq);
 
 	/* Make sure the IRQ completion work is quiesced */
-	flush_work_sync(&wm831x_ts->pd_data_work);
+	flush_work(&wm831x_ts->pd_data_work);
 
 	/* If we ended up with the pen down then make sure we revert back
 	 * to pen detection state for the next time we start up.
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
index 2602be23f341..84b4b0f7eb99 100644
--- a/drivers/isdn/mISDN/hwchannel.c
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -116,7 +116,7 @@ mISDN_freedchannel(struct dchannel *ch)
 	}
 	skb_queue_purge(&ch->squeue);
 	skb_queue_purge(&ch->rqueue);
-	flush_work_sync(&ch->workq);
+	flush_work(&ch->workq);
 	return 0;
 }
 EXPORT_SYMBOL(mISDN_freedchannel);
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
index f56b6e7ffdac..f6837b99908c 100644
--- a/drivers/leds/leds-lm3533.c
+++ b/drivers/leds/leds-lm3533.c
@@ -737,7 +737,7 @@ err_sysfs_remove:
 	sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
 err_unregister:
 	led_classdev_unregister(&led->cdev);
-	flush_work_sync(&led->work);
+	flush_work(&led->work);
 
 	return ret;
 }
@@ -751,7 +751,7 @@ static int __devexit lm3533_led_remove(struct platform_device *pdev)
 	lm3533_ctrlbank_disable(&led->cb);
 	sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
 	led_classdev_unregister(&led->cdev);
-	flush_work_sync(&led->work);
+	flush_work(&led->work);
 
 	return 0;
 }
@@ -765,7 +765,7 @@ static void lm3533_led_shutdown(struct platform_device *pdev)
 
 	lm3533_ctrlbank_disable(&led->cb);
 	lm3533_led_set(&led->cdev, LED_OFF);		/* disable blink */
-	flush_work_sync(&led->work);
+	flush_work(&led->work);
 }
 
 static struct platform_driver lm3533_led_driver = {
diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c
index 0ade6ebfc914..64009a176651 100644
--- a/drivers/leds/leds-lp8788.c
+++ b/drivers/leds/leds-lp8788.c
@@ -172,7 +172,7 @@ static int __devexit lp8788_led_remove(struct platform_device *pdev)
 	struct lp8788_led *led = platform_get_drvdata(pdev);
 
 	led_classdev_unregister(&led->led_dev);
-	flush_work_sync(&led->work);
+	flush_work(&led->work);
 
 	return 0;
 }
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 918d4baff1c7..4c62113f7a77 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -275,7 +275,7 @@ static int wm8350_led_remove(struct platform_device *pdev)
 	struct wm8350_led *led = platform_get_drvdata(pdev);
 
 	led_classdev_unregister(&led->cdev);
-	flush_work_sync(&led->work);
+	flush_work(&led->work);
 	wm8350_led_disable(led);
 	regulator_put(led->dcdc);
 	regulator_put(led->isink);
diff --git a/drivers/macintosh/ams/ams-core.c b/drivers/macintosh/ams/ams-core.c
index 5c6a2d876562..36a4fdddd64a 100644
--- a/drivers/macintosh/ams/ams-core.c
+++ b/drivers/macintosh/ams/ams-core.c
@@ -226,7 +226,7 @@ void ams_sensor_detach(void)
 	 * We do this after ams_info.exit(), because an interrupt might
 	 * have arrived before disabling them.
 	 */
-	flush_work_sync(&ams_info.worker);
+	flush_work(&ams_info.worker);
 
 	/* Remove device */
 	of_device_unregister(ams_info.of_dev);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 034233eefc82..d778563a4ffd 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -944,7 +944,7 @@ static void flush_multipath_work(struct multipath *m)
 	flush_workqueue(kmpath_handlerd);
 	multipath_wait_for_pg_init_completion(m);
 	flush_workqueue(kmultipathd);
-	flush_work_sync(&m->trigger_event);
+	flush_work(&m->trigger_event);
 }
 
 static void multipath_dtr(struct dm_target *ti)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index bc5ddba8045b..fd61f98ee1f6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1146,7 +1146,7 @@ static void mirror_dtr(struct dm_target *ti)
 
 	del_timer_sync(&ms->timer);
 	flush_workqueue(ms->kmirrord_wq);
-	flush_work_sync(&ms->trigger_event);
+	flush_work(&ms->trigger_event);
 	dm_kcopyd_client_destroy(ms->kcopyd_client);
 	destroy_workqueue(ms->kmirrord_wq);
 	free_context(ms, ti, ms->nr_mirrors);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a087bf2a8d66..e2f876539743 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -199,7 +199,7 @@ static void stripe_dtr(struct dm_target *ti)
 	for (i = 0; i < sc->stripes; i++)
 		dm_put_device(ti, sc->stripe[i].dev);
 
-	flush_work_sync(&sc->trigger_event);
+	flush_work(&sc->trigger_event);
 	kfree(sc);
 }
 
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8766ce8c354d..c2117688aa23 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1329,8 +1329,8 @@ static int dvb_net_remove_if(struct dvb_net *dvbnet, unsigned long num)
 		return -EBUSY;
 
 	dvb_net_stop(net);
-	flush_work_sync(&priv->set_multicast_list_wq);
-	flush_work_sync(&priv->restart_net_feed_wq);
+	flush_work(&priv->set_multicast_list_wq);
+	flush_work(&priv->restart_net_feed_wq);
 	printk("dvb_net: removed network interface %s\n", net->name);
 	unregister_netdev(net);
 	dvbnet->state[num]=0;
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
index 71ce52875c38..909ff54868a3 100644
--- a/drivers/media/dvb/mantis/mantis_evm.c
+++ b/drivers/media/dvb/mantis/mantis_evm.c
@@ -111,7 +111,7 @@ void mantis_evmgr_exit(struct mantis_ca *ca)
 	struct mantis_pci *mantis = ca->ca_priv;
 
 	dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
-	flush_work_sync(&ca->hif_evm_work);
+	flush_work(&ca->hif_evm_work);
 	mantis_hif_exit(ca);
 	mantis_pcmcia_exit(ca);
 }
diff --git a/drivers/media/dvb/mantis/mantis_uart.c b/drivers/media/dvb/mantis/mantis_uart.c
index 18340dafa426..85e977861b4a 100644
--- a/drivers/media/dvb/mantis/mantis_uart.c
+++ b/drivers/media/dvb/mantis/mantis_uart.c
@@ -183,6 +183,6 @@ void mantis_uart_exit(struct mantis_pci *mantis)
 {
 	/* disable interrupt */
 	mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
-	flush_work_sync(&mantis->uart_work);
+	flush_work(&mantis->uart_work);
 }
 EXPORT_SYMBOL_GPL(mantis_uart_exit);
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index b58ff87db771..2ce7179a3864 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -196,7 +196,7 @@ static void request_modules(struct bttv *dev)
 
 static void flush_request_modules(struct bttv *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 7e5ffd6f5178..75c890907920 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -272,7 +272,7 @@ static void request_modules(struct cx18 *dev)
 
 static void flush_request_modules(struct cx18 *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 02d4d36735d3..b84ebc54d91b 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1002,7 +1002,7 @@ static void request_modules(struct cx231xx *dev)
 
 static void flush_request_modules(struct cx231xx *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index ce765e3f77bd..bcbf7faf1bab 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -231,9 +231,9 @@ static void cx23885_input_ir_stop(struct cx23885_dev *dev)
 		v4l2_subdev_call(dev->sd_ir, ir, rx_s_parameters, &params);
 		v4l2_subdev_call(dev->sd_ir, ir, rx_g_parameters, &params);
 	}
-	flush_work_sync(&dev->cx25840_work);
-	flush_work_sync(&dev->ir_rx_work);
-	flush_work_sync(&dev->ir_tx_work);
+	flush_work(&dev->cx25840_work);
+	flush_work(&dev->ir_rx_work);
+	flush_work(&dev->ir_tx_work);
 }
 
 static void cx23885_input_ir_close(struct rc_dev *rc)
diff --git a/drivers/media/video/cx88/cx88-mpeg.c b/drivers/media/video/cx88/cx88-mpeg.c
index cd5386ee210c..c04fb618e10b 100644
--- a/drivers/media/video/cx88/cx88-mpeg.c
+++ b/drivers/media/video/cx88/cx88-mpeg.c
@@ -70,7 +70,7 @@ static void request_modules(struct cx8802_dev *dev)
 
 static void flush_request_modules(struct cx8802_dev *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index ca62b9981380..f7831e73f077 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -2900,7 +2900,7 @@ static void request_modules(struct em28xx *dev)
 
 static void flush_request_modules(struct em28xx *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
index e5015b0d5508..8d7283bbd431 100644
--- a/drivers/media/video/omap24xxcam.c
+++ b/drivers/media/video/omap24xxcam.c
@@ -1198,7 +1198,7 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
 
 	atomic_inc(&cam->reset_disable);
 
-	flush_work_sync(&cam->sensor_reset_work);
+	flush_work(&cam->sensor_reset_work);
 
 	rval = videobuf_streamoff(q);
 	if (!rval) {
@@ -1512,7 +1512,7 @@ static int omap24xxcam_release(struct file *file)
 
 	atomic_inc(&cam->reset_disable);
 
-	flush_work_sync(&cam->sensor_reset_work);
+	flush_work(&cam->sensor_reset_work);
 
 	/* stop streaming capture */
 	videobuf_streamoff(&fh->vbq);
@@ -1536,7 +1536,7 @@ static int omap24xxcam_release(struct file *file)
 	 * not be scheduled anymore since streaming is already
 	 * disabled.)
 	 */
-	flush_work_sync(&cam->sensor_reset_work);
+	flush_work(&cam->sensor_reset_work);
 
 	mutex_lock(&cam->mutex);
 	if (atomic_dec_return(&cam->users) == 0) {
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 5fbb4e49495c..f2b37e05b964 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -170,7 +170,7 @@ static void request_submodules(struct saa7134_dev *dev)
 
 static void flush_request_submodules(struct saa7134_dev *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 
 #else
diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c
index dde361a9194e..4df79c656909 100644
--- a/drivers/media/video/saa7134/saa7134-empress.c
+++ b/drivers/media/video/saa7134/saa7134-empress.c
@@ -556,7 +556,7 @@ static int empress_fini(struct saa7134_dev *dev)
 
 	if (NULL == dev->empress_dev)
 		return 0;
-	flush_work_sync(&dev->empress_workqueue);
+	flush_work(&dev->empress_workqueue);
 	video_unregister_device(dev->empress_dev);
 	dev->empress_dev = NULL;
 	return 0;
diff --git a/drivers/media/video/tm6000/tm6000-cards.c b/drivers/media/video/tm6000/tm6000-cards.c
index 034659b13174..307d8c5fb7cd 100644
--- a/drivers/media/video/tm6000/tm6000-cards.c
+++ b/drivers/media/video/tm6000/tm6000-cards.c
@@ -1074,7 +1074,7 @@ static void request_modules(struct tm6000_core *dev)
 
 static void flush_request_modules(struct tm6000_core *dev)
 {
-	flush_work_sync(&dev->request_module_wk);
+	flush_work(&dev->request_module_wk);
 }
 #else
 #define request_modules(dev)
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index cb4910ac4d12..55d589981412 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1259,7 +1259,7 @@ static int menelaus_probe(struct i2c_client *client,
 	return 0;
 fail2:
 	free_irq(client->irq, menelaus);
-	flush_work_sync(&menelaus->work);
+	flush_work(&menelaus->work);
 fail1:
 	kfree(menelaus);
 	return err;
@@ -1270,7 +1270,7 @@ static int __exit menelaus_remove(struct i2c_client *client)
 	struct menelaus_chip *menelaus = i2c_get_clientdata(client);
 
 	free_irq(client->irq, menelaus);
-	flush_work_sync(&menelaus->work);
+	flush_work(&menelaus->work);
 	kfree(menelaus);
 	the_menelaus = NULL;
 	return 0;
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c
index df03dd3bd0e2..6a7710603a90 100644
--- a/drivers/misc/ioc4.c
+++ b/drivers/misc/ioc4.c
@@ -487,7 +487,7 @@ static void __exit
 ioc4_exit(void)
 {
 	/* Ensure ioc4_load_modules() has completed before exiting */
-	flush_work_sync(&ioc4_load_modules_work);
+	flush_work(&ioc4_load_modules_work);
 	pci_unregister_driver(&ioc4_driver);
 }
 
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 597f189b4427..ee2e16b17017 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -204,8 +204,8 @@ void mmc_host_clk_release(struct mmc_host *host)
 	host->clk_requests--;
 	if (mmc_host_may_gate_card(host->card) &&
 	    !host->clk_requests)
-		queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
-				   msecs_to_jiffies(host->clkgate_delay));
+		schedule_delayed_work(&host->clk_gate_work,
+				      msecs_to_jiffies(host->clkgate_delay));
 	spin_unlock_irqrestore(&host->clk_lock, flags);
 }
 
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 551e316e4454..438737a1f59a 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -387,8 +387,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
 		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
 
 	cxt->mtd = NULL;
-	flush_work_sync(&cxt->work_erase);
-	flush_work_sync(&cxt->work_write);
+	flush_work(&cxt->work_erase);
+	flush_work(&cxt->work_write);
 }
 
 
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 875bbb999aa2..9c9f3260344a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1394,7 +1394,7 @@ static int offload_close(struct t3cdev *tdev)
 	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
 
 	/* Flush work scheduled while releasing TIDs */
-	flush_work_sync(&td->tid_release_task);
+	flush_work(&td->tid_release_task);
 
 	tdev->lldev = NULL;
 	cxgb3_set_dummy_ops(tdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index 34ee09bae36e..094773d88f80 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -139,5 +139,5 @@ void mlx4_sense_init(struct mlx4_dev *dev)
 	for (port = 1; port <= dev->caps.num_ports; port++)
 		sense->do_sense_port[port] = 1;
 
-	INIT_DELAYED_WORK_DEFERRABLE(&sense->sense_poll, mlx4_sense_port);
+	INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
 }
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index cfa71a30dc8d..3e5b7509502c 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -3521,7 +3521,7 @@ static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 
 	strncpy(buf, dev->name, IFNAMSIZ);
 
-	flush_work_sync(&vdev->reset_task);
+	flush_work(&vdev->reset_task);
 
 	/* in 2.6 will call stop() if device is up */
 	unregister_netdev(dev);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index ce4df61b4b56..c8251be104d6 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3890,7 +3890,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
 	schedule_work(&cp->reset_task);
 #endif
 
-	flush_work_sync(&cp->reset_task);
+	flush_work(&cp->reset_task);
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 3208dca66758..8419bf385e08 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9927,7 +9927,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
 	if (!netif_running(dev))
 		return 0;
 
-	flush_work_sync(&np->reset_task);
+	flush_work(&np->reset_task);
 	niu_netif_stop(np);
 
 	del_timer_sync(&np->timer);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 83d2b0c34c5e..9650c413e11f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -521,7 +521,7 @@ static void refill_work(struct work_struct *work)
 	/* In theory, this can happen: if we don't get any buffers in
 	 * we will *never* try to fill again. */
 	if (still_empty)
-		queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+		schedule_delayed_work(&vi->refill, HZ/2);
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -540,7 +540,7 @@ again:
 
 	if (vi->num < vi->max / 2) {
 		if (!try_fill_recv(vi, GFP_ATOMIC))
-			queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+			schedule_delayed_work(&vi->refill, 0);
 	}
 
 	/* Out of packets? */
@@ -745,7 +745,7 @@ static int virtnet_open(struct net_device *dev)
 
 	/* Make sure we have some buffers: if oom use wq. */
 	if (!try_fill_recv(vi, GFP_KERNEL))
-		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+		schedule_delayed_work(&vi->refill, 0);
 
 	virtnet_napi_enable(vi);
 	return 0;
@@ -1020,7 +1020,7 @@ static void virtnet_config_changed(struct virtio_device *vdev)
 {
 	struct virtnet_info *vi = vdev->priv;
 
-	queue_work(system_nrt_wq, &vi->config_work);
+	schedule_work(&vi->config_work);
 }
 
 static int init_vqs(struct virtnet_info *vi)
@@ -1152,7 +1152,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	   otherwise get link status from config. */
 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
 		netif_carrier_off(dev);
-		queue_work(system_nrt_wq, &vi->config_work);
+		schedule_work(&vi->config_work);
 	} else {
 		vi->status = VIRTIO_NET_S_LINK_UP;
 		netif_carrier_on(dev);
@@ -1264,7 +1264,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 	netif_device_attach(vi->dev);
 
 	if (!try_fill_recv(vi, GFP_KERNEL))
-		queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+		schedule_delayed_work(&vi->refill, 0);
 
 	mutex_lock(&vi->config_lock);
 	vi->config_enable = true;
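
[All of the virtio_net call sites used system_nrt_wq only to keep a single instance of the work running; with non-reentrancy now a property of every workqueue, schedule_work()/schedule_delayed_work() on system_wq appear to give the same guarantee. A sketch of that property, with hypothetical names:]

    #include <linux/workqueue.h>

    static void config_fn(struct work_struct *work)
    {
        /* non-reentrant: never runs concurrently with itself,
         * even when kicked from several CPUs at once */
    }
    static DECLARE_WORK(config_work, config_fn);

    static void kick_config(void)
    {
        schedule_work(&config_work);    /* no-op if already pending */
    }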
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index e1f410277242..c6ea995750db 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -860,10 +860,10 @@ void hostap_free_data(struct ap_data *ap)
 		return;
 	}
 
-	flush_work_sync(&ap->add_sta_proc_queue);
+	flush_work(&ap->add_sta_proc_queue);
 
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-	flush_work_sync(&ap->wds_oper_queue);
+	flush_work(&ap->wds_oper_queue);
 	if (ap->crypt)
 		ap->crypt->deinit(ap->crypt_priv);
 	ap->crypt = ap->crypt_priv = NULL;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 50f87b60b0bd..8e7000fd4414 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3311,13 +3311,13 @@ static void prism2_free_local_data(struct net_device *dev)
 
 	unregister_netdev(local->dev);
 
-	flush_work_sync(&local->reset_queue);
-	flush_work_sync(&local->set_multicast_list_queue);
-	flush_work_sync(&local->set_tim_queue);
+	flush_work(&local->reset_queue);
+	flush_work(&local->set_multicast_list_queue);
+	flush_work(&local->set_tim_queue);
 #ifndef PRISM2_NO_STATION_MODES
-	flush_work_sync(&local->info_queue);
+	flush_work(&local->info_queue);
 #endif
-	flush_work_sync(&local->comms_qual_update);
+	flush_work(&local->comms_qual_update);
 
 	lib80211_crypt_info_free(&local->crypt_info);
 
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 83324b321652..534e6557e7e6 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2181,8 +2181,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
 
 	/* Make sure the RF Kill check timer is running */
 	priv->stop_rf_kill = 0;
-	cancel_delayed_work(&priv->rf_kill);
-	schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ));
+	mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
 }
 
 static void send_scan_event(void *data)
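
[mod_delayed_work() is the delayed-work counterpart of mod_timer(): if the work is pending its expiry is updated, otherwise it is queued, in one call. This replaces the open-coded cancel_delayed_work() + schedule_delayed_work() pair and removes the window in which the work is neither pending nor queued. A sketch modeled on the hunk above; rf_kill_work is hypothetical:]

    #include <linux/timer.h>
    #include <linux/workqueue.h>

    static struct delayed_work rf_kill_work;

    static void rf_kill_rearm(void)
    {
        /* queue if idle, otherwise just move the expiry; returns
         * true if the work was already pending */
        mod_delayed_work(system_wq, &rf_kill_work,
                         round_jiffies_relative(HZ));
    }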
@@ -4322,9 +4321,8 @@ static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
 				    "disabled by HW switch\n");
 			/* Make sure the RF_KILL check timer is running */
 			priv->stop_rf_kill = 0;
-			cancel_delayed_work(&priv->rf_kill);
-			schedule_delayed_work(&priv->rf_kill,
-					      round_jiffies_relative(HZ));
+			mod_delayed_work(system_wq, &priv->rf_kill,
+					 round_jiffies_relative(HZ));
 		} else
 			schedule_reset(priv);
 	}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index af83c43bcdb1..ef2b171e3514 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1164,8 +1164,7 @@ void zd_usb_reset_rx_idle_timer(struct zd_usb *usb)
 {
 	struct zd_usb_rx *rx = &usb->rx;
 
-	cancel_delayed_work(&rx->idle_work);
-	queue_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
+	mod_delayed_work(zd_workqueue, &rx->idle_work, ZD_RX_IDLE_INTERVAL);
 }
 
 static inline void init_usb_interrupt(struct zd_usb *usb)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 52daaa816e53..9da5fe715e6a 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7685,25 +7685,15 @@ static int fan_set_speed(int speed)
 
 static void fan_watchdog_reset(void)
 {
-	static int fan_watchdog_active;
-
 	if (fan_control_access_mode == TPACPI_FAN_WR_NONE)
 		return;
 
-	if (fan_watchdog_active)
-		cancel_delayed_work(&fan_watchdog_task);
-
 	if (fan_watchdog_maxinterval > 0 &&
-	    tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
-		fan_watchdog_active = 1;
-		if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
-				msecs_to_jiffies(fan_watchdog_maxinterval
-						 * 1000))) {
-			pr_err("failed to queue the fan watchdog, "
-			       "watchdog will not trigger\n");
-		}
-	} else
-		fan_watchdog_active = 0;
+	    tpacpi_lifecycle != TPACPI_LIFE_EXITING)
+		mod_delayed_work(tpacpi_wq, &fan_watchdog_task,
+				 msecs_to_jiffies(fan_watchdog_maxinterval * 1000));
+	else
+		cancel_delayed_work(&fan_watchdog_task);
 }
 
 static void fan_watchdog_fire(struct work_struct *ignored)
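
[Because mod_delayed_work() does the right thing whether or not the work is already pending, the driver no longer needs its own fan_watchdog_active bookkeeping, and the error path disappears: the call always leaves the work queued with the requested delay. Distilled to a generic sketch; watchdog_task here is a hypothetical delayed_work:]

    static struct delayed_work watchdog_task;   /* hypothetical */

    static void watchdog_reset(unsigned int max_interval_ms)
    {
        if (max_interval_ms)
            mod_delayed_work(system_wq, &watchdog_task,
                             msecs_to_jiffies(max_interval_ms));
        else
            cancel_delayed_work(&watchdog_task);
    }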
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index bba3ccac72fe..3041514f4d3f 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -1018,7 +1018,7 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
 	}
 
 	/* Init work for measuring temperature periodically */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->btemp_periodic_work,
+	INIT_DEFERRABLE_WORK(&di->btemp_periodic_work,
 		ab8500_btemp_periodic_work);
 
 	/* Identify the battery */
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index d4f0c98428cb..0701dbc2b7e1 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -2618,9 +2618,9 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
 	}
 
 	/* Init work for HW failure check */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->check_hw_failure_work,
+	INIT_DEFERRABLE_WORK(&di->check_hw_failure_work,
 		ab8500_charger_check_hw_failure_work);
-	INIT_DELAYED_WORK_DEFERRABLE(&di->check_usbchgnotok_work,
+	INIT_DEFERRABLE_WORK(&di->check_usbchgnotok_work,
 		ab8500_charger_check_usbchargernotok_work);
 
 	/*
@@ -2632,10 +2632,10 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
 	 * watchdog have to be kicked by the charger driver
 	 * when the AC charger is disabled
 	 */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->kick_wd_work,
+	INIT_DEFERRABLE_WORK(&di->kick_wd_work,
 		ab8500_charger_kick_watchdog_work);
 
-	INIT_DELAYED_WORK_DEFERRABLE(&di->check_vbat_work,
+	INIT_DEFERRABLE_WORK(&di->check_vbat_work,
 		ab8500_charger_check_vbat_work);
 
 	/* Init work for charger detection */
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index bf022255994c..5c9e7c263c38 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2516,19 +2516,19 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
 	INIT_WORK(&di->fg_acc_cur_work, ab8500_fg_acc_cur_work);
 
 	/* Init work for reinitialising the fg algorithm */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_reinit_work,
+	INIT_DEFERRABLE_WORK(&di->fg_reinit_work,
 		ab8500_fg_reinit_work);
 
 	/* Work delayed Queue to run the state machine */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_periodic_work,
+	INIT_DEFERRABLE_WORK(&di->fg_periodic_work,
 		ab8500_fg_periodic_work);
 
 	/* Work to check low battery condition */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_low_bat_work,
+	INIT_DEFERRABLE_WORK(&di->fg_low_bat_work,
 		ab8500_fg_low_bat_work);
 
 	/* Init work for HW failure check */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->fg_check_hw_failure_work,
+	INIT_DEFERRABLE_WORK(&di->fg_check_hw_failure_work,
 		ab8500_fg_check_hw_failure_work);
 
 	/* Initialize OVV, and other registers */
diff --git a/drivers/power/abx500_chargalg.c b/drivers/power/abx500_chargalg.c
index 804b88c760d6..4d302803ffcc 100644
--- a/drivers/power/abx500_chargalg.c
+++ b/drivers/power/abx500_chargalg.c
@@ -1848,9 +1848,9 @@ static int __devinit abx500_chargalg_probe(struct platform_device *pdev)
 	}
 
 	/* Init work for chargalg */
-	INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_periodic_work,
+	INIT_DEFERRABLE_WORK(&di->chargalg_periodic_work,
 		abx500_chargalg_periodic_work);
-	INIT_DELAYED_WORK_DEFERRABLE(&di->chargalg_wd_work,
+	INIT_DEFERRABLE_WORK(&di->chargalg_wd_work,
 		abx500_chargalg_wd_work);
 
 	/* Init work for chargalg */
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 526e5c931294..7ff83cf43c8c 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -509,9 +509,8 @@ static void _setup_polling(struct work_struct *work)
 	if (!delayed_work_pending(&cm_monitor_work) ||
 	    (delayed_work_pending(&cm_monitor_work) &&
 	     time_after(next_polling, _next_polling))) {
-		cancel_delayed_work_sync(&cm_monitor_work);
 		next_polling = jiffies + polling_jiffy;
-		queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+		mod_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
 	}
 
 out:
@@ -546,10 +545,8 @@ static void fullbatt_handler(struct charger_manager *cm)
 	if (cm_suspended)
 		device_set_wakeup_capable(cm->dev, true);
 
-	if (delayed_work_pending(&cm->fullbatt_vchk_work))
-		cancel_delayed_work(&cm->fullbatt_vchk_work);
-	queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
-			   msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+	mod_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+			 msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
 	cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
 				desc->fullbatt_vchkdrop_ms);
 
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 74c6b23aeabf..b19bfe400f8c 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -290,7 +290,7 @@ static struct gpio collie_batt_gpios[] = {
 static int collie_bat_suspend(struct ucb1x00_dev *dev, pm_message_t state)
 {
 	/* flush all pending status updates */
-	flush_work_sync(&bat_work);
+	flush_work(&bat_work);
 	return 0;
 }
 
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 076e211a40b7..704e652072be 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -355,8 +355,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
 
 	dev_dbg(di->dev, "%s\n", __func__);
 
-	cancel_delayed_work(&di->monitor_work);
-	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
+	mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
 }
 
 
@@ -401,8 +400,7 @@ static void ds2760_battery_set_charged(struct power_supply *psy)
 
 	/* postpone the actual work by 20 secs. This is for debouncing GPIO
 	 * signals and to let the current value settle. See AN4188. */
-	cancel_delayed_work(&di->set_charged_work);
-	queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
+	mod_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
 }
 
 static int ds2760_battery_get_property(struct power_supply *psy,
@@ -616,8 +614,7 @@ static int ds2760_battery_resume(struct platform_device *pdev)
 	di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN;
 	power_supply_changed(&di->bat);
 
-	cancel_delayed_work(&di->monitor_work);
-	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
+	mod_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ);
 
 	return 0;
 }
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 8dbc7bfaab14..ffbed5e5b945 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -173,16 +173,14 @@ static void jz_battery_external_power_changed(struct power_supply *psy)
 {
 	struct jz_battery *jz_battery = psy_to_jz_battery(psy);
 
-	cancel_delayed_work(&jz_battery->work);
-	schedule_delayed_work(&jz_battery->work, 0);
+	mod_delayed_work(system_wq, &jz_battery->work, 0);
 }
 
 static irqreturn_t jz_battery_charge_irq(int irq, void *data)
 {
 	struct jz_battery *jz_battery = data;
 
-	cancel_delayed_work(&jz_battery->work);
-	schedule_delayed_work(&jz_battery->work, 0);
+	mod_delayed_work(system_wq, &jz_battery->work, 0);
 
 	return IRQ_HANDLED;
 }
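
[Both call sites here run in atomic context (a power_supply callback and a hard-IRQ handler). mod_delayed_work() is usable there, and the delayed-work timer is now flagged TIMER_IRQSAFE (see the workqueue.h hunk below), so retargeting from an interrupt appears to be safe. A sketch modeled on the hunk above:]

    static irqreturn_t charge_irq(int irq, void *data)
    {
        struct jz_battery *jz_battery = data;

        /* callable from hard-IRQ context; delay 0 queues immediately */
        mod_delayed_work(system_wq, &jz_battery->work, 0);
        return IRQ_HANDLED;
    }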
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c284143cfcd7..58e67830143c 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -232,7 +232,7 @@ static int __devinit max17040_probe(struct i2c_client *client,
 	max17040_reset(client);
 	max17040_get_version(client);
 
-	INIT_DELAYED_WORK_DEFERRABLE(&chip->work, max17040_work);
+	INIT_DEFERRABLE_WORK(&chip->work, max17040_work);
 	schedule_delayed_work(&chip->work, MAX17040_DELAY);
 
 	return 0;
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 28bbe7e094e3..51199b5ce221 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -327,7 +327,7 @@ static struct gpio tosa_bat_gpios[] = {
 static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
 {
 	/* flush all pending status updates */
-	flush_work_sync(&bat_work);
+	flush_work(&bat_work);
 	return 0;
 }
 
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index d2d4c08c681c..1245fe1f48c3 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -146,7 +146,7 @@ static irqreturn_t wm97xx_chrg_irq(int irq, void *data)
 #ifdef CONFIG_PM
 static int wm97xx_bat_suspend(struct device *dev)
 {
-	flush_work_sync(&bat_work);
+	flush_work(&bat_work);
 	return 0;
 }
 
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index 8c9a607ea77a..5757d0d6782f 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -276,7 +276,7 @@ static int z2_batt_suspend(struct device *dev)
 	struct i2c_client *client = to_i2c_client(dev);
 	struct z2_charger *charger = i2c_get_clientdata(client);
 
-	flush_work_sync(&charger->bat_work);
+	flush_work(&charger->bat_work);
 	return 0;
 }
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 2e0352dc26bd..5c4829cba6a6 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3476,7 +3476,7 @@ void regulator_unregister(struct regulator_dev *rdev)
 	regulator_put(rdev->supply);
 	mutex_lock(&regulator_list_mutex);
 	debugfs_remove_recursive(rdev->debugfs);
-	flush_work_sync(&rdev->disable_work.work);
+	flush_work(&rdev->disable_work.work);
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index def24a1079ad..33c52bc2c7b4 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -999,7 +999,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
 	int poll_count = 0;
 	arcmsr_free_sysfs_attr(acb);
 	scsi_remove_host(host);
-	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
+	flush_work(&acb->arcmsr_do_message_isr_bh);
 	del_timer_sync(&acb->eternal_timer);
 	arcmsr_disable_outbound_ints(acb);
 	arcmsr_stop_adapter_bgrb(acb);
@@ -1045,7 +1045,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
 		(struct AdapterControlBlock *)host->hostdata;
 	del_timer_sync(&acb->eternal_timer);
 	arcmsr_disable_outbound_ints(acb);
-	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
+	flush_work(&acb->arcmsr_do_message_isr_bh);
 	arcmsr_stop_adapter_bgrb(acb);
 	arcmsr_flush_adapter_cache(acb);
 }
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0a2c5a8ebb82..45e192a51005 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9020,7 +9020,7 @@ static void __ipr_remove(struct pci_dev *pdev)
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-	flush_work_sync(&ioa_cfg->work_q);
+	flush_work(&ioa_cfg->work_q);
 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
 
 	spin_lock(&ipr_driver_lock);
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index ea8a0b47d66d..af763eab2039 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -5459,7 +5459,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev)
 	pmcraid_shutdown(pdev);
 
 	pmcraid_disable_interrupts(pinstance, ~0);
-	flush_work_sync(&pinstance->worker_q);
+	flush_work(&pinstance->worker_q);
 
 	pmcraid_kill_tasklets(pinstance);
 	pmcraid_unregister_interrupt_handler(pinstance);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 5b30132960c7..bddc97c5c8e9 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -969,7 +969,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	mutex_unlock(&ha->tgt.tgt_mutex);
 
-	flush_delayed_work_sync(&tgt->sess_del_work);
+	flush_delayed_work(&tgt->sess_del_work);
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
 	    "Waiting for sess works (tgt %p)", tgt);
diff --git a/drivers/staging/ccg/u_ether.c b/drivers/staging/ccg/u_ether.c
index 1154a99dc8db..d0dabcf015a9 100644
--- a/drivers/staging/ccg/u_ether.c
+++ b/drivers/staging/ccg/u_ether.c
@@ -827,7 +827,7 @@ void gether_cleanup(void)
 		return;
 
 	unregister_netdev(the_dev->net);
-	flush_work_sync(&the_dev->work);
+	flush_work(&the_dev->work);
 	free_netdev(the_dev->net);
 
 	the_dev = NULL;
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 24d8eebc1d10..094fdc366f30 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -264,7 +264,7 @@ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
 	list_add_tail(&msg->node, &nvec->tx_data);
 	spin_unlock_irqrestore(&nvec->tx_lock, flags);
 
-	queue_work(system_nrt_wq, &nvec->tx_work);
+	schedule_work(&nvec->tx_work);
 
 	return 0;
 }
@@ -471,7 +471,7 @@ static void nvec_rx_completed(struct nvec_chip *nvec)
 	if (!nvec_msg_is_event(nvec->rx))
 		complete(&nvec->ec_transfer);
 
-	queue_work(system_nrt_wq, &nvec->rx_work);
+	schedule_work(&nvec->rx_work);
 }
 
 /**
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 2ab31e4f02cc..67789b8345d2 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -694,17 +694,14 @@ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
 static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
 					    int delay)
 {
-	cancel_delayed_work(&(tz->poll_queue));
-
-	if (!delay)
-		return;
-
 	if (delay > 1000)
-		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-				   round_jiffies(msecs_to_jiffies(delay)));
+		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+				 round_jiffies(msecs_to_jiffies(delay)));
+	else if (delay)
+		mod_delayed_work(system_freezable_wq, &tz->poll_queue,
+				 msecs_to_jiffies(delay));
 	else
-		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
-				   msecs_to_jiffies(delay));
+		cancel_delayed_work(&tz->poll_queue);
 }
 
 static void thermal_zone_device_passive(struct thermal_zone_device *tz,
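
[The rewrite folds the unconditional cancel into the two queueing branches: mod_delayed_work() restarts the polling period in place when called with a new delay, and cancel_delayed_work() remains only for the delay == 0 "stop polling" case. The same retrigger-or-stop helper, reduced to its skeleton (names hypothetical):]

    static void set_polling(struct delayed_work *dw, int delay_ms)
    {
        if (delay_ms)
            mod_delayed_work(system_freezable_wq, dw,
                             msecs_to_jiffies(delay_ms));
        else
            cancel_delayed_work(dw);
    }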
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index 0083bc1f63f4..5b95b4f28cf3 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -765,7 +765,7 @@ static void hvsi_flush_output(struct hvsi_struct *hp)
 
 	/* 'writer' could still be pending if it didn't see n_outbuf = 0 yet */
 	cancel_delayed_work_sync(&hp->writer);
-	flush_work_sync(&hp->handshaker);
+	flush_work(&hp->handshaker);
 
 	/*
 	 * it's also possible that our timeout expired and hvsi_write_worker
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index 0aeb5a38d296..b4ba0670dc54 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1729,7 +1729,7 @@ void ipwireless_hardware_free(struct ipw_hardware *hw)
 
 	ipwireless_stop_interrupts(hw);
 
-	flush_work_sync(&hw->work_rx);
+	flush_work(&hw->work_rx);
 
 	for (i = 0; i < NL_NUM_OF_ADDRESSES; i++)
 		if (hw->packet_assembler[i] != NULL)
diff --git a/drivers/tty/ipwireless/network.c b/drivers/tty/ipwireless/network.c
index d2af155dec8b..57102e66165a 100644
--- a/drivers/tty/ipwireless/network.c
+++ b/drivers/tty/ipwireless/network.c
@@ -435,8 +435,8 @@ void ipwireless_network_free(struct ipw_network *network)
 	network->shutting_down = 1;
 
 	ipwireless_ppp_close(network);
-	flush_work_sync(&network->work_go_online);
-	flush_work_sync(&network->work_go_offline);
+	flush_work(&network->work_go_online);
+	flush_work(&network->work_go_offline);
 
 	ipwireless_stop_interrupts(network->hardware);
 	ipwireless_associate_network(network->hardware, NULL);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 3f63d834cbc9..c0b334327d93 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -122,7 +122,7 @@ static void kgdboc_unregister_kbd(void)
 			i--;
 		}
 	}
-	flush_work_sync(&kgdboc_restore_input_work);
+	flush_work(&kgdboc_restore_input_work);
 }
 #else /* ! CONFIG_KDB_KEYBOARD */
 #define kgdboc_register_kbd(x) 0
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index ccc2f35adff1..6ede6fd92b4c 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1227,7 +1227,7 @@ static int serial_omap_suspend(struct device *dev)
 	struct uart_omap_port *up = dev_get_drvdata(dev);
 
 	uart_suspend_port(&serial_omap_reg, &up->port);
-	flush_work_sync(&up->qos_work);
+	flush_work(&up->qos_work);
 
 	return 0;
 }
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4d7b56268c79..0f2a2c5e704c 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -523,9 +523,9 @@ static int tty_ldisc_halt(struct tty_struct *tty)
  */
 static void tty_ldisc_flush_works(struct tty_struct *tty)
 {
-	flush_work_sync(&tty->hangup_work);
-	flush_work_sync(&tty->SAK_work);
-	flush_work_sync(&tty->buf.work);
+	flush_work(&tty->hangup_work);
+	flush_work(&tty->SAK_work);
+	flush_work(&tty->buf.work);
 }
 
 /**
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 975e9c6691d6..807627b36cc8 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -718,7 +718,7 @@ static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_de
 	del_timer_sync(&instance->resubmit_timer);
 	usb_free_urb(int_urb);
 
-	flush_work_sync(&instance->status_check_work);
+	flush_work(&instance->status_check_work);
 }
 
 static int speedtch_pre_reset(struct usb_interface *intf)
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index e1f8b2c973fe..defff43950bc 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2262,7 +2262,7 @@ static void uea_stop(struct uea_softc *sc)
 	usb_free_urb(sc->urb_int);
 
 	/* flush the work item, when no one can schedule it */
-	flush_work_sync(&sc->task);
+	flush_work(&sc->task);
 
 	release_firmware(sc->dsp_firm);
 	uea_leaves(INS_TO_USBDEV(sc));
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index b9c46900c2c1..6458764994ef 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -834,7 +834,7 @@ void gether_cleanup(void)
 		return;
 
 	unregister_netdev(the_dev->net);
-	flush_work_sync(&the_dev->work);
+	flush_work(&the_dev->work);
 	free_netdev(the_dev->net);
 
 	the_dev = NULL;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 6780010e9c3c..4a1d64d92338 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -893,7 +893,7 @@ static void ohci_stop (struct usb_hcd *hcd)
 	ohci_dump (ohci, 1);
 
 	if (quirk_nec(ohci))
-		flush_work_sync(&ohci->nec_work);
+		flush_work(&ohci->nec_work);
 
 	ohci_usb_reset (ohci);
 	ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index 81f1f9a0be8f..ceee2119bffa 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -1230,7 +1230,7 @@ static int __exit isp1301_remove(struct i2c_client *i2c)
 	isp->timer.data = 0;
 	set_bit(WORK_STOP, &isp->todo);
 	del_timer_sync(&isp->timer);
-	flush_work_sync(&isp->work);
+	flush_work(&isp->work);
 
 	put_device(&i2c->dev);
 	the_transceiver = NULL;
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 3f5acc7771da..6b5e6e0e202f 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -906,7 +906,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
 		r = -ENOMEM;
 		goto err_wq;
 	}
-	INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
+	INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work);
 	INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);
 
 	dev_set_drvdata(&dssdev->dev, td);
@@ -962,8 +962,8 @@ static int taal_probe(struct omap_dss_device *dssdev)
 			goto err_irq;
 		}
 
-		INIT_DELAYED_WORK_DEFERRABLE(&td->te_timeout_work,
+		INIT_DEFERRABLE_WORK(&td->te_timeout_work,
 					taal_te_timeout_work_callback);
 
 		dev_dbg(&dssdev->dev, "Using GPIO TE\n");
 	}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index b07e8864f82f..05ee04667af1 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -4306,7 +4306,7 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
 	 * and is sending the data.
 	 */
 
-	__cancel_delayed_work(&dsi->framedone_timeout_work);
+	cancel_delayed_work(&dsi->framedone_timeout_work);
 
 	dsi_handle_framedone(dsidev, 0);
 }
@@ -4863,8 +4863,8 @@ static int __init omap_dsihw_probe(struct platform_device *dsidev)
 	mutex_init(&dsi->lock);
 	sema_init(&dsi->bus_lock, 1);
 
-	INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
+	INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work,
 			dsi_framedone_timeout_work_callback);
 
 #ifdef DSI_CATCH_MISSING_TE
 	init_timer(&dsi->te_timer);
diff --git a/fs/affs/super.c b/fs/affs/super.c
index c70f1e5fc024..022cecb0757d 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -551,7 +551,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
 		return -EINVAL;
 	}
 
-	flush_delayed_work_sync(&sbi->sb_work);
+	flush_delayed_work(&sbi->sb_work);
 	replace_mount_options(sb, new_opts);
 
 	sbi->s_flags = mount_flags;
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 587ef5123cd8..7ef637d7f3a5 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -351,9 +351,7 @@ void afs_dispatch_give_up_callbacks(struct work_struct *work)
  */
 void afs_flush_callback_breaks(struct afs_server *server)
 {
-	cancel_delayed_work(&server->cb_break_work);
-	queue_delayed_work(afs_callback_update_worker,
-			   &server->cb_break_work, 0);
+	mod_delayed_work(afs_callback_update_worker, &server->cb_break_work, 0);
 }
 
 #if 0
diff --git a/fs/afs/server.c b/fs/afs/server.c
index d59b7516e943..f342acf3547d 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -285,12 +285,7 @@ static void afs_reap_server(struct work_struct *work)
 	expiry = server->time_of_death + afs_server_timeout;
 	if (expiry > now) {
 		delay = (expiry - now) * HZ;
-		if (!queue_delayed_work(afs_wq, &afs_server_reaper,
-					delay)) {
-			cancel_delayed_work(&afs_server_reaper);
-			queue_delayed_work(afs_wq, &afs_server_reaper,
-					   delay);
-		}
+		mod_delayed_work(afs_wq, &afs_server_reaper, delay);
 		break;
 	}
 
@@ -323,6 +318,5 @@ static void afs_reap_server(struct work_struct *work)
 void __exit afs_purge_servers(void)
 {
 	afs_server_timeout = 0;
-	cancel_delayed_work(&afs_server_reaper);
-	queue_delayed_work(afs_wq, &afs_server_reaper, 0);
+	mod_delayed_work(afs_wq, &afs_server_reaper, 0);
 }
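
[The AFS reapers used the other common idiom that mod_delayed_work() subsumes: try to queue, and if that fails because the work is already pending, cancel and requeue with the new delay. Before and after, condensed from the hunks above:]

    /* before: open-coded retarget at every call site */
    if (!queue_delayed_work(afs_wq, &afs_server_reaper, delay)) {
        cancel_delayed_work(&afs_server_reaper);
        queue_delayed_work(afs_wq, &afs_server_reaper, delay);
    }

    /* after: one call with the same intent, without the gap between
     * the cancel and the requeue */
    mod_delayed_work(afs_wq, &afs_server_reaper, delay);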
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 431984d2e372..57bcb1596530 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -561,12 +561,7 @@ static void afs_vlocation_reaper(struct work_struct *work)
 	if (expiry > now) {
 		delay = (expiry - now) * HZ;
 		_debug("delay %lu", delay);
-		if (!queue_delayed_work(afs_wq, &afs_vlocation_reap,
-					delay)) {
-			cancel_delayed_work(&afs_vlocation_reap);
-			queue_delayed_work(afs_wq, &afs_vlocation_reap,
-					   delay);
-		}
+		mod_delayed_work(afs_wq, &afs_vlocation_reap, delay);
 		break;
 	}
 
@@ -614,13 +609,10 @@ void afs_vlocation_purge(void)
 	spin_lock(&afs_vlocation_updates_lock);
 	list_del_init(&afs_vlocation_updates);
 	spin_unlock(&afs_vlocation_updates_lock);
-	cancel_delayed_work(&afs_vlocation_update);
-	queue_delayed_work(afs_vlocation_update_worker,
-			   &afs_vlocation_update, 0);
+	mod_delayed_work(afs_vlocation_update_worker, &afs_vlocation_update, 0);
 	destroy_workqueue(afs_vlocation_update_worker);
 
-	cancel_delayed_work(&afs_vlocation_reap);
-	queue_delayed_work(afs_wq, &afs_vlocation_reap, 0);
+	mod_delayed_work(afs_wq, &afs_vlocation_reap, 0);
 }
 
 /*
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 4a38db739ca0..0fb6539b0c8c 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1289,7 +1289,7 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
 	spin_lock(&ls->ls_recover_spin);
 	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
 	spin_unlock(&ls->ls_recover_spin);
-	flush_delayed_work_sync(&sdp->sd_control_work);
+	flush_delayed_work(&sdp->sd_control_work);
 
 	/* mounted_lock and control_lock will be purged in dlm recovery */
 release:
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index a8d90f2f576c..bc737261f234 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1579,7 +1579,7 @@ out:
 	clear_inode(inode);
 	gfs2_dir_hash_inval(ip);
 	ip->i_gl->gl_object = NULL;
-	flush_delayed_work_sync(&ip->i_gl->gl_work);
+	flush_delayed_work(&ip->i_gl->gl_work);
 	gfs2_glock_add_to_lru(ip->i_gl);
 	gfs2_glock_put(ip->i_gl);
 	ip->i_gl = NULL;
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index ee1bc55677f1..553909395270 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -644,7 +644,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 
 	/* sync the superblock to buffers */
 	sb = inode->i_sb;
-	flush_delayed_work_sync(&HFS_SB(sb)->mdb_work);
+	flush_delayed_work(&HFS_SB(sb)->mdb_work);
 	/* .. finally sync the buffers to disk */
 	err = sync_blockdev(sb->s_bdev);
 	if (!ret)
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 333df07ae3bd..eaa74323663a 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -314,11 +314,11 @@ static void ncp_stop_tasks(struct ncp_server *server) {
 	release_sock(sk);
 	del_timer_sync(&server->timeout_tm);
 
-	flush_work_sync(&server->rcv.tq);
+	flush_work(&server->rcv.tq);
 	if (sk->sk_socket->type == SOCK_STREAM)
-		flush_work_sync(&server->tx.tq);
+		flush_work(&server->tx.tq);
 	else
-		flush_work_sync(&server->timeout_tq);
+		flush_work(&server->timeout_tq);
 }
 
 static int ncp_show_options(struct seq_file *seq, struct dentry *root)
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 6930bec91bca..1720d32ffa54 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -117,8 +117,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
 		timeout = 5 * HZ;
 	dprintk("%s: requeueing work. Lease period = %ld\n",
 		__func__, (timeout + HZ - 1) / HZ);
-	cancel_delayed_work(&clp->cl_renewd);
-	schedule_delayed_work(&clp->cl_renewd, timeout);
+	mod_delayed_work(system_wq, &clp->cl_renewd, timeout);
 	set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
 	spin_unlock(&clp->cl_lock);
 }
diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c
index 8f9cea1597af..c19897d0fe14 100644
--- a/fs/ocfs2/cluster/quorum.c
+++ b/fs/ocfs2/cluster/quorum.c
@@ -327,5 +327,5 @@ void o2quo_exit(void)
 {
 	struct o2quo_state *qs = &o2quo_state;
 
-	flush_work_sync(&qs->qs_work);
+	flush_work(&qs->qs_work);
 }
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 19e2380fb867..001537f92caf 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -954,7 +954,7 @@ xfs_fs_sync_fs(
 		 * We schedule xfssyncd now (now that the disk is
 		 * active) instead of later (when it might not be).
 		 */
-		flush_delayed_work_sync(&mp->m_sync_work);
+		flush_delayed_work(&mp->m_sync_work);
 	}
 
 	return 0;
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index 96548176db80..9500caf15acf 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -475,7 +475,7 @@ xfs_flush_inodes(
 	struct xfs_mount	*mp = ip->i_mount;
 
 	queue_work(xfs_syncd_wq, &mp->m_flush_work);
-	flush_work_sync(&mp->m_flush_work);
+	flush_work(&mp->m_flush_work);
 }
 
 STATIC void
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index af155450cabb..2b58905d3504 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -16,6 +16,7 @@ struct workqueue_struct;
 
 struct work_struct;
 typedef void (*work_func_t)(struct work_struct *work);
+void delayed_work_timer_fn(unsigned long __data);
 
 /*
  * The first word is the work queue pointer and the flags rolled into
@@ -67,9 +68,18 @@ enum {
 	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
 				  WORK_STRUCT_COLOR_BITS,
 
+	/* data contains off-queue information when !WORK_STRUCT_CWQ */
+	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,
+
+	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
+
+	WORK_OFFQ_FLAG_BITS	= 1,
+	WORK_OFFQ_CPU_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,
+
+	/* convenience constants */
 	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
 	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
-	WORK_STRUCT_NO_CPU	= WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,
+	WORK_STRUCT_NO_CPU	= (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,
 
 	/* bit mask for work_busy() return values */
 	WORK_BUSY_PENDING	= 1 << 0,
@@ -92,6 +102,7 @@ struct work_struct {
 struct delayed_work {
 	struct work_struct work;
 	struct timer_list timer;
+	int cpu;
 };
 
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -115,41 +126,38 @@ struct execute_work {
 #define __WORK_INIT_LOCKDEP_MAP(n, k)
 #endif
 
 #define __WORK_INITIALIZER(n, f) {				\
 	.data = WORK_DATA_STATIC_INIT(),			\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
 	}
 
-#define __DELAYED_WORK_INITIALIZER(n, f) {			\
+#define __DELAYED_WORK_INITIALIZER(n, f, tflags) {		\
 	.work = __WORK_INITIALIZER((n).work, (f)),		\
-	.timer = TIMER_INITIALIZER(NULL, 0, 0),			\
+	.timer = __TIMER_INITIALIZER(delayed_work_timer_fn,	\
+				     0, (unsigned long)&(n),	\
+				     (tflags) | TIMER_IRQSAFE),	\
 	}
 
-#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
-	.work = __WORK_INITIALIZER((n).work, (f)),		\
-	.timer = TIMER_DEFERRED_INITIALIZER(NULL, 0, 0),	\
-	}
-
 #define DECLARE_WORK(n, f)					\
 	struct work_struct n = __WORK_INITIALIZER(n, f)
 
 #define DECLARE_DELAYED_WORK(n, f)				\
-	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)
 
-#define DECLARE_DEFERRED_WORK(n, f)				\
-	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
+#define DECLARE_DEFERRABLE_WORK(n, f)				\
+	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
 
 /*
  * initialize a work item's function pointer
  */
 #define PREPARE_WORK(_work, _func)				\
 	do {							\
 		(_work)->func = (_func);			\
 	} while (0)
 
 #define PREPARE_DELAYED_WORK(_work, _func)			\
 	PREPARE_WORK(&(_work)->work, (_func))
 
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
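
[After this rework every delayed_work timer is wired to delayed_work_timer_fn at initialization time and flagged TIMER_IRQSAFE; a deferrable work is just the TIMER_DEFERRABLE variant of the same initializer rather than a parallel set of macros. Usage after the patch, as a sketch (sync_fn and struct ctx are hypothetical):]

    /* static declaration */
    static void sync_fn(struct work_struct *work);
    static DECLARE_DEFERRABLE_WORK(sync_work, sync_fn);

    /* runtime initialization of an embedded member */
    struct ctx { struct delayed_work dwork; };  /* hypothetical */

    static void ctx_init(struct ctx *c)
    {
        INIT_DEFERRABLE_WORK(&c->dwork, sync_fn);
    }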
@@ -179,7 +187,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 								\
 		__init_work((_work), _onstack);			\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
-		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
+		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
 		INIT_LIST_HEAD(&(_work)->entry);		\
 		PREPARE_WORK((_work), (_func));			\
 	} while (0)
@@ -193,33 +201,44 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 	} while (0)
 #endif
 
 #define INIT_WORK(_work, _func)					\
 	do {							\
 		__INIT_WORK((_work), (_func), 0);		\
 	} while (0)
 
 #define INIT_WORK_ONSTACK(_work, _func)				\
 	do {							\
 		__INIT_WORK((_work), (_func), 1);		\
 	} while (0)
 
-#define INIT_DELAYED_WORK(_work, _func)				\
+#define __INIT_DELAYED_WORK(_work, _func, _tflags)		\
 	do {							\
 		INIT_WORK(&(_work)->work, (_func));		\
-		init_timer(&(_work)->timer);			\
+		__setup_timer(&(_work)->timer, delayed_work_timer_fn,	\
+			      (unsigned long)(_work),		\
+			      (_tflags) | TIMER_IRQSAFE);	\
 	} while (0)
 
-#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
+#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags)	\
 	do {							\
 		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
-		init_timer_on_stack(&(_work)->timer);		\
+		__setup_timer_on_stack(&(_work)->timer,		\
+				       delayed_work_timer_fn,	\
+				       (unsigned long)(_work),	\
+				       (_tflags) | TIMER_IRQSAFE); \
 	} while (0)
 
-#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
-	do {							\
-		INIT_WORK(&(_work)->work, (_func));		\
-		init_timer_deferrable(&(_work)->timer);		\
-	} while (0)
+#define INIT_DELAYED_WORK(_work, _func)				\
+	__INIT_DELAYED_WORK(_work, _func, 0)
+
+#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
+	__INIT_DELAYED_WORK_ONSTACK(_work, _func, 0)
+
+#define INIT_DEFERRABLE_WORK(_work, _func)			\
+	__INIT_DELAYED_WORK(_work, _func, TIMER_DEFERRABLE)
+
+#define INIT_DEFERRABLE_WORK_ONSTACK(_work, _func)		\
+	__INIT_DELAYED_WORK_ONSTACK(_work, _func, TIMER_DEFERRABLE)
 
224/** 243/**
225 * work_pending - Find out whether a work item is currently pending 244 * work_pending - Find out whether a work item is currently pending
@@ -278,10 +297,6 @@ enum {
  * system_long_wq is similar to system_wq but may host long running
  * works.  Queue flushing might take relatively long.
  *
- * system_nrt_wq is non-reentrant and guarantees that any given work
- * item is never executed in parallel by multiple CPUs.  Queue
- * flushing might take relatively long.
- *
  * system_unbound_wq is unbound workqueue.  Workers are not bound to
  * any specific CPU, not concurrency managed, and all queued works are
  * executed immediately as long as max_active limit is not reached and
@@ -289,16 +304,25 @@ enum {
289 * 304 *
290 * system_freezable_wq is equivalent to system_wq except that it's 305 * system_freezable_wq is equivalent to system_wq except that it's
291 * freezable. 306 * freezable.
292 *
293 * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
294 * it's freezable.
295 */ 307 */
296extern struct workqueue_struct *system_wq; 308extern struct workqueue_struct *system_wq;
297extern struct workqueue_struct *system_long_wq; 309extern struct workqueue_struct *system_long_wq;
298extern struct workqueue_struct *system_nrt_wq;
299extern struct workqueue_struct *system_unbound_wq; 310extern struct workqueue_struct *system_unbound_wq;
300extern struct workqueue_struct *system_freezable_wq; 311extern struct workqueue_struct *system_freezable_wq;
301extern struct workqueue_struct *system_nrt_freezable_wq; 312
313static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
314{
315 return system_wq;
316}
317
318static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
319{
320 return system_freezable_wq;
321}
322
323/* equivlalent to system_wq and system_freezable_wq, deprecated */
324#define system_nrt_wq __system_nrt_wq()
325#define system_nrt_freezable_wq __system_nrt_freezable_wq()
302 326
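Since every workqueue is now non-reentrant, callers of the deprecated queues migrate by simply dropping the _nrt_ name; a before/after sketch with a hypothetical work item w:

	/* before: */
	queue_work(system_nrt_wq, &w);

	/* after -- ordinary queues now give the same guarantee: */
	queue_work(system_wq, &w);	/* or simply schedule_work(&w) */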
303extern struct workqueue_struct * 327extern struct workqueue_struct *
304__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, 328__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
@@ -321,22 +345,22 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
321 * Pointer to the allocated workqueue on success, %NULL on failure. 345 * Pointer to the allocated workqueue on success, %NULL on failure.
322 */ 346 */
323#ifdef CONFIG_LOCKDEP 347#ifdef CONFIG_LOCKDEP
324#define alloc_workqueue(fmt, flags, max_active, args...) \ 348#define alloc_workqueue(fmt, flags, max_active, args...) \
325({ \ 349({ \
326 static struct lock_class_key __key; \ 350 static struct lock_class_key __key; \
327 const char *__lock_name; \ 351 const char *__lock_name; \
328 \ 352 \
329 if (__builtin_constant_p(fmt)) \ 353 if (__builtin_constant_p(fmt)) \
330 __lock_name = (fmt); \ 354 __lock_name = (fmt); \
331 else \ 355 else \
332 __lock_name = #fmt; \ 356 __lock_name = #fmt; \
333 \ 357 \
334 __alloc_workqueue_key((fmt), (flags), (max_active), \ 358 __alloc_workqueue_key((fmt), (flags), (max_active), \
335 &__key, __lock_name, ##args); \ 359 &__key, __lock_name, ##args); \
336}) 360})
337#else 361#else
338#define alloc_workqueue(fmt, flags, max_active, args...) \ 362#define alloc_workqueue(fmt, flags, max_active, args...) \
339 __alloc_workqueue_key((fmt), (flags), (max_active), \ 363 __alloc_workqueue_key((fmt), (flags), (max_active), \
340 NULL, NULL, ##args) 364 NULL, NULL, ##args)
341#endif 365#endif
342 366
@@ -353,46 +377,50 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
353 * RETURNS: 377 * RETURNS:
354 * Pointer to the allocated workqueue on success, %NULL on failure. 378 * Pointer to the allocated workqueue on success, %NULL on failure.
355 */ 379 */
356#define alloc_ordered_workqueue(fmt, flags, args...) \ 380#define alloc_ordered_workqueue(fmt, flags, args...) \
357 alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args) 381 alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
358 382
359#define create_workqueue(name) \ 383#define create_workqueue(name) \
360 alloc_workqueue((name), WQ_MEM_RECLAIM, 1) 384 alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
361#define create_freezable_workqueue(name) \ 385#define create_freezable_workqueue(name) \
362 alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 386 alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
363#define create_singlethread_workqueue(name) \ 387#define create_singlethread_workqueue(name) \
364 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 388 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
365 389
366extern void destroy_workqueue(struct workqueue_struct *wq); 390extern void destroy_workqueue(struct workqueue_struct *wq);
367 391
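A sketch of the allocation/teardown pairing, using a hypothetical my_wq; a max_active of 0 requests the default limit, and alloc_ordered_workqueue() could be substituted for strict one-at-a-time FIFO execution:

static struct workqueue_struct *my_wq;

static int example_setup(void)
{
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void example_teardown(void)
{
	destroy_workqueue(my_wq);	/* drains queued work, then frees */
}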
368extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); 392extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
369extern int queue_work_on(int cpu, struct workqueue_struct *wq,
370 struct work_struct *work); 393 struct work_struct *work);
371extern int queue_delayed_work(struct workqueue_struct *wq, 394extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
395extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
372 struct delayed_work *work, unsigned long delay); 396 struct delayed_work *work, unsigned long delay);
373extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 397extern bool queue_delayed_work(struct workqueue_struct *wq,
374 struct delayed_work *work, unsigned long delay); 398 struct delayed_work *work, unsigned long delay);
399extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
400 struct delayed_work *dwork, unsigned long delay);
401extern bool mod_delayed_work(struct workqueue_struct *wq,
402 struct delayed_work *dwork, unsigned long delay);
375 403
376extern void flush_workqueue(struct workqueue_struct *wq); 404extern void flush_workqueue(struct workqueue_struct *wq);
377extern void drain_workqueue(struct workqueue_struct *wq); 405extern void drain_workqueue(struct workqueue_struct *wq);
378extern void flush_scheduled_work(void); 406extern void flush_scheduled_work(void);
379 407
380extern int schedule_work(struct work_struct *work); 408extern bool schedule_work_on(int cpu, struct work_struct *work);
381extern int schedule_work_on(int cpu, struct work_struct *work); 409extern bool schedule_work(struct work_struct *work);
382extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); 410extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
383extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, 411 unsigned long delay);
384 unsigned long delay); 412extern bool schedule_delayed_work(struct delayed_work *work,
413 unsigned long delay);
385extern int schedule_on_each_cpu(work_func_t func); 414extern int schedule_on_each_cpu(work_func_t func);
386extern int keventd_up(void); 415extern int keventd_up(void);
387 416
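The schedule_*() conveniences now return bool like their queue_*() counterparts; a sketch with a hypothetical event handler, where %false simply means the item was already pending and the request got coalesced:

static void ev_fn(struct work_struct *work)
{
	/* handle the event */
}
static DECLARE_WORK(ev_work, ev_fn);

static void post_event(void)
{
	if (!schedule_work(&ev_work))	/* false: already pending */
		pr_debug("event coalesced into pending work\n");
}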
388int execute_in_process_context(work_func_t fn, struct execute_work *); 417int execute_in_process_context(work_func_t fn, struct execute_work *);
389 418
390extern bool flush_work(struct work_struct *work); 419extern bool flush_work(struct work_struct *work);
391extern bool flush_work_sync(struct work_struct *work);
392extern bool cancel_work_sync(struct work_struct *work); 420extern bool cancel_work_sync(struct work_struct *work);
393 421
394extern bool flush_delayed_work(struct delayed_work *dwork); 422extern bool flush_delayed_work(struct delayed_work *dwork);
395extern bool flush_delayed_work_sync(struct delayed_work *work); 423extern bool cancel_delayed_work(struct delayed_work *dwork);
396extern bool cancel_delayed_work_sync(struct delayed_work *dwork); 424extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
397 425
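A typical teardown sketch for the cancel primitives above, reusing the hypothetical poll_work and ev_work items from the earlier sketches; cancel_delayed_work_sync() covers the timer-pending, queued and currently-running cases alike:

static void example_exit(void)
{
	cancel_delayed_work_sync(&poll_work);	/* timer gone, handler finished */
	cancel_work_sync(&ev_work);		/* ditto for the plain work item */
}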
398extern void workqueue_set_max_active(struct workqueue_struct *wq, 426extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -402,27 +430,11 @@ extern unsigned int work_cpu(struct work_struct *work);
402extern unsigned int work_busy(struct work_struct *work); 430extern unsigned int work_busy(struct work_struct *work);
403 431
404/* 432/*
405 * Kill off a pending schedule_delayed_work(). Note that the work callback
406 * function may still be running on return from cancel_delayed_work(), unless
407 * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
408 * cancel_work_sync() to wait on it.
409 */
410static inline bool cancel_delayed_work(struct delayed_work *work)
411{
412 bool ret;
413
414 ret = del_timer_sync(&work->timer);
415 if (ret)
416 work_clear_pending(&work->work);
417 return ret;
418}
419
420/*
421 * Like above, but uses del_timer() instead of del_timer_sync(). This means, 433 * Like above, but uses del_timer() instead of del_timer_sync(). This means,
422 * if it returns 0 the timer function may be running and the queueing is in 434 * if it returns 0 the timer function may be running and the queueing is in
423 * progress. 435 * progress.
424 */ 436 */
425static inline bool __cancel_delayed_work(struct delayed_work *work) 437static inline bool __deprecated __cancel_delayed_work(struct delayed_work *work)
426{ 438{
427 bool ret; 439 bool ret;
428 440
@@ -432,6 +444,18 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
432 return ret; 444 return ret;
433} 445}
434 446
447/* used to be different but now identical to flush_work(), deprecated */
448static inline bool __deprecated flush_work_sync(struct work_struct *work)
449{
450 return flush_work(work);
451}
452
453/* used to be different but now identical to flush_delayed_work(), deprecated */
454static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwork)
455{
456 return flush_delayed_work(dwork);
457}
458
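Callers migrate off the deprecated wrappers mechanically, since flush_work() and flush_delayed_work() now provide the same guarantee (w and dw are hypothetical items):

	/* before: flush_work_sync(&w); */
	flush_work(&w);
	/* before: flush_delayed_work_sync(&dw); */
	flush_delayed_work(&dw);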
435#ifndef CONFIG_SMP 459#ifndef CONFIG_SMP
436static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 460static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
437{ 461{
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 2095be3318d5..97c465ebd844 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -379,7 +379,7 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
379 rcu_batch_queue(&sp->batch_queue, head); 379 rcu_batch_queue(&sp->batch_queue, head);
380 if (!sp->running) { 380 if (!sp->running) {
381 sp->running = true; 381 sp->running = true;
382 queue_delayed_work(system_nrt_wq, &sp->work, 0); 382 schedule_delayed_work(&sp->work, 0);
383 } 383 }
384 spin_unlock_irqrestore(&sp->queue_lock, flags); 384 spin_unlock_irqrestore(&sp->queue_lock, flags);
385} 385}
@@ -631,7 +631,7 @@ static void srcu_reschedule(struct srcu_struct *sp)
631 } 631 }
632 632
633 if (pending) 633 if (pending)
634 queue_delayed_work(system_nrt_wq, &sp->work, SRCU_INTERVAL); 634 schedule_delayed_work(&sp->work, SRCU_INTERVAL);
635} 635}
636 636
637/* 637/*
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3c5a79e2134c..d951daa0ca9a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -58,7 +58,7 @@ enum {
58 * be executing on any CPU. The gcwq behaves as an unbound one. 58 * be executing on any CPU. The gcwq behaves as an unbound one.
59 * 59 *
60 * Note that DISASSOCIATED can be flipped only while holding 60 * Note that DISASSOCIATED can be flipped only while holding
61 * managership of all pools on the gcwq to avoid changing binding 61 * assoc_mutex of all pools on the gcwq to avoid changing binding
62 * state while create_worker() is in progress. 62 * state while create_worker() is in progress.
63 */ 63 */
64 GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */ 64 GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */
@@ -73,11 +73,10 @@ enum {
73 WORKER_DIE = 1 << 1, /* die die die */ 73 WORKER_DIE = 1 << 1, /* die die die */
74 WORKER_IDLE = 1 << 2, /* is idle */ 74 WORKER_IDLE = 1 << 2, /* is idle */
75 WORKER_PREP = 1 << 3, /* preparing to run works */ 75 WORKER_PREP = 1 << 3, /* preparing to run works */
76 WORKER_REBIND = 1 << 5, /* mom is home, come back */
77 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */ 76 WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
78 WORKER_UNBOUND = 1 << 7, /* worker is unbound */ 77 WORKER_UNBOUND = 1 << 7, /* worker is unbound */
79 78
80 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND | 79 WORKER_NOT_RUNNING = WORKER_PREP | WORKER_UNBOUND |
81 WORKER_CPU_INTENSIVE, 80 WORKER_CPU_INTENSIVE,
82 81
83 NR_WORKER_POOLS = 2, /* # worker pools per gcwq */ 82 NR_WORKER_POOLS = 2, /* # worker pools per gcwq */
@@ -126,7 +125,6 @@ enum {
126 125
127struct global_cwq; 126struct global_cwq;
128struct worker_pool; 127struct worker_pool;
129struct idle_rebind;
130 128
131/* 129/*
132 * The poor guys doing the actual heavy lifting. All on-duty workers 130 * The poor guys doing the actual heavy lifting. All on-duty workers
@@ -150,7 +148,6 @@ struct worker {
150 int id; /* I: worker id */ 148 int id; /* I: worker id */
151 149
152 /* for rebinding worker to CPU */ 150 /* for rebinding worker to CPU */
153 struct idle_rebind *idle_rebind; /* L: for idle worker */
154 struct work_struct rebind_work; /* L: for busy worker */ 151 struct work_struct rebind_work; /* L: for busy worker */
155}; 152};
156 153
@@ -160,13 +157,15 @@ struct worker_pool {
160 157
161 struct list_head worklist; /* L: list of pending works */ 158 struct list_head worklist; /* L: list of pending works */
162 int nr_workers; /* L: total number of workers */ 159 int nr_workers; /* L: total number of workers */
160
161 /* nr_idle includes the ones off idle_list for rebinding */
163 int nr_idle; /* L: currently idle ones */ 162 int nr_idle; /* L: currently idle ones */
164 163
165 struct list_head idle_list; /* X: list of idle workers */ 164 struct list_head idle_list; /* X: list of idle workers */
166 struct timer_list idle_timer; /* L: worker idle timeout */ 165 struct timer_list idle_timer; /* L: worker idle timeout */
167 struct timer_list mayday_timer; /* L: SOS timer for workers */ 166 struct timer_list mayday_timer; /* L: SOS timer for workers */
168 167
169 struct mutex manager_mutex; /* mutex manager should hold */ 168 struct mutex assoc_mutex; /* protect GCWQ_DISASSOCIATED */
170 struct ida worker_ida; /* L: for worker IDs */ 169 struct ida worker_ida; /* L: for worker IDs */
171}; 170};
172 171
@@ -184,9 +183,8 @@ struct global_cwq {
184 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE]; 183 struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
185 /* L: hash of busy workers */ 184 /* L: hash of busy workers */
186 185
187 struct worker_pool pools[2]; /* normal and highpri pools */ 186 struct worker_pool pools[NR_WORKER_POOLS];
188 187 /* normal and highpri pools */
189 wait_queue_head_t rebind_hold; /* rebind hold wait */
190} ____cacheline_aligned_in_smp; 188} ____cacheline_aligned_in_smp;
191 189
192/* 190/*
@@ -269,17 +267,15 @@ struct workqueue_struct {
269}; 267};
270 268
271struct workqueue_struct *system_wq __read_mostly; 269struct workqueue_struct *system_wq __read_mostly;
272struct workqueue_struct *system_long_wq __read_mostly;
273struct workqueue_struct *system_nrt_wq __read_mostly;
274struct workqueue_struct *system_unbound_wq __read_mostly;
275struct workqueue_struct *system_freezable_wq __read_mostly;
276struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
277EXPORT_SYMBOL_GPL(system_wq); 270EXPORT_SYMBOL_GPL(system_wq);
271struct workqueue_struct *system_highpri_wq __read_mostly;
272EXPORT_SYMBOL_GPL(system_highpri_wq);
273struct workqueue_struct *system_long_wq __read_mostly;
278EXPORT_SYMBOL_GPL(system_long_wq); 274EXPORT_SYMBOL_GPL(system_long_wq);
279EXPORT_SYMBOL_GPL(system_nrt_wq); 275struct workqueue_struct *system_unbound_wq __read_mostly;
280EXPORT_SYMBOL_GPL(system_unbound_wq); 276EXPORT_SYMBOL_GPL(system_unbound_wq);
277struct workqueue_struct *system_freezable_wq __read_mostly;
281EXPORT_SYMBOL_GPL(system_freezable_wq); 278EXPORT_SYMBOL_GPL(system_freezable_wq);
282EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
283 279
284#define CREATE_TRACE_POINTS 280#define CREATE_TRACE_POINTS
285#include <trace/events/workqueue.h> 281#include <trace/events/workqueue.h>
@@ -534,18 +530,24 @@ static int work_next_color(int color)
534} 530}
535 531
536/* 532/*
537 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the 533 * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
538 * work is on queue. Once execution starts, WORK_STRUCT_CWQ is 534 * contain the pointer to the queued cwq. Once execution starts, the flag
539 * cleared and the work data contains the cpu number it was last on. 535 * is cleared and the high bits contain OFFQ flags and CPU number.
540 * 536 *
541 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the 537 * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
542 * cwq, cpu or clear work->data. These functions should only be 538 * and clear_work_data() can be used to set the cwq, cpu or clear
543 * called while the work is owned - ie. while the PENDING bit is set. 539 * work->data. These functions should only be called while the work is
540 * owned - ie. while the PENDING bit is set.
544 * 541 *
545 * get_work_[g]cwq() can be used to obtain the gcwq or cwq 542 * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
546 * corresponding to a work. gcwq is available once the work has been 543 * a work. gcwq is available once the work has been queued anywhere after
547 * queued anywhere after initialization. cwq is available only from 544 * initialization until it is sync canceled. cwq is available only while
548 * queueing until execution starts. 545 * the work item is queued.
546 *
547 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
548 * canceled. While being canceled, a work item may have its PENDING set
549 * but stay off timer and worklist for arbitrarily long and nobody should
550 * try to steal the PENDING bit.
549 */ 551 */
550static inline void set_work_data(struct work_struct *work, unsigned long data, 552static inline void set_work_data(struct work_struct *work, unsigned long data,
551 unsigned long flags) 553 unsigned long flags)
@@ -562,13 +564,22 @@ static void set_work_cwq(struct work_struct *work,
562 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags); 564 WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
563} 565}
564 566
565static void set_work_cpu(struct work_struct *work, unsigned int cpu) 567static void set_work_cpu_and_clear_pending(struct work_struct *work,
568 unsigned int cpu)
566{ 569{
567 set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING); 570 /*
571 * The following wmb is paired with the implied mb in
572 * test_and_set_bit(PENDING) and ensures all updates to @work made
573 * here are visible to and precede any updates by the next PENDING
574 * owner.
575 */
576 smp_wmb();
577 set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
568} 578}
569 579
570static void clear_work_data(struct work_struct *work) 580static void clear_work_data(struct work_struct *work)
571{ 581{
582 smp_wmb(); /* see set_work_cpu_and_clear_pending() */
572 set_work_data(work, WORK_STRUCT_NO_CPU, 0); 583 set_work_data(work, WORK_STRUCT_NO_CPU, 0);
573} 584}
574 585
@@ -591,7 +602,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
591 return ((struct cpu_workqueue_struct *) 602 return ((struct cpu_workqueue_struct *)
592 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq; 603 (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
593 604
594 cpu = data >> WORK_STRUCT_FLAG_BITS; 605 cpu = data >> WORK_OFFQ_CPU_SHIFT;
595 if (cpu == WORK_CPU_NONE) 606 if (cpu == WORK_CPU_NONE)
596 return NULL; 607 return NULL;
597 608
@@ -599,6 +610,22 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
599 return get_gcwq(cpu); 610 return get_gcwq(cpu);
600} 611}
601 612
613static void mark_work_canceling(struct work_struct *work)
614{
615 struct global_cwq *gcwq = get_work_gcwq(work);
616 unsigned long cpu = gcwq ? gcwq->cpu : WORK_CPU_NONE;
617
618 set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
619 WORK_STRUCT_PENDING);
620}
621
622static bool work_is_canceling(struct work_struct *work)
623{
624 unsigned long data = atomic_long_read(&work->data);
625
626 return !(data & WORK_STRUCT_CWQ) && (data & WORK_OFFQ_CANCELING);
627}
628
602/* 629/*
603 * Policy functions. These define the policies on how the global worker 630 * Policy functions. These define the policies on how the global worker
604 * pools are managed. Unless noted otherwise, these functions assume that 631 * pools are managed. Unless noted otherwise, these functions assume that
@@ -657,6 +684,13 @@ static bool too_many_workers(struct worker_pool *pool)
657 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ 684 int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
658 int nr_busy = pool->nr_workers - nr_idle; 685 int nr_busy = pool->nr_workers - nr_idle;
659 686
687 /*
688 * nr_idle and idle_list may disagree if idle rebinding is in
689 * progress. Never return %true if idle_list is empty.
690 */
691 if (list_empty(&pool->idle_list))
692 return false;
693
660 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy; 694 return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
661} 695}
662 696
@@ -903,6 +937,206 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
903} 937}
904 938
905/** 939/**
940 * move_linked_works - move linked works to a list
941 * @work: start of series of works to be scheduled
942 * @head: target list to append @work to
 943 * @nextp: out parameter for nested worklist walking
944 *
945 * Schedule linked works starting from @work to @head. Work series to
946 * be scheduled starts at @work and includes any consecutive work with
947 * WORK_STRUCT_LINKED set in its predecessor.
948 *
949 * If @nextp is not NULL, it's updated to point to the next work of
950 * the last scheduled work. This allows move_linked_works() to be
951 * nested inside outer list_for_each_entry_safe().
952 *
953 * CONTEXT:
954 * spin_lock_irq(gcwq->lock).
955 */
956static void move_linked_works(struct work_struct *work, struct list_head *head,
957 struct work_struct **nextp)
958{
959 struct work_struct *n;
960
961 /*
962 * Linked worklist will always end before the end of the list,
963 * use NULL for list head.
964 */
965 list_for_each_entry_safe_from(work, n, NULL, entry) {
966 list_move_tail(&work->entry, head);
967 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
968 break;
969 }
970
971 /*
972 * If we're already inside safe list traversal and have moved
973 * multiple works to the scheduled queue, the next position
974 * needs to be updated.
975 */
976 if (nextp)
977 *nextp = n;
978}
979
980static void cwq_activate_delayed_work(struct work_struct *work)
981{
982 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
983
984 trace_workqueue_activate_work(work);
985 move_linked_works(work, &cwq->pool->worklist, NULL);
986 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
987 cwq->nr_active++;
988}
989
990static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
991{
992 struct work_struct *work = list_first_entry(&cwq->delayed_works,
993 struct work_struct, entry);
994
995 cwq_activate_delayed_work(work);
996}
997
998/**
999 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1000 * @cwq: cwq of interest
1001 * @color: color of work which left the queue
1002 *
1003 * A work either has completed or is removed from pending queue,
1004 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1005 *
1006 * CONTEXT:
1007 * spin_lock_irq(gcwq->lock).
1008 */
1009static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
1010{
1011 /* ignore uncolored works */
1012 if (color == WORK_NO_COLOR)
1013 return;
1014
1015 cwq->nr_in_flight[color]--;
1016
1017 cwq->nr_active--;
1018 if (!list_empty(&cwq->delayed_works)) {
1019 /* one down, submit a delayed one */
1020 if (cwq->nr_active < cwq->max_active)
1021 cwq_activate_first_delayed(cwq);
1022 }
1023
1024 /* is flush in progress and are we at the flushing tip? */
1025 if (likely(cwq->flush_color != color))
1026 return;
1027
1028 /* are there still in-flight works? */
1029 if (cwq->nr_in_flight[color])
1030 return;
1031
1032 /* this cwq is done, clear flush_color */
1033 cwq->flush_color = -1;
1034
1035 /*
1036 * If this was the last cwq, wake up the first flusher. It
1037 * will handle the rest.
1038 */
1039 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1040 complete(&cwq->wq->first_flusher->done);
1041}
1042
1043/**
1044 * try_to_grab_pending - steal work item from worklist and disable irq
1045 * @work: work item to steal
1046 * @is_dwork: @work is a delayed_work
1047 * @flags: place to store irq state
1048 *
1049 * Try to grab PENDING bit of @work. This function can handle @work in any
1050 * stable state - idle, on timer or on worklist. Return values are
1051 *
1052 * 1 if @work was pending and we successfully stole PENDING
1053 * 0 if @work was idle and we claimed PENDING
1054 * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
1055 * -ENOENT if someone else is canceling @work, this state may persist
1056 * for arbitrarily long
1057 *
1058 * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
1059 * interrupted while holding PENDING and @work off queue, irq must be
1060 * disabled on entry. This, combined with delayed_work->timer being
 1061 * irqsafe, ensures that we return -EAGAIN for a finite short period of time.
1062 *
1063 * On successful return, >= 0, irq is disabled and the caller is
1064 * responsible for releasing it using local_irq_restore(*@flags).
1065 *
1066 * This function is safe to call from any context including IRQ handler.
1067 */
1068static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1069 unsigned long *flags)
1070{
1071 struct global_cwq *gcwq;
1072
1073 local_irq_save(*flags);
1074
1075 /* try to steal the timer if it exists */
1076 if (is_dwork) {
1077 struct delayed_work *dwork = to_delayed_work(work);
1078
1079 /*
1080 * dwork->timer is irqsafe. If del_timer() fails, it's
1081 * guaranteed that the timer is not queued anywhere and not
1082 * running on the local CPU.
1083 */
1084 if (likely(del_timer(&dwork->timer)))
1085 return 1;
1086 }
1087
1088 /* try to claim PENDING the normal way */
1089 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1090 return 0;
1091
1092 /*
1093 * The queueing is in progress, or it is already queued. Try to
1094 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1095 */
1096 gcwq = get_work_gcwq(work);
1097 if (!gcwq)
1098 goto fail;
1099
1100 spin_lock(&gcwq->lock);
1101 if (!list_empty(&work->entry)) {
1102 /*
1103 * This work is queued, but perhaps we locked the wrong gcwq.
1104 * In that case we must see the new value after rmb(), see
1105 * insert_work()->wmb().
1106 */
1107 smp_rmb();
1108 if (gcwq == get_work_gcwq(work)) {
1109 debug_work_deactivate(work);
1110
1111 /*
1112 * A delayed work item cannot be grabbed directly
1113 * because it might have linked NO_COLOR work items
1114 * which, if left on the delayed_list, will confuse
1115 * cwq->nr_active management later on and cause
1116 * stall. Make sure the work item is activated
1117 * before grabbing.
1118 */
1119 if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1120 cwq_activate_delayed_work(work);
1121
1122 list_del_init(&work->entry);
1123 cwq_dec_nr_in_flight(get_work_cwq(work),
1124 get_work_color(work));
1125
1126 spin_unlock(&gcwq->lock);
1127 return 1;
1128 }
1129 }
1130 spin_unlock(&gcwq->lock);
1131fail:
1132 local_irq_restore(*flags);
1133 if (work_is_canceling(work))
1134 return -ENOENT;
1135 cpu_relax();
1136 return -EAGAIN;
1137}
1138
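A hedged sketch of the calling convention, modeled on how the cancel path in this file uses it: busy-retry on -EAGAIN, and on a >= 0 return the caller owns PENDING with IRQs off until it restores @flags (the wrapper function is hypothetical):

static bool grab_dwork(struct delayed_work *dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(&dwork->work, true, &flags);
	} while (unlikely(ret == -EAGAIN));

	if (ret < 0)		/* -ENOENT: someone else is canceling */
		return false;

	/* we own PENDING and @dwork is off timer and worklist, irqs off */
	local_irq_restore(flags);
	return ret;
}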
1139/**
906 * insert_work - insert a work into gcwq 1140 * insert_work - insert a work into gcwq
907 * @cwq: cwq @work belongs to 1141 * @cwq: cwq @work belongs to
908 * @work: work to insert 1142 * @work: work to insert
@@ -982,7 +1216,15 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
982 struct cpu_workqueue_struct *cwq; 1216 struct cpu_workqueue_struct *cwq;
983 struct list_head *worklist; 1217 struct list_head *worklist;
984 unsigned int work_flags; 1218 unsigned int work_flags;
985 unsigned long flags; 1219 unsigned int req_cpu = cpu;
1220
1221 /*
1222 * While a work item is PENDING && off queue, a task trying to
1223 * steal the PENDING will busy-loop waiting for it to either get
1224 * queued or lose PENDING. Grabbing PENDING and queueing should
1225 * happen with IRQ disabled.
1226 */
1227 WARN_ON_ONCE(!irqs_disabled());
986 1228
987 debug_work_activate(work); 1229 debug_work_activate(work);
988 1230
@@ -995,21 +1237,22 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
995 if (!(wq->flags & WQ_UNBOUND)) { 1237 if (!(wq->flags & WQ_UNBOUND)) {
996 struct global_cwq *last_gcwq; 1238 struct global_cwq *last_gcwq;
997 1239
998 if (unlikely(cpu == WORK_CPU_UNBOUND)) 1240 if (cpu == WORK_CPU_UNBOUND)
999 cpu = raw_smp_processor_id(); 1241 cpu = raw_smp_processor_id();
1000 1242
1001 /* 1243 /*
1002 * It's multi cpu. If @wq is non-reentrant and @work 1244 * It's multi cpu. If @work was previously on a different
1003 * was previously on a different cpu, it might still 1245 * cpu, it might still be running there, in which case the
1004 * be running there, in which case the work needs to 1246 * work needs to be queued on that cpu to guarantee
1005 * be queued on that cpu to guarantee non-reentrance. 1247 * non-reentrancy.
1006 */ 1248 */
1007 gcwq = get_gcwq(cpu); 1249 gcwq = get_gcwq(cpu);
1008 if (wq->flags & WQ_NON_REENTRANT && 1250 last_gcwq = get_work_gcwq(work);
1009 (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) { 1251
1252 if (last_gcwq && last_gcwq != gcwq) {
1010 struct worker *worker; 1253 struct worker *worker;
1011 1254
1012 spin_lock_irqsave(&last_gcwq->lock, flags); 1255 spin_lock(&last_gcwq->lock);
1013 1256
1014 worker = find_worker_executing_work(last_gcwq, work); 1257 worker = find_worker_executing_work(last_gcwq, work);
1015 1258
@@ -1017,22 +1260,23 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1017 gcwq = last_gcwq; 1260 gcwq = last_gcwq;
1018 else { 1261 else {
1019 /* meh... not running there, queue here */ 1262 /* meh... not running there, queue here */
1020 spin_unlock_irqrestore(&last_gcwq->lock, flags); 1263 spin_unlock(&last_gcwq->lock);
1021 spin_lock_irqsave(&gcwq->lock, flags); 1264 spin_lock(&gcwq->lock);
1022 } 1265 }
1023 } else 1266 } else {
1024 spin_lock_irqsave(&gcwq->lock, flags); 1267 spin_lock(&gcwq->lock);
1268 }
1025 } else { 1269 } else {
1026 gcwq = get_gcwq(WORK_CPU_UNBOUND); 1270 gcwq = get_gcwq(WORK_CPU_UNBOUND);
1027 spin_lock_irqsave(&gcwq->lock, flags); 1271 spin_lock(&gcwq->lock);
1028 } 1272 }
1029 1273
1030 /* gcwq determined, get cwq and queue */ 1274 /* gcwq determined, get cwq and queue */
1031 cwq = get_cwq(gcwq->cpu, wq); 1275 cwq = get_cwq(gcwq->cpu, wq);
1032 trace_workqueue_queue_work(cpu, cwq, work); 1276 trace_workqueue_queue_work(req_cpu, cwq, work);
1033 1277
1034 if (WARN_ON(!list_empty(&work->entry))) { 1278 if (WARN_ON(!list_empty(&work->entry))) {
1035 spin_unlock_irqrestore(&gcwq->lock, flags); 1279 spin_unlock(&gcwq->lock);
1036 return; 1280 return;
1037 } 1281 }
1038 1282
@@ -1050,79 +1294,110 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1050 1294
1051 insert_work(cwq, work, worklist, work_flags); 1295 insert_work(cwq, work, worklist, work_flags);
1052 1296
1053 spin_unlock_irqrestore(&gcwq->lock, flags); 1297 spin_unlock(&gcwq->lock);
1054} 1298}
1055 1299
1056/** 1300/**
1057 * queue_work - queue work on a workqueue 1301 * queue_work_on - queue work on specific cpu
1302 * @cpu: CPU number to execute work on
1058 * @wq: workqueue to use 1303 * @wq: workqueue to use
1059 * @work: work to queue 1304 * @work: work to queue
1060 * 1305 *
1061 * Returns 0 if @work was already on a queue, non-zero otherwise. 1306 * Returns %false if @work was already on a queue, %true otherwise.
1062 * 1307 *
1063 * We queue the work to the CPU on which it was submitted, but if the CPU dies 1308 * We queue the work to a specific CPU, the caller must ensure it
1064 * it can be processed by another CPU. 1309 * can't go away.
1065 */ 1310 */
1066int queue_work(struct workqueue_struct *wq, struct work_struct *work) 1311bool queue_work_on(int cpu, struct workqueue_struct *wq,
1312 struct work_struct *work)
1067{ 1313{
1068 int ret; 1314 bool ret = false;
1315 unsigned long flags;
1069 1316
1070 ret = queue_work_on(get_cpu(), wq, work); 1317 local_irq_save(flags);
1071 put_cpu(); 1318
1319 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1320 __queue_work(cpu, wq, work);
1321 ret = true;
1322 }
1072 1323
1324 local_irq_restore(flags);
1073 return ret; 1325 return ret;
1074} 1326}
1075EXPORT_SYMBOL_GPL(queue_work); 1327EXPORT_SYMBOL_GPL(queue_work_on);
1076 1328
1077/** 1329/**
1078 * queue_work_on - queue work on specific cpu 1330 * queue_work - queue work on a workqueue
1079 * @cpu: CPU number to execute work on
1080 * @wq: workqueue to use 1331 * @wq: workqueue to use
1081 * @work: work to queue 1332 * @work: work to queue
1082 * 1333 *
1083 * Returns 0 if @work was already on a queue, non-zero otherwise. 1334 * Returns %false if @work was already on a queue, %true otherwise.
1084 * 1335 *
1085 * We queue the work to a specific CPU, the caller must ensure it 1336 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1086 * can't go away. 1337 * it can be processed by another CPU.
1087 */ 1338 */
1088int 1339bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
1089queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1090{ 1340{
1091 int ret = 0; 1341 return queue_work_on(WORK_CPU_UNBOUND, wq, work);
1092
1093 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1094 __queue_work(cpu, wq, work);
1095 ret = 1;
1096 }
1097 return ret;
1098} 1342}
1099EXPORT_SYMBOL_GPL(queue_work_on); 1343EXPORT_SYMBOL_GPL(queue_work);
1100 1344
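A sketch of pinning a work item to the submitting CPU -- essentially what the old queue_work() implementation did; get_cpu()/put_cpu() keep the CPU from going away while queueing (stats_fn and stats_work are hypothetical):

static void stats_fn(struct work_struct *work)
{
	/* runs on the CPU it was queued on, as long as it stays online */
}
static DECLARE_WORK(stats_work, stats_fn);

static void kick_stats(void)
{
	int cpu = get_cpu();	/* preemption off: CPU can't go away */

	queue_work_on(cpu, system_wq, &stats_work);
	put_cpu();
}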
1101static void delayed_work_timer_fn(unsigned long __data) 1345void delayed_work_timer_fn(unsigned long __data)
1102{ 1346{
1103 struct delayed_work *dwork = (struct delayed_work *)__data; 1347 struct delayed_work *dwork = (struct delayed_work *)__data;
1104 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work); 1348 struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1105 1349
1106 __queue_work(smp_processor_id(), cwq->wq, &dwork->work); 1350 /* should have been called from irqsafe timer with irq already off */
1351 __queue_work(dwork->cpu, cwq->wq, &dwork->work);
1107} 1352}
1353EXPORT_SYMBOL_GPL(delayed_work_timer_fn);
1108 1354
1109/** 1355static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1110 * queue_delayed_work - queue work on a workqueue after delay 1356 struct delayed_work *dwork, unsigned long delay)
1111 * @wq: workqueue to use
1112 * @dwork: delayable work to queue
1113 * @delay: number of jiffies to wait before queueing
1114 *
1115 * Returns 0 if @work was already on a queue, non-zero otherwise.
1116 */
1117int queue_delayed_work(struct workqueue_struct *wq,
1118 struct delayed_work *dwork, unsigned long delay)
1119{ 1357{
1120 if (delay == 0) 1358 struct timer_list *timer = &dwork->timer;
1121 return queue_work(wq, &dwork->work); 1359 struct work_struct *work = &dwork->work;
1360 unsigned int lcpu;
1361
1362 WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1363 timer->data != (unsigned long)dwork);
1364 BUG_ON(timer_pending(timer));
1365 BUG_ON(!list_empty(&work->entry));
1366
1367 timer_stats_timer_set_start_info(&dwork->timer);
1368
1369 /*
1370 * This stores cwq for the moment, for the timer_fn. Note that the
1371 * work's gcwq is preserved to allow reentrance detection for
1372 * delayed works.
1373 */
1374 if (!(wq->flags & WQ_UNBOUND)) {
1375 struct global_cwq *gcwq = get_work_gcwq(work);
1122 1376
1123 return queue_delayed_work_on(-1, wq, dwork, delay); 1377 /*
1378 * If we cannot get the last gcwq from @work directly,
1379 * select the last CPU such that it avoids unnecessarily
1380 * triggering non-reentrancy check in __queue_work().
1381 */
1382 lcpu = cpu;
1383 if (gcwq)
1384 lcpu = gcwq->cpu;
1385 if (lcpu == WORK_CPU_UNBOUND)
1386 lcpu = raw_smp_processor_id();
1387 } else {
1388 lcpu = WORK_CPU_UNBOUND;
1389 }
1390
1391 set_work_cwq(work, get_cwq(lcpu, wq), 0);
1392
1393 dwork->cpu = cpu;
1394 timer->expires = jiffies + delay;
1395
1396 if (unlikely(cpu != WORK_CPU_UNBOUND))
1397 add_timer_on(timer, cpu);
1398 else
1399 add_timer(timer);
1124} 1400}
1125EXPORT_SYMBOL_GPL(queue_delayed_work);
1126 1401
1127/** 1402/**
1128 * queue_delayed_work_on - queue work on specific CPU after delay 1403 * queue_delayed_work_on - queue work on specific CPU after delay
@@ -1131,53 +1406,100 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
1131 * @dwork: work to queue 1406 * @dwork: work to queue
1132 * @delay: number of jiffies to wait before queueing 1407 * @delay: number of jiffies to wait before queueing
1133 * 1408 *
1134 * Returns 0 if @work was already on a queue, non-zero otherwise. 1409 * Returns %false if @work was already on a queue, %true otherwise. If
1410 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1411 * execution.
1135 */ 1412 */
1136int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 1413bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1137 struct delayed_work *dwork, unsigned long delay) 1414 struct delayed_work *dwork, unsigned long delay)
1138{ 1415{
1139 int ret = 0;
1140 struct timer_list *timer = &dwork->timer;
1141 struct work_struct *work = &dwork->work; 1416 struct work_struct *work = &dwork->work;
1417 bool ret = false;
1418 unsigned long flags;
1142 1419
1143 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { 1420 if (!delay)
1144 unsigned int lcpu; 1421 return queue_work_on(cpu, wq, &dwork->work);
1145 1422
1146 BUG_ON(timer_pending(timer)); 1423 /* read the comment in __queue_work() */
1147 BUG_ON(!list_empty(&work->entry)); 1424 local_irq_save(flags);
1148 1425
1149 timer_stats_timer_set_start_info(&dwork->timer); 1426 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1427 __queue_delayed_work(cpu, wq, dwork, delay);
1428 ret = true;
1429 }
1150 1430
1151 /* 1431 local_irq_restore(flags);
1152 * This stores cwq for the moment, for the timer_fn. 1432 return ret;
1153 * Note that the work's gcwq is preserved to allow 1433}
1154 * reentrance detection for delayed works. 1434EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1155 */
1156 if (!(wq->flags & WQ_UNBOUND)) {
1157 struct global_cwq *gcwq = get_work_gcwq(work);
1158 1435
1159 if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND) 1436/**
1160 lcpu = gcwq->cpu; 1437 * queue_delayed_work - queue work on a workqueue after delay
1161 else 1438 * @wq: workqueue to use
1162 lcpu = raw_smp_processor_id(); 1439 * @dwork: delayable work to queue
1163 } else 1440 * @delay: number of jiffies to wait before queueing
1164 lcpu = WORK_CPU_UNBOUND; 1441 *
1442 * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
1443 */
1444bool queue_delayed_work(struct workqueue_struct *wq,
1445 struct delayed_work *dwork, unsigned long delay)
1446{
1447 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
1448}
1449EXPORT_SYMBOL_GPL(queue_delayed_work);
1165 1450
1166 set_work_cwq(work, get_cwq(lcpu, wq), 0); 1451/**
1452 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1453 * @cpu: CPU number to execute work on
1454 * @wq: workqueue to use
1455 * @dwork: work to queue
1456 * @delay: number of jiffies to wait before queueing
1457 *
1458 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1459 * modify @dwork's timer so that it expires after @delay. If @delay is
1460 * zero, @work is guaranteed to be scheduled immediately regardless of its
1461 * current state.
1462 *
1463 * Returns %false if @dwork was idle and queued, %true if @dwork was
1464 * pending and its timer was modified.
1465 *
1466 * This function is safe to call from any context including IRQ handler.
1467 * See try_to_grab_pending() for details.
1468 */
1469bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1470 struct delayed_work *dwork, unsigned long delay)
1471{
1472 unsigned long flags;
1473 int ret;
1167 1474
1168 timer->expires = jiffies + delay; 1475 do {
1169 timer->data = (unsigned long)dwork; 1476 ret = try_to_grab_pending(&dwork->work, true, &flags);
1170 timer->function = delayed_work_timer_fn; 1477 } while (unlikely(ret == -EAGAIN));
1171 1478
1172 if (unlikely(cpu >= 0)) 1479 if (likely(ret >= 0)) {
1173 add_timer_on(timer, cpu); 1480 __queue_delayed_work(cpu, wq, dwork, delay);
1174 else 1481 local_irq_restore(flags);
1175 add_timer(timer);
1176 ret = 1;
1177 } 1482 }
1483
1484 /* -ENOENT from try_to_grab_pending() becomes %true */
1178 return ret; 1485 return ret;
1179} 1486}
1180EXPORT_SYMBOL_GPL(queue_delayed_work_on); 1487EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1488
1489/**
1490 * mod_delayed_work - modify delay of or queue a delayed work
1491 * @wq: workqueue to use
1492 * @dwork: work to queue
1493 * @delay: number of jiffies to wait before queueing
1494 *
1495 * mod_delayed_work_on() on local CPU.
1496 */
1497bool mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
1498 unsigned long delay)
1499{
1500 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
1501}
1502EXPORT_SYMBOL_GPL(mod_delayed_work);
1181 1503
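The canonical use of the new mod_delayed_work() is a debounced timeout: every call (re)arms a single timer, and it is safe from any context including IRQ handlers (flush_fn, flush_dwork and the 100ms window are hypothetical):

static void flush_fn(struct work_struct *work)
{
	/* write back accumulated state after 100ms of quiet */
}
static DECLARE_DELAYED_WORK(flush_dwork, flush_fn);

static void touch(void)
{
	/* push the deadline out; queues if idle, retimes if pending */
	mod_delayed_work(system_wq, &flush_dwork, msecs_to_jiffies(100));
}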
1182/** 1504/**
1183 * worker_enter_idle - enter idle state 1505 * worker_enter_idle - enter idle state
@@ -1305,37 +1627,21 @@ __acquires(&gcwq->lock)
1305 } 1627 }
1306} 1628}
1307 1629
1308struct idle_rebind {
1309 int cnt; /* # workers to be rebound */
1310 struct completion done; /* all workers rebound */
1311};
1312
1313/* 1630/*
1314 * Rebind an idle @worker to its CPU. During CPU onlining, this has to 1631 * Rebind an idle @worker to its CPU. worker_thread() will test
1315 * happen synchronously for idle workers. worker_thread() will test 1632 * list_empty(@worker->entry) before leaving idle and call this function.
1316 * %WORKER_REBIND before leaving idle and call this function.
1317 */ 1633 */
1318static void idle_worker_rebind(struct worker *worker) 1634static void idle_worker_rebind(struct worker *worker)
1319{ 1635{
1320 struct global_cwq *gcwq = worker->pool->gcwq; 1636 struct global_cwq *gcwq = worker->pool->gcwq;
1321 1637
 1322 /* CPU must be online at this point */ 1638 /* CPU may go down again in between, clear UNBOUND only on success */
1323 WARN_ON(!worker_maybe_bind_and_lock(worker)); 1639 if (worker_maybe_bind_and_lock(worker))
1324 if (!--worker->idle_rebind->cnt) 1640 worker_clr_flags(worker, WORKER_UNBOUND);
1325 complete(&worker->idle_rebind->done);
1326 spin_unlock_irq(&worker->pool->gcwq->lock);
1327 1641
1328 /* we did our part, wait for rebind_workers() to finish up */ 1642 /* rebind complete, become available again */
1329 wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND)); 1643 list_add(&worker->entry, &worker->pool->idle_list);
1330 1644 spin_unlock_irq(&gcwq->lock);
1331 /*
1332 * rebind_workers() shouldn't finish until all workers passed the
1333 * above WORKER_REBIND wait. Tell it when done.
1334 */
1335 spin_lock_irq(&worker->pool->gcwq->lock);
1336 if (!--worker->idle_rebind->cnt)
1337 complete(&worker->idle_rebind->done);
1338 spin_unlock_irq(&worker->pool->gcwq->lock);
1339} 1645}
1340 1646
1341/* 1647/*
@@ -1349,16 +1655,8 @@ static void busy_worker_rebind_fn(struct work_struct *work)
1349 struct worker *worker = container_of(work, struct worker, rebind_work); 1655 struct worker *worker = container_of(work, struct worker, rebind_work);
1350 struct global_cwq *gcwq = worker->pool->gcwq; 1656 struct global_cwq *gcwq = worker->pool->gcwq;
1351 1657
1352 worker_maybe_bind_and_lock(worker); 1658 if (worker_maybe_bind_and_lock(worker))
1353 1659 worker_clr_flags(worker, WORKER_UNBOUND);
1354 /*
1355 * %WORKER_REBIND must be cleared even if the above binding failed;
1356 * otherwise, we may confuse the next CPU_UP cycle or oops / get
1357 * stuck by calling idle_worker_rebind() prematurely. If CPU went
 1358 * down again in between, %WORKER_UNBOUND would be set, so clearing
1359 * %WORKER_REBIND is always safe.
1360 */
1361 worker_clr_flags(worker, WORKER_REBIND);
1362 1660
1363 spin_unlock_irq(&gcwq->lock); 1661 spin_unlock_irq(&gcwq->lock);
1364} 1662}
@@ -1370,123 +1668,74 @@ static void busy_worker_rebind_fn(struct work_struct *work)
1370 * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding 1668 * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding
1371 * is different for idle and busy ones. 1669 * is different for idle and busy ones.
1372 * 1670 *
1373 * The idle ones should be rebound synchronously and idle rebinding should 1671 * Idle ones will be removed from the idle_list and woken up. They will
1374 * be complete before any worker starts executing work items with 1672 * add themselves back after completing rebind. This ensures that the
1375 * concurrency management enabled; otherwise, scheduler may oops trying to 1673 * idle_list doesn't contain any unbound workers when re-bound busy workers
1376 * wake up non-local idle worker from wq_worker_sleeping(). 1674 * try to perform local wake-ups for concurrency management.
1377 * 1675 *
1378 * This is achieved by repeatedly requesting rebinding until all idle 1676 * Busy workers can rebind after they finish their current work items.
1379 * workers are known to have been rebound under @gcwq->lock and holding all 1677 * Queueing the rebind work item at the head of the scheduled list is
1380 * idle workers from becoming busy until idle rebinding is complete. 1678 * enough. Note that nr_running will be properly bumped as busy workers
1679 * rebind.
1381 * 1680 *
1382 * Once idle workers are rebound, busy workers can be rebound as they 1681 * On return, all non-manager workers are scheduled for rebind - see
1383 * finish executing their current work items. Queueing the rebind work at 1682 * manage_workers() for the manager special case. Any idle worker
1384 * the head of their scheduled lists is enough. Note that nr_running will 1683 * including the manager will not appear on @idle_list until rebind is
 1385 * be properly bumped as busy workers rebind. 1684 * complete, making local wake-ups safe.
1386 *
1387 * On return, all workers are guaranteed to either be bound or have rebind
1388 * work item scheduled.
1389 */ 1685 */
1390static void rebind_workers(struct global_cwq *gcwq) 1686static void rebind_workers(struct global_cwq *gcwq)
1391 __releases(&gcwq->lock) __acquires(&gcwq->lock)
1392{ 1687{
1393 struct idle_rebind idle_rebind;
1394 struct worker_pool *pool; 1688 struct worker_pool *pool;
1395 struct worker *worker; 1689 struct worker *worker, *n;
1396 struct hlist_node *pos; 1690 struct hlist_node *pos;
1397 int i; 1691 int i;
1398 1692
1399 lockdep_assert_held(&gcwq->lock); 1693 lockdep_assert_held(&gcwq->lock);
1400 1694
1401 for_each_worker_pool(pool, gcwq) 1695 for_each_worker_pool(pool, gcwq)
1402 lockdep_assert_held(&pool->manager_mutex); 1696 lockdep_assert_held(&pool->assoc_mutex);
1403 1697
1404 /* 1698 /* dequeue and kick idle ones */
1405 * Rebind idle workers. Interlocked both ways. We wait for
1406 * workers to rebind via @idle_rebind.done. Workers will wait for
1407 * us to finish up by watching %WORKER_REBIND.
1408 */
1409 init_completion(&idle_rebind.done);
1410retry:
1411 idle_rebind.cnt = 1;
1412 INIT_COMPLETION(idle_rebind.done);
1413
1414 /* set REBIND and kick idle ones, we'll wait for these later */
1415 for_each_worker_pool(pool, gcwq) { 1699 for_each_worker_pool(pool, gcwq) {
1416 list_for_each_entry(worker, &pool->idle_list, entry) { 1700 list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
1417 unsigned long worker_flags = worker->flags; 1701 /*
1418 1702 * idle workers should be off @pool->idle_list
1419 if (worker->flags & WORKER_REBIND) 1703 * until rebind is complete to avoid receiving
1420 continue; 1704 * premature local wake-ups.
1421 1705 */
1422 /* morph UNBOUND to REBIND atomically */ 1706 list_del_init(&worker->entry);
1423 worker_flags &= ~WORKER_UNBOUND;
1424 worker_flags |= WORKER_REBIND;
1425 ACCESS_ONCE(worker->flags) = worker_flags;
1426
1427 idle_rebind.cnt++;
1428 worker->idle_rebind = &idle_rebind;
1429 1707
1430 /* worker_thread() will call idle_worker_rebind() */ 1708 /*
1709 * worker_thread() will see the above dequeuing
1710 * and call idle_worker_rebind().
1711 */
1431 wake_up_process(worker->task); 1712 wake_up_process(worker->task);
1432 } 1713 }
1433 } 1714 }
1434 1715
1435 if (--idle_rebind.cnt) { 1716 /* rebind busy workers */
1436 spin_unlock_irq(&gcwq->lock);
1437 wait_for_completion(&idle_rebind.done);
1438 spin_lock_irq(&gcwq->lock);
1439 /* busy ones might have become idle while waiting, retry */
1440 goto retry;
1441 }
1442
1443 /* all idle workers are rebound, rebind busy workers */
1444 for_each_busy_worker(worker, i, pos, gcwq) { 1717 for_each_busy_worker(worker, i, pos, gcwq) {
1445 struct work_struct *rebind_work = &worker->rebind_work; 1718 struct work_struct *rebind_work = &worker->rebind_work;
1446 unsigned long worker_flags = worker->flags; 1719 struct workqueue_struct *wq;
1447
1448 /* morph UNBOUND to REBIND atomically */
1449 worker_flags &= ~WORKER_UNBOUND;
1450 worker_flags |= WORKER_REBIND;
1451 ACCESS_ONCE(worker->flags) = worker_flags;
1452 1720
1453 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, 1721 if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
1454 work_data_bits(rebind_work))) 1722 work_data_bits(rebind_work)))
1455 continue; 1723 continue;
1456 1724
1457 /* wq doesn't matter, use the default one */
1458 debug_work_activate(rebind_work); 1725 debug_work_activate(rebind_work);
1459 insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
1460 worker->scheduled.next,
1461 work_color_to_flags(WORK_NO_COLOR));
1462 }
1463
1464 /*
1465 * All idle workers are rebound and waiting for %WORKER_REBIND to
1466 * be cleared inside idle_worker_rebind(). Clear and release.
1467 * Clearing %WORKER_REBIND from this foreign context is safe
1468 * because these workers are still guaranteed to be idle.
1469 *
1470 * We need to make sure all idle workers passed WORKER_REBIND wait
1471 * in idle_worker_rebind() before returning; otherwise, workers can
1472 * get stuck at the wait if hotplug cycle repeats.
1473 */
1474 idle_rebind.cnt = 1;
1475 INIT_COMPLETION(idle_rebind.done);
1476
1477 for_each_worker_pool(pool, gcwq) {
1478 list_for_each_entry(worker, &pool->idle_list, entry) {
1479 worker->flags &= ~WORKER_REBIND;
1480 idle_rebind.cnt++;
1481 }
1482 }
1483 1726
1484 wake_up_all(&gcwq->rebind_hold); 1727 /*
1728 * wq doesn't really matter but let's keep @worker->pool
1729 * and @cwq->pool consistent for sanity.
1730 */
1731 if (worker_pool_pri(worker->pool))
1732 wq = system_highpri_wq;
1733 else
1734 wq = system_wq;
1485 1735
1486 if (--idle_rebind.cnt) { 1736 insert_work(get_cwq(gcwq->cpu, wq), rebind_work,
1487 spin_unlock_irq(&gcwq->lock); 1737 worker->scheduled.next,
1488 wait_for_completion(&idle_rebind.done); 1738 work_color_to_flags(WORK_NO_COLOR));
1489 spin_lock_irq(&gcwq->lock);
1490 } 1739 }
1491} 1740}
1492 1741
@@ -1844,22 +2093,22 @@ static bool manage_workers(struct worker *worker)
1844 * grab %POOL_MANAGING_WORKERS to achieve this because that can 2093 * grab %POOL_MANAGING_WORKERS to achieve this because that can
1845 * lead to idle worker depletion (all become busy thinking someone 2094 * lead to idle worker depletion (all become busy thinking someone
1846 * else is managing) which in turn can result in deadlock under 2095 * else is managing) which in turn can result in deadlock under
1847 * extreme circumstances. Use @pool->manager_mutex to synchronize 2096 * extreme circumstances. Use @pool->assoc_mutex to synchronize
1848 * manager against CPU hotplug. 2097 * manager against CPU hotplug.
1849 * 2098 *
1850 * manager_mutex would always be free unless CPU hotplug is in 2099 * assoc_mutex would always be free unless CPU hotplug is in
1851 * progress. trylock first without dropping @gcwq->lock. 2100 * progress. trylock first without dropping @gcwq->lock.
1852 */ 2101 */
1853 if (unlikely(!mutex_trylock(&pool->manager_mutex))) { 2102 if (unlikely(!mutex_trylock(&pool->assoc_mutex))) {
1854 spin_unlock_irq(&pool->gcwq->lock); 2103 spin_unlock_irq(&pool->gcwq->lock);
1855 mutex_lock(&pool->manager_mutex); 2104 mutex_lock(&pool->assoc_mutex);
1856 /* 2105 /*
1857 * CPU hotplug could have happened while we were waiting 2106 * CPU hotplug could have happened while we were waiting
1858 * for manager_mutex. Hotplug itself can't handle us 2107 * for assoc_mutex. Hotplug itself can't handle us
1859 * because manager isn't either on idle or busy list, and 2108 * because manager isn't either on idle or busy list, and
1860 * @gcwq's state and ours could have deviated. 2109 * @gcwq's state and ours could have deviated.
1861 * 2110 *
1862 * As hotplug is now excluded via manager_mutex, we can 2111 * As hotplug is now excluded via assoc_mutex, we can
1863 * simply try to bind. It will succeed or fail depending 2112 * simply try to bind. It will succeed or fail depending
1864 * on @gcwq's current state. Try it and adjust 2113 * on @gcwq's current state. Try it and adjust
1865 * %WORKER_UNBOUND accordingly. 2114 * %WORKER_UNBOUND accordingly.
@@ -1882,112 +2131,11 @@ static bool manage_workers(struct worker *worker)
1882 ret |= maybe_create_worker(pool); 2131 ret |= maybe_create_worker(pool);
1883 2132
1884 pool->flags &= ~POOL_MANAGING_WORKERS; 2133 pool->flags &= ~POOL_MANAGING_WORKERS;
1885 mutex_unlock(&pool->manager_mutex); 2134 mutex_unlock(&pool->assoc_mutex);
1886 return ret; 2135 return ret;
1887} 2136}
1888 2137
1889/** 2138/**
1890 * move_linked_works - move linked works to a list
1891 * @work: start of series of works to be scheduled
1892 * @head: target list to append @work to
 1893 * @nextp: out parameter for nested worklist walking
1894 *
1895 * Schedule linked works starting from @work to @head. Work series to
1896 * be scheduled starts at @work and includes any consecutive work with
1897 * WORK_STRUCT_LINKED set in its predecessor.
1898 *
1899 * If @nextp is not NULL, it's updated to point to the next work of
1900 * the last scheduled work. This allows move_linked_works() to be
1901 * nested inside outer list_for_each_entry_safe().
1902 *
1903 * CONTEXT:
1904 * spin_lock_irq(gcwq->lock).
1905 */
1906static void move_linked_works(struct work_struct *work, struct list_head *head,
1907 struct work_struct **nextp)
1908{
1909 struct work_struct *n;
1910
1911 /*
1912 * Linked worklist will always end before the end of the list,
1913 * use NULL for list head.
1914 */
1915 list_for_each_entry_safe_from(work, n, NULL, entry) {
1916 list_move_tail(&work->entry, head);
1917 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1918 break;
1919 }
1920
1921 /*
1922 * If we're already inside safe list traversal and have moved
1923 * multiple works to the scheduled queue, the next position
1924 * needs to be updated.
1925 */
1926 if (nextp)
1927 *nextp = n;
1928}
1929
1930static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1931{
1932 struct work_struct *work = list_first_entry(&cwq->delayed_works,
1933 struct work_struct, entry);
1934
1935 trace_workqueue_activate_work(work);
1936 move_linked_works(work, &cwq->pool->worklist, NULL);
1937 __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1938 cwq->nr_active++;
1939}
1940
1941/**
1942 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1943 * @cwq: cwq of interest
1944 * @color: color of work which left the queue
1945 * @delayed: for a delayed work
1946 *
1947 * A work either has completed or is removed from pending queue,
1948 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1949 *
1950 * CONTEXT:
1951 * spin_lock_irq(gcwq->lock).
1952 */
1953static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1954 bool delayed)
1955{
1956 /* ignore uncolored works */
1957 if (color == WORK_NO_COLOR)
1958 return;
1959
1960 cwq->nr_in_flight[color]--;
1961
1962 if (!delayed) {
1963 cwq->nr_active--;
1964 if (!list_empty(&cwq->delayed_works)) {
1965 /* one down, submit a delayed one */
1966 if (cwq->nr_active < cwq->max_active)
1967 cwq_activate_first_delayed(cwq);
1968 }
1969 }
1970
1971 /* is flush in progress and are we at the flushing tip? */
1972 if (likely(cwq->flush_color != color))
1973 return;
1974
1975 /* are there still in-flight works? */
1976 if (cwq->nr_in_flight[color])
1977 return;
1978
1979 /* this cwq is done, clear flush_color */
1980 cwq->flush_color = -1;
1981
1982 /*
1983 * If this was the last cwq, wake up the first flusher. It
1984 * will handle the rest.
1985 */
1986 if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1987 complete(&cwq->wq->first_flusher->done);
1988}
1989
1990/**
1991 * process_one_work - process single work 2139 * process_one_work - process single work
1992 * @worker: self 2140 * @worker: self
1993 * @work: work to process 2141 * @work: work to process
@@ -2030,7 +2178,7 @@ __acquires(&gcwq->lock)
2030 * necessary to avoid spurious warnings from rescuers servicing the 2178 * necessary to avoid spurious warnings from rescuers servicing the
2031 * unbound or a disassociated gcwq. 2179 * unbound or a disassociated gcwq.
2032 */ 2180 */
2033 WARN_ON_ONCE(!(worker->flags & (WORKER_UNBOUND | WORKER_REBIND)) && 2181 WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
2034 !(gcwq->flags & GCWQ_DISASSOCIATED) && 2182 !(gcwq->flags & GCWQ_DISASSOCIATED) &&
2035 raw_smp_processor_id() != gcwq->cpu); 2183 raw_smp_processor_id() != gcwq->cpu);
2036 2184
@@ -2046,15 +2194,13 @@ __acquires(&gcwq->lock)
2046 return; 2194 return;
2047 } 2195 }
2048 2196
2049 /* claim and process */ 2197 /* claim and dequeue */
2050 debug_work_deactivate(work); 2198 debug_work_deactivate(work);
2051 hlist_add_head(&worker->hentry, bwh); 2199 hlist_add_head(&worker->hentry, bwh);
2052 worker->current_work = work; 2200 worker->current_work = work;
2053 worker->current_cwq = cwq; 2201 worker->current_cwq = cwq;
2054 work_color = get_work_color(work); 2202 work_color = get_work_color(work);
2055 2203
2056 /* record the current cpu number in the work data and dequeue */
2057 set_work_cpu(work, gcwq->cpu);
2058 list_del_init(&work->entry); 2204 list_del_init(&work->entry);
2059 2205
2060 /* 2206 /*
@@ -2071,9 +2217,16 @@ __acquires(&gcwq->lock)
2071 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) 2217 if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
2072 wake_up_worker(pool); 2218 wake_up_worker(pool);
2073 2219
2220 /*
2221 * Record the last CPU and clear PENDING, which should be the last
2222 * update to @work. Also, do this inside @gcwq->lock so that the
2223 * PENDING and queued state changes happen together while IRQs are
2224 * disabled.
2225 */
2226 set_work_cpu_and_clear_pending(work, gcwq->cpu);
2227
2074 spin_unlock_irq(&gcwq->lock); 2228 spin_unlock_irq(&gcwq->lock);
2075 2229
2076 work_clear_pending(work);
2077 lock_map_acquire_read(&cwq->wq->lockdep_map); 2230 lock_map_acquire_read(&cwq->wq->lockdep_map);
2078 lock_map_acquire(&lockdep_map); 2231 lock_map_acquire(&lockdep_map);
2079 trace_workqueue_execute_start(work); 2232 trace_workqueue_execute_start(work);
@@ -2087,11 +2240,9 @@ __acquires(&gcwq->lock)
2087 lock_map_release(&cwq->wq->lockdep_map); 2240 lock_map_release(&cwq->wq->lockdep_map);
2088 2241
2089 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) { 2242 if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2090 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: " 2243 pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2091 "%s/0x%08x/%d\n", 2244 " last function: %pf\n",
2092 current->comm, preempt_count(), task_pid_nr(current)); 2245 current->comm, preempt_count(), task_pid_nr(current), f);
2093 printk(KERN_ERR " last function: ");
2094 print_symbol("%s\n", (unsigned long)f);
2095 debug_show_held_locks(current); 2246 debug_show_held_locks(current);
2096 dump_stack(); 2247 dump_stack();
2097 } 2248 }
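The consolidated pr_err() above folds the old print_symbol() call into one format string via the %pf pointer extension, which prints the symbol name of a function pointer. A minimal sketch of that extension; the helper is hypothetical:

        /* prints e.g. " last function: my_work_fn" */
        static void report_last_fn(work_func_t f)
        {
                pr_err(" last function: %pf\n", f);
        }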
@@ -2106,7 +2257,7 @@ __acquires(&gcwq->lock)
2106 hlist_del_init(&worker->hentry); 2257 hlist_del_init(&worker->hentry);
2107 worker->current_work = NULL; 2258 worker->current_work = NULL;
2108 worker->current_cwq = NULL; 2259 worker->current_cwq = NULL;
2109 cwq_dec_nr_in_flight(cwq, work_color, false); 2260 cwq_dec_nr_in_flight(cwq, work_color);
2110} 2261}
2111 2262
2112/** 2263/**
@@ -2151,18 +2302,17 @@ static int worker_thread(void *__worker)
2151woke_up: 2302woke_up:
2152 spin_lock_irq(&gcwq->lock); 2303 spin_lock_irq(&gcwq->lock);
2153 2304
2154 /* 2305 /* we are off the idle list if destruction or rebind is requested */
2155 * DIE can be set only while idle and REBIND set while busy has 2306 if (unlikely(list_empty(&worker->entry))) {
2156 * @worker->rebind_work scheduled. Checking here is enough.
2157 */
2158 if (unlikely(worker->flags & (WORKER_REBIND | WORKER_DIE))) {
2159 spin_unlock_irq(&gcwq->lock); 2307 spin_unlock_irq(&gcwq->lock);
2160 2308
2309 /* if DIE is set, destruction is requested */
2161 if (worker->flags & WORKER_DIE) { 2310 if (worker->flags & WORKER_DIE) {
2162 worker->task->flags &= ~PF_WQ_WORKER; 2311 worker->task->flags &= ~PF_WQ_WORKER;
2163 return 0; 2312 return 0;
2164 } 2313 }
2165 2314
2315 /* otherwise, rebind */
2166 idle_worker_rebind(worker); 2316 idle_worker_rebind(worker);
2167 goto woke_up; 2317 goto woke_up;
2168 } 2318 }
@@ -2645,8 +2795,8 @@ reflush:
2645 2795
2646 if (++flush_cnt == 10 || 2796 if (++flush_cnt == 10 ||
2647 (flush_cnt % 100 == 0 && flush_cnt <= 1000)) 2797 (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2648 pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n", 2798 pr_warn("workqueue %s: flush on destruction isn't complete after %u tries\n",
2649 wq->name, flush_cnt); 2799 wq->name, flush_cnt);
2650 goto reflush; 2800 goto reflush;
2651 } 2801 }
2652 2802
@@ -2657,8 +2807,7 @@ reflush:
2657} 2807}
2658EXPORT_SYMBOL_GPL(drain_workqueue); 2808EXPORT_SYMBOL_GPL(drain_workqueue);
2659 2809
2660static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, 2810static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2661 bool wait_executing)
2662{ 2811{
2663 struct worker *worker = NULL; 2812 struct worker *worker = NULL;
2664 struct global_cwq *gcwq; 2813 struct global_cwq *gcwq;
@@ -2680,13 +2829,12 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2680 cwq = get_work_cwq(work); 2829 cwq = get_work_cwq(work);
2681 if (unlikely(!cwq || gcwq != cwq->pool->gcwq)) 2830 if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
2682 goto already_gone; 2831 goto already_gone;
2683 } else if (wait_executing) { 2832 } else {
2684 worker = find_worker_executing_work(gcwq, work); 2833 worker = find_worker_executing_work(gcwq, work);
2685 if (!worker) 2834 if (!worker)
2686 goto already_gone; 2835 goto already_gone;
2687 cwq = worker->current_cwq; 2836 cwq = worker->current_cwq;
2688 } else 2837 }
2689 goto already_gone;
2690 2838
2691 insert_wq_barrier(cwq, barr, work, worker); 2839 insert_wq_barrier(cwq, barr, work, worker);
2692 spin_unlock_irq(&gcwq->lock); 2840 spin_unlock_irq(&gcwq->lock);
@@ -2713,15 +2861,8 @@ already_gone:
2713 * flush_work - wait for a work to finish executing the last queueing instance 2861 * flush_work - wait for a work to finish executing the last queueing instance
2714 * @work: the work to flush 2862 * @work: the work to flush
2715 * 2863 *
2716 * Wait until @work has finished execution. This function considers 2864 * Wait until @work has finished execution. @work is guaranteed to be idle
2717 * only the last queueing instance of @work. If @work has been 2865 * on return if it hasn't been requeued since flush started.
2718 * enqueued across different CPUs on a non-reentrant workqueue or on
2719 * multiple workqueues, @work might still be executing on return on
2720 * some of the CPUs from earlier queueing.
2721 *
2722 * If @work was queued only on a non-reentrant, ordered or unbound
2723 * workqueue, @work is guaranteed to be idle on return if it hasn't
2724 * been requeued since flush started.
2725 * 2866 *
2726 * RETURNS: 2867 * RETURNS:
2727 * %true if flush_work() waited for the work to finish execution, 2868 * %true if flush_work() waited for the work to finish execution,
@@ -2734,140 +2875,36 @@ bool flush_work(struct work_struct *work)
2734 lock_map_acquire(&work->lockdep_map); 2875 lock_map_acquire(&work->lockdep_map);
2735 lock_map_release(&work->lockdep_map); 2876 lock_map_release(&work->lockdep_map);
2736 2877
2737 if (start_flush_work(work, &barr, true)) { 2878 if (start_flush_work(work, &barr)) {
2738 wait_for_completion(&barr.done); 2879 wait_for_completion(&barr.done);
2739 destroy_work_on_stack(&barr.work); 2880 destroy_work_on_stack(&barr.work);
2740 return true; 2881 return true;
2741 } else 2882 } else {
2742 return false;
2743}
2744EXPORT_SYMBOL_GPL(flush_work);
2745
2746static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2747{
2748 struct wq_barrier barr;
2749 struct worker *worker;
2750
2751 spin_lock_irq(&gcwq->lock);
2752
2753 worker = find_worker_executing_work(gcwq, work);
2754 if (unlikely(worker))
2755 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2756
2757 spin_unlock_irq(&gcwq->lock);
2758
2759 if (unlikely(worker)) {
2760 wait_for_completion(&barr.done);
2761 destroy_work_on_stack(&barr.work);
2762 return true;
2763 } else
2764 return false; 2883 return false;
2765}
2766
2767static bool wait_on_work(struct work_struct *work)
2768{
2769 bool ret = false;
2770 int cpu;
2771
2772 might_sleep();
2773
2774 lock_map_acquire(&work->lockdep_map);
2775 lock_map_release(&work->lockdep_map);
2776
2777 for_each_gcwq_cpu(cpu)
2778 ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2779 return ret;
2780}
2781
2782/**
2783 * flush_work_sync - wait until a work has finished execution
2784 * @work: the work to flush
2785 *
2786 * Wait until @work has finished execution. On return, it's
2787 * guaranteed that all queueing instances of @work which happened
2788 * before this function is called are finished. In other words, if
2789 * @work hasn't been requeued since this function was called, @work is
2790 * guaranteed to be idle on return.
2791 *
2792 * RETURNS:
2793 * %true if flush_work_sync() waited for the work to finish execution,
2794 * %false if it was already idle.
2795 */
2796bool flush_work_sync(struct work_struct *work)
2797{
2798 struct wq_barrier barr;
2799 bool pending, waited;
2800
2801 /* we'll wait for executions separately, queue barr only if pending */
2802 pending = start_flush_work(work, &barr, false);
2803
2804 /* wait for executions to finish */
2805 waited = wait_on_work(work);
2806
2807 /* wait for the pending one */
2808 if (pending) {
2809 wait_for_completion(&barr.done);
2810 destroy_work_on_stack(&barr.work);
2811 } 2884 }
2812
2813 return pending || waited;
2814}
2815EXPORT_SYMBOL_GPL(flush_work_sync);
2816
2817/*
2818 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
2819 * so this work can't be re-armed in any way.
2820 */
2821static int try_to_grab_pending(struct work_struct *work)
2822{
2823 struct global_cwq *gcwq;
2824 int ret = -1;
2825
2826 if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2827 return 0;
2828
2829 /*
2830 * The queueing is in progress, or it is already queued. Try to
2831 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2832 */
2833 gcwq = get_work_gcwq(work);
2834 if (!gcwq)
2835 return ret;
2836
2837 spin_lock_irq(&gcwq->lock);
2838 if (!list_empty(&work->entry)) {
2839 /*
2840 * This work is queued, but perhaps we locked the wrong gcwq.
2841 * In that case we must see the new value after rmb(), see
2842 * insert_work()->wmb().
2843 */
2844 smp_rmb();
2845 if (gcwq == get_work_gcwq(work)) {
2846 debug_work_deactivate(work);
2847 list_del_init(&work->entry);
2848 cwq_dec_nr_in_flight(get_work_cwq(work),
2849 get_work_color(work),
2850 *work_data_bits(work) & WORK_STRUCT_DELAYED);
2851 ret = 1;
2852 }
2853 }
2854 spin_unlock_irq(&gcwq->lock);
2855
2856 return ret;
2857} 2885}
2886EXPORT_SYMBOL_GPL(flush_work);
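With the wait_executing branch gone, a single flush_work() call covers the common teardown case. A minimal driver-side sketch; struct mydev and its work item are hypothetical:

        struct mydev {
                struct work_struct irq_work;
        };

        static void mydev_quiesce(struct mydev *dev)
        {
                /* on return, the last queued instance has finished running */
                flush_work(&dev->irq_work);
        }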
2858 2887
2859static bool __cancel_work_timer(struct work_struct *work, 2888static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2860 struct timer_list* timer)
2861{ 2889{
2890 unsigned long flags;
2862 int ret; 2891 int ret;
2863 2892
2864 do { 2893 do {
2865 ret = (timer && likely(del_timer(timer))); 2894 ret = try_to_grab_pending(work, is_dwork, &flags);
2866 if (!ret) 2895 /*
2867 ret = try_to_grab_pending(work); 2896 * If someone else is canceling, wait for the same event it
2868 wait_on_work(work); 2897 * would be waiting for before retrying.
2898 */
2899 if (unlikely(ret == -ENOENT))
2900 flush_work(work);
2869 } while (unlikely(ret < 0)); 2901 } while (unlikely(ret < 0));
2870 2902
2903 /* tell other tasks trying to grab @work to back off */
2904 mark_work_canceling(work);
2905 local_irq_restore(flags);
2906
2907 flush_work(work);
2871 clear_work_data(work); 2908 clear_work_data(work);
2872 return ret; 2909 return ret;
2873} 2910}
@@ -2892,7 +2929,7 @@ static bool __cancel_work_timer(struct work_struct *work,
2892 */ 2929 */
2893bool cancel_work_sync(struct work_struct *work) 2930bool cancel_work_sync(struct work_struct *work)
2894{ 2931{
2895 return __cancel_work_timer(work, NULL); 2932 return __cancel_work_timer(work, false);
2896} 2933}
2897EXPORT_SYMBOL_GPL(cancel_work_sync); 2934EXPORT_SYMBOL_GPL(cancel_work_sync);
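Typical use of the wrapper, with hypothetical fields on the sketch device from above; after these calls return, neither work is pending or running:

        static void mydev_remove(struct mydev *dev)
        {
                cancel_work_sync(&dev->irq_work);               /* plain work */
                cancel_delayed_work_sync(&dev->poll_work);      /* timer + work */
        }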
2898 2935
@@ -2910,33 +2947,44 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
2910 */ 2947 */
2911bool flush_delayed_work(struct delayed_work *dwork) 2948bool flush_delayed_work(struct delayed_work *dwork)
2912{ 2949{
2950 local_irq_disable();
2913 if (del_timer_sync(&dwork->timer)) 2951 if (del_timer_sync(&dwork->timer))
2914 __queue_work(raw_smp_processor_id(), 2952 __queue_work(dwork->cpu,
2915 get_work_cwq(&dwork->work)->wq, &dwork->work); 2953 get_work_cwq(&dwork->work)->wq, &dwork->work);
2954 local_irq_enable();
2916 return flush_work(&dwork->work); 2955 return flush_work(&dwork->work);
2917} 2956}
2918EXPORT_SYMBOL(flush_delayed_work); 2957EXPORT_SYMBOL(flush_delayed_work);
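flush_delayed_work() now fires a not-yet-expired timer immediately on the recorded CPU before flushing. A common "sync now" sketch, names hypothetical:

        static void mydev_sync_stats(struct mydev *dev)
        {
                /* run the pending poll right away and wait for it */
                flush_delayed_work(&dev->poll_work);
        }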
2919 2958
2920/** 2959/**
2921 * flush_delayed_work_sync - wait for a dwork to finish 2960 * cancel_delayed_work - cancel a delayed work
2922 * @dwork: the delayed work to flush 2961 * @dwork: delayed_work to cancel
2923 * 2962 *
2924 * Delayed timer is cancelled and the pending work is queued for 2963 * Kill off a pending delayed_work. Returns %true if @dwork was pending
2925 * execution immediately. Other than timer handling, its behavior 2964 * and canceled; %false if it wasn't pending. Note that the work callback
2926 * is identical to flush_work_sync(). 2965 * function may still be running on return, unless this function returns %true and the
2966 * work doesn't re-arm itself. Explicitly flush or use
2967 * cancel_delayed_work_sync() to wait on it.
2927 * 2968 *
2928 * RETURNS: 2969 * This function is safe to call from any context including IRQ handler.
2929 * %true if flush_work_sync() waited for the work to finish execution,
2930 * %false if it was already idle.
2931 */ 2970 */
2932bool flush_delayed_work_sync(struct delayed_work *dwork) 2971bool cancel_delayed_work(struct delayed_work *dwork)
2933{ 2972{
2934 if (del_timer_sync(&dwork->timer)) 2973 unsigned long flags;
2935 __queue_work(raw_smp_processor_id(), 2974 int ret;
2936 get_work_cwq(&dwork->work)->wq, &dwork->work); 2975
2937 return flush_work_sync(&dwork->work); 2976 do {
2977 ret = try_to_grab_pending(&dwork->work, true, &flags);
2978 } while (unlikely(ret == -EAGAIN));
2979
2980 if (unlikely(ret < 0))
2981 return false;
2982
2983 set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
2984 local_irq_restore(flags);
2985 return true;
2938} 2986}
2939EXPORT_SYMBOL(flush_delayed_work_sync); 2987EXPORT_SYMBOL(cancel_delayed_work);
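Since the reworked cancel_delayed_work() never sleeps, it can be called where the old implementation could not, per the "any context including IRQ handler" note above. A hypothetical interrupt-handler sketch, assuming the usual interrupt includes:

        static irqreturn_t mydev_irq(int irq, void *data)
        {
                struct mydev *dev = data;

                /* safe here: cancels if pending, never waits on the callback */
                cancel_delayed_work(&dev->poll_work);
                schedule_work(&dev->irq_work);
                return IRQ_HANDLED;
        }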
2940 2988
2941/** 2989/**
2942 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish 2990 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
@@ -2949,54 +2997,39 @@ EXPORT_SYMBOL(flush_delayed_work_sync);
2949 */ 2997 */
2950bool cancel_delayed_work_sync(struct delayed_work *dwork) 2998bool cancel_delayed_work_sync(struct delayed_work *dwork)
2951{ 2999{
2952 return __cancel_work_timer(&dwork->work, &dwork->timer); 3000 return __cancel_work_timer(&dwork->work, true);
2953} 3001}
2954EXPORT_SYMBOL(cancel_delayed_work_sync); 3002EXPORT_SYMBOL(cancel_delayed_work_sync);
2955 3003
2956/** 3004/**
2957 * schedule_work - put work task in global workqueue
2958 * @work: job to be done
2959 *
2960 * Returns zero if @work was already on the kernel-global workqueue and
2961 * non-zero otherwise.
2962 *
2963 * This puts a job in the kernel-global workqueue if it was not already
2964 * queued and leaves it in the same position on the kernel-global
2965 * workqueue otherwise.
2966 */
2967int schedule_work(struct work_struct *work)
2968{
2969 return queue_work(system_wq, work);
2970}
2971EXPORT_SYMBOL(schedule_work);
2972
2973/*
2974 * schedule_work_on - put work task on a specific cpu 3005 * schedule_work_on - put work task on a specific cpu
2975 * @cpu: cpu to put the work task on 3006 * @cpu: cpu to put the work task on
2976 * @work: job to be done 3007 * @work: job to be done
2977 * 3008 *
2978 * This puts a job on a specific cpu 3009 * This puts a job on a specific cpu
2979 */ 3010 */
2980int schedule_work_on(int cpu, struct work_struct *work) 3011bool schedule_work_on(int cpu, struct work_struct *work)
2981{ 3012{
2982 return queue_work_on(cpu, system_wq, work); 3013 return queue_work_on(cpu, system_wq, work);
2983} 3014}
2984EXPORT_SYMBOL(schedule_work_on); 3015EXPORT_SYMBOL(schedule_work_on);
2985 3016
2986/** 3017/**
2987 * schedule_delayed_work - put work task in global workqueue after delay 3018 * schedule_work - put work task in global workqueue
2988 * @dwork: job to be done 3019 * @work: job to be done
2989 * @delay: number of jiffies to wait or 0 for immediate execution
2990 * 3020 *
2991 * After waiting for a given time this puts a job in the kernel-global 3021 * Returns %false if @work was already on the kernel-global workqueue and
2992 * workqueue. 3022 * %true otherwise.
3023 *
3024 * This puts a job in the kernel-global workqueue if it was not already
3025 * queued and leaves it in the same position on the kernel-global
3026 * workqueue otherwise.
2993 */ 3027 */
2994int schedule_delayed_work(struct delayed_work *dwork, 3028bool schedule_work(struct work_struct *work)
2995 unsigned long delay)
2996{ 3029{
2997 return queue_delayed_work(system_wq, dwork, delay); 3030 return queue_work(system_wq, work);
2998} 3031}
2999EXPORT_SYMBOL(schedule_delayed_work); 3032EXPORT_SYMBOL(schedule_work);
3000 3033
3001/** 3034/**
3002 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay 3035 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
@@ -3007,14 +3040,28 @@ EXPORT_SYMBOL(schedule_delayed_work);
3007 * After waiting for a given time this puts a job in the kernel-global 3040 * After waiting for a given time this puts a job in the kernel-global
3008 * workqueue on the specified CPU. 3041 * workqueue on the specified CPU.
3009 */ 3042 */
3010int schedule_delayed_work_on(int cpu, 3043bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3011 struct delayed_work *dwork, unsigned long delay) 3044 unsigned long delay)
3012{ 3045{
3013 return queue_delayed_work_on(cpu, system_wq, dwork, delay); 3046 return queue_delayed_work_on(cpu, system_wq, dwork, delay);
3014} 3047}
3015EXPORT_SYMBOL(schedule_delayed_work_on); 3048EXPORT_SYMBOL(schedule_delayed_work_on);
3016 3049
3017/** 3050/**
3051 * schedule_delayed_work - put work task in global workqueue after delay
3052 * @dwork: job to be done
3053 * @delay: number of jiffies to wait or 0 for immediate execution
3054 *
3055 * After waiting for a given time this puts a job in the kernel-global
3056 * workqueue.
3057 */
3058bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
3059{
3060 return queue_delayed_work(system_wq, dwork, delay);
3061}
3062EXPORT_SYMBOL(schedule_delayed_work);
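All three wrappers above feed system_wq. An end-to-end sketch; the function and item names are hypothetical:

        static void my_fn(struct work_struct *work)
        {
                pr_info("ran from the kernel-global workqueue\n");
        }

        static DECLARE_WORK(my_work, my_fn);
        static DECLARE_DELAYED_WORK(my_dwork, my_fn);

        static void kick(void)
        {
                schedule_work(&my_work);                /* as soon as a worker is free */
                schedule_delayed_work(&my_dwork, HZ);   /* after roughly one second */
        }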
3063
3064/**
3018 * schedule_on_each_cpu - execute a function synchronously on each online CPU 3065 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3019 * @func: the function to call 3066 * @func: the function to call
3020 * 3067 *
@@ -3161,9 +3208,8 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
3161 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE; 3208 int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3162 3209
3163 if (max_active < 1 || max_active > lim) 3210 if (max_active < 1 || max_active > lim)
3164 printk(KERN_WARNING "workqueue: max_active %d requested for %s " 3211 pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
3165 "is out of range, clamping between %d and %d\n", 3212 max_active, name, 1, lim);
3166 max_active, name, 1, lim);
3167 3213
3168 return clamp_val(max_active, 1, lim); 3214 return clamp_val(max_active, 1, lim);
3169} 3215}
@@ -3319,6 +3365,26 @@ void destroy_workqueue(struct workqueue_struct *wq)
3319EXPORT_SYMBOL_GPL(destroy_workqueue); 3365EXPORT_SYMBOL_GPL(destroy_workqueue);
3320 3366
3321/** 3367/**
3368 * cwq_set_max_active - adjust max_active of a cwq
3369 * @cwq: target cpu_workqueue_struct
3370 * @max_active: new max_active value.
3371 *
3372 * Set @cwq->max_active to @max_active and activate delayed works if
3373 * increased.
3374 *
3375 * CONTEXT:
3376 * spin_lock_irq(gcwq->lock).
3377 */
3378static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active)
3379{
3380 cwq->max_active = max_active;
3381
3382 while (!list_empty(&cwq->delayed_works) &&
3383 cwq->nr_active < cwq->max_active)
3384 cwq_activate_first_delayed(cwq);
3385}
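Both workqueue_set_max_active() (below) and thaw_workqueues() now route through this helper, so raising the limit immediately releases works parked on delayed_works. A hypothetical caller-side view:

        /* mydrv_wq was created with max_active == 1 (fully serialized) */
        static struct workqueue_struct *mydrv_wq;

        static void mydrv_go_parallel(void)
        {
                /* any works parked on delayed_works are activated at once */
                workqueue_set_max_active(mydrv_wq, 16);
        }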
3386
3387/**
3322 * workqueue_set_max_active - adjust max_active of a workqueue 3388 * workqueue_set_max_active - adjust max_active of a workqueue
3323 * @wq: target workqueue 3389 * @wq: target workqueue
3324 * @max_active: new max_active value. 3390 * @max_active: new max_active value.
@@ -3345,7 +3411,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3345 3411
3346 if (!(wq->flags & WQ_FREEZABLE) || 3412 if (!(wq->flags & WQ_FREEZABLE) ||
3347 !(gcwq->flags & GCWQ_FREEZING)) 3413 !(gcwq->flags & GCWQ_FREEZING))
3348 get_cwq(gcwq->cpu, wq)->max_active = max_active; 3414 cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);
3349 3415
3350 spin_unlock_irq(&gcwq->lock); 3416 spin_unlock_irq(&gcwq->lock);
3351 } 3417 }
@@ -3440,23 +3506,23 @@ EXPORT_SYMBOL_GPL(work_busy);
3440 */ 3506 */
3441 3507
3442/* claim manager positions of all pools */ 3508/* claim manager positions of all pools */
3443static void gcwq_claim_management_and_lock(struct global_cwq *gcwq) 3509static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
3444{ 3510{
3445 struct worker_pool *pool; 3511 struct worker_pool *pool;
3446 3512
3447 for_each_worker_pool(pool, gcwq) 3513 for_each_worker_pool(pool, gcwq)
3448 mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools); 3514 mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
3449 spin_lock_irq(&gcwq->lock); 3515 spin_lock_irq(&gcwq->lock);
3450} 3516}
3451 3517
3452/* release manager positions */ 3518/* release manager positions */
3453static void gcwq_release_management_and_unlock(struct global_cwq *gcwq) 3519static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
3454{ 3520{
3455 struct worker_pool *pool; 3521 struct worker_pool *pool;
3456 3522
3457 spin_unlock_irq(&gcwq->lock); 3523 spin_unlock_irq(&gcwq->lock);
3458 for_each_worker_pool(pool, gcwq) 3524 for_each_worker_pool(pool, gcwq)
3459 mutex_unlock(&pool->manager_mutex); 3525 mutex_unlock(&pool->assoc_mutex);
3460} 3526}
3461 3527
3462static void gcwq_unbind_fn(struct work_struct *work) 3528static void gcwq_unbind_fn(struct work_struct *work)
@@ -3469,7 +3535,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
3469 3535
3470 BUG_ON(gcwq->cpu != smp_processor_id()); 3536 BUG_ON(gcwq->cpu != smp_processor_id());
3471 3537
3472 gcwq_claim_management_and_lock(gcwq); 3538 gcwq_claim_assoc_and_lock(gcwq);
3473 3539
3474 /* 3540 /*
3475 * We've claimed all manager positions. Make all workers unbound 3541 * We've claimed all manager positions. Make all workers unbound
@@ -3486,7 +3552,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
3486 3552
3487 gcwq->flags |= GCWQ_DISASSOCIATED; 3553 gcwq->flags |= GCWQ_DISASSOCIATED;
3488 3554
3489 gcwq_release_management_and_unlock(gcwq); 3555 gcwq_release_assoc_and_unlock(gcwq);
3490 3556
3491 /* 3557 /*
3492 * Call schedule() so that we cross rq->lock and thus can guarantee 3558 * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3514,7 +3580,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
3514 * Workqueues should be brought up before normal priority CPU notifiers. 3580 * Workqueues should be brought up before normal priority CPU notifiers.
3515 * This will be registered high priority CPU notifier. 3581 * This will be registered high priority CPU notifier.
3516 */ 3582 */
3517static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb, 3583static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3518 unsigned long action, 3584 unsigned long action,
3519 void *hcpu) 3585 void *hcpu)
3520{ 3586{
@@ -3542,10 +3608,10 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3542 3608
3543 case CPU_DOWN_FAILED: 3609 case CPU_DOWN_FAILED:
3544 case CPU_ONLINE: 3610 case CPU_ONLINE:
3545 gcwq_claim_management_and_lock(gcwq); 3611 gcwq_claim_assoc_and_lock(gcwq);
3546 gcwq->flags &= ~GCWQ_DISASSOCIATED; 3612 gcwq->flags &= ~GCWQ_DISASSOCIATED;
3547 rebind_workers(gcwq); 3613 rebind_workers(gcwq);
3548 gcwq_release_management_and_unlock(gcwq); 3614 gcwq_release_assoc_and_unlock(gcwq);
3549 break; 3615 break;
3550 } 3616 }
3551 return NOTIFY_OK; 3617 return NOTIFY_OK;
@@ -3555,7 +3621,7 @@ static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
3555 * Workqueues should be brought down after normal priority CPU notifiers. 3621 * Workqueues should be brought down after normal priority CPU notifiers.
3556 * This will be registered as low priority CPU notifier. 3622 * This will be registered as low priority CPU notifier.
3557 */ 3623 */
3558static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb, 3624static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3559 unsigned long action, 3625 unsigned long action,
3560 void *hcpu) 3626 void *hcpu)
3561{ 3627{
@@ -3566,7 +3632,7 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
3566 case CPU_DOWN_PREPARE: 3632 case CPU_DOWN_PREPARE:
3567 /* unbinding should happen on the local CPU */ 3633 /* unbinding should happen on the local CPU */
3568 INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn); 3634 INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
3569 schedule_work_on(cpu, &unbind_work); 3635 queue_work_on(cpu, system_highpri_wq, &unbind_work);
3570 flush_work(&unbind_work); 3636 flush_work(&unbind_work);
3571 break; 3637 break;
3572 } 3638 }
@@ -3735,11 +3801,7 @@ void thaw_workqueues(void)
3735 continue; 3801 continue;
3736 3802
3737 /* restore max_active and repopulate worklist */ 3803 /* restore max_active and repopulate worklist */
3738 cwq->max_active = wq->saved_max_active; 3804 cwq_set_max_active(cwq, wq->saved_max_active);
3739
3740 while (!list_empty(&cwq->delayed_works) &&
3741 cwq->nr_active < cwq->max_active)
3742 cwq_activate_first_delayed(cwq);
3743 } 3805 }
3744 3806
3745 for_each_worker_pool(pool, gcwq) 3807 for_each_worker_pool(pool, gcwq)
@@ -3759,8 +3821,12 @@ static int __init init_workqueues(void)
3759 unsigned int cpu; 3821 unsigned int cpu;
3760 int i; 3822 int i;
3761 3823
3824 /* make sure we have enough bits for OFFQ CPU number */
3825 BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
3826 WORK_CPU_LAST);
3827
3762 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); 3828 cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
3763 cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); 3829 hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
3764 3830
3765 /* initialize gcwqs */ 3831 /* initialize gcwqs */
3766 for_each_gcwq_cpu(cpu) { 3832 for_each_gcwq_cpu(cpu) {
@@ -3786,11 +3852,9 @@ static int __init init_workqueues(void)
3786 setup_timer(&pool->mayday_timer, gcwq_mayday_timeout, 3852 setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
3787 (unsigned long)pool); 3853 (unsigned long)pool);
3788 3854
3789 mutex_init(&pool->manager_mutex); 3855 mutex_init(&pool->assoc_mutex);
3790 ida_init(&pool->worker_ida); 3856 ida_init(&pool->worker_ida);
3791 } 3857 }
3792
3793 init_waitqueue_head(&gcwq->rebind_hold);
3794 } 3858 }
3795 3859
3796 /* create the initial worker */ 3860 /* create the initial worker */
@@ -3813,17 +3877,14 @@ static int __init init_workqueues(void)
3813 } 3877 }
3814 3878
3815 system_wq = alloc_workqueue("events", 0, 0); 3879 system_wq = alloc_workqueue("events", 0, 0);
3880 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
3816 system_long_wq = alloc_workqueue("events_long", 0, 0); 3881 system_long_wq = alloc_workqueue("events_long", 0, 0);
3817 system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3818 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND, 3882 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3819 WQ_UNBOUND_MAX_ACTIVE); 3883 WQ_UNBOUND_MAX_ACTIVE);
3820 system_freezable_wq = alloc_workqueue("events_freezable", 3884 system_freezable_wq = alloc_workqueue("events_freezable",
3821 WQ_FREEZABLE, 0); 3885 WQ_FREEZABLE, 0);
3822 system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable", 3886 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
3823 WQ_NON_REENTRANT | WQ_FREEZABLE, 0); 3887 !system_unbound_wq || !system_freezable_wq);
3824 BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3825 !system_unbound_wq || !system_freezable_wq ||
3826 !system_nrt_freezable_wq);
3827 return 0; 3888 return 0;
3828} 3889}
3829early_initcall(init_workqueues); 3890early_initcall(init_workqueues);
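system_highpri_wq is the new global high-priority queue used by the CPU-down path above; latency-sensitive work can target it instead of allocating a private WQ_HIGHPRI workqueue. A minimal sketch with a hypothetical caller:

        static void kick_urgent(struct work_struct *work)
        {
                /* served by the bound high-priority worker pool */
                queue_work(system_highpri_wq, work);
        }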
diff --git a/mm/slab.c b/mm/slab.c
index c6854759bcf1..11339110271e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -900,7 +900,7 @@ static void __cpuinit start_cpu_timer(int cpu)
900 */ 900 */
901 if (keventd_up() && reap_work->work.func == NULL) { 901 if (keventd_up() && reap_work->work.func == NULL) {
902 init_reap_node(cpu); 902 init_reap_node(cpu);
903 INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap); 903 INIT_DEFERRABLE_WORK(reap_work, cache_reap);
904 schedule_delayed_work_on(cpu, reap_work, 904 schedule_delayed_work_on(cpu, reap_work,
905 __round_jiffies_relative(HZ, cpu)); 905 __round_jiffies_relative(HZ, cpu));
906 } 906 }
diff --git a/mm/vmstat.c b/mm/vmstat.c
index df7a6748231d..b3e3b9d525d0 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1157,7 +1157,7 @@ static void __cpuinit start_cpu_timer(int cpu)
1157{ 1157{
1158 struct delayed_work *work = &per_cpu(vmstat_work, cpu); 1158 struct delayed_work *work = &per_cpu(vmstat_work, cpu);
1159 1159
1160 INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update); 1160 INIT_DEFERRABLE_WORK(work, vmstat_update);
1161 schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu)); 1161 schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
1162} 1162}
1163 1163
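INIT_DEFERRABLE_WORK() is a straight rename of INIT_DELAYED_WORK_DEFERRABLE() with identical arguments; a deferrable work's timer will not wake an idle CPU on its own. Minimal sketch, names hypothetical:

        static struct delayed_work my_gc_work;

        static void my_gc(struct work_struct *work)
        {
                /* ... periodic housekeeping ... */
                schedule_delayed_work(&my_gc_work, round_jiffies_relative(HZ));
        }

        static void my_gc_start(void)
        {
                /* the timer may slip while the CPU idles: fine for housekeeping */
                INIT_DEFERRABLE_WORK(&my_gc_work, my_gc);
                schedule_delayed_work(&my_gc_work, HZ);
        }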
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 6449bae15702..505f0ce3f10b 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -1083,7 +1083,7 @@ int p9_trans_fd_init(void)
1083 1083
1084void p9_trans_fd_exit(void) 1084void p9_trans_fd_exit(void)
1085{ 1085{
1086 flush_work_sync(&p9_poll_work); 1086 flush_work(&p9_poll_work);
1087 v9fs_unregister_trans(&p9_tcp_trans); 1087 v9fs_unregister_trans(&p9_tcp_trans);
1088 v9fs_unregister_trans(&p9_unix_trans); 1088 v9fs_unregister_trans(&p9_unix_trans);
1089 v9fs_unregister_trans(&p9_fd_trans); 1089 v9fs_unregister_trans(&p9_fd_trans);
diff --git a/net/core/dst.c b/net/core/dst.c
index 56d63612e1e4..b8d7c700541d 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -222,8 +222,8 @@ void __dst_free(struct dst_entry *dst)
222 if (dst_garbage.timer_inc > DST_GC_INC) { 222 if (dst_garbage.timer_inc > DST_GC_INC) {
223 dst_garbage.timer_inc = DST_GC_INC; 223 dst_garbage.timer_inc = DST_GC_INC;
224 dst_garbage.timer_expires = DST_GC_MIN; 224 dst_garbage.timer_expires = DST_GC_MIN;
225 cancel_delayed_work(&dst_gc_work); 225 mod_delayed_work(system_wq, &dst_gc_work,
226 schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires); 226 dst_garbage.timer_expires);
227 } 227 }
228 spin_unlock_bh(&dst_garbage.lock); 228 spin_unlock_bh(&dst_garbage.lock);
229} 229}
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index c3519c6d1b16..8e397a69005a 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -120,22 +120,13 @@ static void linkwatch_schedule_work(int urgent)
120 delay = 0; 120 delay = 0;
121 121
122 /* 122 /*
123 * This is true if we've scheduled it immeditately or if we don't 123 * If urgent, schedule immediate execution; otherwise, don't
124 * need an immediate execution and it's already pending. 124 * override the existing timer.
125 */ 125 */
126 if (schedule_delayed_work(&linkwatch_work, delay) == !delay) 126 if (test_bit(LW_URGENT, &linkwatch_flags))
127 return; 127 mod_delayed_work(system_wq, &linkwatch_work, 0);
128 128 else
129 /* Don't bother if there is nothing urgent. */ 129 schedule_delayed_work(&linkwatch_work, delay);
130 if (!test_bit(LW_URGENT, &linkwatch_flags))
131 return;
132
133 /* It's already running which is good enough. */
134 if (!__cancel_delayed_work(&linkwatch_work))
135 return;
136
137 /* Otherwise we reschedule it again for immediate execution. */
138 schedule_delayed_work(&linkwatch_work, 0);
139} 130}
140 131
141 132
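mod_delayed_work() gives delayed works the mod_timer()-style "last call wins" semantics that the removed cancel-then-schedule pairs only approximated, and it works whether or not the work is currently queued. A sketch reusing the hypothetical work item from above:

        static void my_gc_retune(unsigned long new_delay)
        {
                /* queues if idle, otherwise just moves the timer */
                mod_delayed_work(system_wq, &my_gc_work, new_delay);
        }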
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 117afaf51268..112c6e2266e9 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1545,7 +1545,7 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1545 panic("cannot allocate neighbour cache hashes"); 1545 panic("cannot allocate neighbour cache hashes");
1546 1546
1547 rwlock_init(&tbl->lock); 1547 rwlock_init(&tbl->lock);
1548 INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work); 1548 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1549 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time); 1549 schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
1550 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl); 1550 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1551 skb_queue_head_init_class(&tbl->proxy_queue, 1551 skb_queue_head_init_class(&tbl->proxy_queue,
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 88e7c2f3fa0d..45295ca09571 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -370,7 +370,7 @@ static int dsa_remove(struct platform_device *pdev)
370 if (dst->link_poll_needed) 370 if (dst->link_poll_needed)
371 del_timer_sync(&dst->link_poll_timer); 371 del_timer_sync(&dst->link_poll_timer);
372 372
373 flush_work_sync(&dst->link_poll_work); 373 flush_work(&dst->link_poll_work);
374 374
375 for (i = 0; i < dst->pd->nr_chips; i++) { 375 for (i = 0; i < dst->pd->nr_chips; i++) {
376 struct dsa_switch *ds = dst->ds[i]; 376 struct dsa_switch *ds = dst->ds[i];
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index c7527f6b9ad9..000e3d239d64 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -194,7 +194,7 @@ void __init inet_initpeers(void)
194 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 194 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
195 NULL); 195 NULL);
196 196
197 INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker); 197 INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
198} 198}
199 199
200static int addr_compare(const struct inetpeer_addr *a, 200static int addr_compare(const struct inetpeer_addr *a,
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 24c55c53e6a2..c9d931e7ffec 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -164,8 +164,7 @@ static void rfkill_schedule_global_op(enum rfkill_sched_op op)
164 rfkill_op_pending = true; 164 rfkill_op_pending = true;
165 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) { 165 if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
166 /* bypass the limiter for EPO */ 166 /* bypass the limiter for EPO */
167 cancel_delayed_work(&rfkill_op_work); 167 mod_delayed_work(system_wq, &rfkill_op_work, 0);
168 schedule_delayed_work(&rfkill_op_work, 0);
169 rfkill_last_scheduled = jiffies; 168 rfkill_last_scheduled = jiffies;
170 } else 169 } else
171 rfkill_schedule_ratelimited(); 170 rfkill_schedule_ratelimited();
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2afd2a84dc35..2a68bb3db772 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1635,7 +1635,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
1635 1635
1636void __init cache_initialize(void) 1636void __init cache_initialize(void)
1637{ 1637{
1638 INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean); 1638 INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
1639} 1639}
1640 1640
1641int cache_register_net(struct cache_detail *cd, struct net *net) 1641int cache_register_net(struct cache_detail *cd, struct net *net)
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 61ab7c82ebb1..d67c97bb1025 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -62,7 +62,7 @@ void key_schedule_gc(time_t gc_at)
62 62
63 if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) { 63 if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
64 kdebug("IMMEDIATE"); 64 kdebug("IMMEDIATE");
65 queue_work(system_nrt_wq, &key_gc_work); 65 schedule_work(&key_gc_work);
66 } else if (gc_at < key_gc_next_run) { 66 } else if (gc_at < key_gc_next_run) {
67 kdebug("DEFERRED"); 67 kdebug("DEFERRED");
68 key_gc_next_run = gc_at; 68 key_gc_next_run = gc_at;
@@ -77,7 +77,7 @@ void key_schedule_gc(time_t gc_at)
77void key_schedule_gc_links(void) 77void key_schedule_gc_links(void)
78{ 78{
79 set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags); 79 set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
80 queue_work(system_nrt_wq, &key_gc_work); 80 schedule_work(&key_gc_work);
81} 81}
82 82
83/* 83/*
@@ -120,7 +120,7 @@ void key_gc_keytype(struct key_type *ktype)
120 set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags); 120 set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
121 121
122 kdebug("schedule"); 122 kdebug("schedule");
123 queue_work(system_nrt_wq, &key_gc_work); 123 schedule_work(&key_gc_work);
124 124
125 kdebug("sleep"); 125 kdebug("sleep");
126 wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit, 126 wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
@@ -369,7 +369,7 @@ maybe_resched:
369 } 369 }
370 370
371 if (gc_state & KEY_GC_REAP_AGAIN) 371 if (gc_state & KEY_GC_REAP_AGAIN)
372 queue_work(system_nrt_wq, &key_gc_work); 372 schedule_work(&key_gc_work);
373 kleave(" [end %x]", gc_state); 373 kleave(" [end %x]", gc_state);
374 return; 374 return;
375 375
diff --git a/security/keys/key.c b/security/keys/key.c
index 50d96d4e06f2..3cbe3529c418 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -598,7 +598,7 @@ void key_put(struct key *key)
598 key_check(key); 598 key_check(key);
599 599
600 if (atomic_dec_and_test(&key->usage)) 600 if (atomic_dec_and_test(&key->usage))
601 queue_work(system_nrt_wq, &key_gc_work); 601 schedule_work(&key_gc_work);
602 } 602 }
603} 603}
604EXPORT_SYMBOL(key_put); 604EXPORT_SYMBOL(key_put);
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index dde5c9c92132..ef68d710d08c 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -141,7 +141,7 @@ void snd_ak4113_reinit(struct ak4113 *chip)
141{ 141{
142 chip->init = 1; 142 chip->init = 1;
143 mb(); 143 mb();
144 flush_delayed_work_sync(&chip->work); 144 flush_delayed_work(&chip->work);
145 ak4113_init_regs(chip); 145 ak4113_init_regs(chip);
146 /* bring up statistics / event queing */ 146 /* bring up statistics / event queing */
147 chip->init = 0; 147 chip->init = 0;
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index fdf3c1b65e38..816e7d225fb0 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -154,7 +154,7 @@ void snd_ak4114_reinit(struct ak4114 *chip)
154{ 154{
155 chip->init = 1; 155 chip->init = 1;
156 mb(); 156 mb();
157 flush_delayed_work_sync(&chip->work); 157 flush_delayed_work(&chip->work);
158 ak4114_init_regs(chip); 158 ak4114_init_regs(chip);
159 /* bring up statistics / event queing */ 159 /* bring up statistics / event queing */
160 chip->init = 0; 160 chip->init = 0;
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index ab8738e21ad1..e9fa2d07951d 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -573,8 +573,8 @@ static void oxygen_card_free(struct snd_card *card)
573 oxygen_shutdown(chip); 573 oxygen_shutdown(chip);
574 if (chip->irq >= 0) 574 if (chip->irq >= 0)
575 free_irq(chip->irq, chip); 575 free_irq(chip->irq, chip);
576 flush_work_sync(&chip->spdif_input_bits_work); 576 flush_work(&chip->spdif_input_bits_work);
577 flush_work_sync(&chip->gpio_work); 577 flush_work(&chip->gpio_work);
578 chip->model.cleanup(chip); 578 chip->model.cleanup(chip);
579 kfree(chip->model_data); 579 kfree(chip->model_data);
580 mutex_destroy(&chip->mutex); 580 mutex_destroy(&chip->mutex);
@@ -751,8 +751,8 @@ static int oxygen_pci_suspend(struct device *dev)
751 spin_unlock_irq(&chip->reg_lock); 751 spin_unlock_irq(&chip->reg_lock);
752 752
753 synchronize_irq(chip->irq); 753 synchronize_irq(chip->irq);
754 flush_work_sync(&chip->spdif_input_bits_work); 754 flush_work(&chip->spdif_input_bits_work);
755 flush_work_sync(&chip->gpio_work); 755 flush_work(&chip->gpio_work);
756 chip->interrupt_mask = saved_interrupt_mask; 756 chip->interrupt_mask = saved_interrupt_mask;
757 757
758 pci_disable_device(pci); 758 pci_disable_device(pci);
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index d26c8ae4e6d9..a4cae060bf26 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1601,7 +1601,7 @@ static int wm8350_codec_remove(struct snd_soc_codec *codec)
1601 1601
1602 /* if there was any work waiting then we run it now and 1602 /* if there was any work waiting then we run it now and
1603 * wait for its completion */ 1603 * wait for its completion */
1604 flush_delayed_work_sync(&codec->dapm.delayed_work); 1604 flush_delayed_work(&codec->dapm.delayed_work);
1605 1605
1606 wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF); 1606 wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF);
1607 1607
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 13bff87ddcf5..2e4a775ae560 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1509,7 +1509,7 @@ static int wm8753_probe(struct snd_soc_codec *codec)
1509/* power down chip */ 1509/* power down chip */
1510static int wm8753_remove(struct snd_soc_codec *codec) 1510static int wm8753_remove(struct snd_soc_codec *codec)
1511{ 1511{
1512 flush_delayed_work_sync(&codec->dapm.delayed_work); 1512 flush_delayed_work(&codec->dapm.delayed_work);
1513 wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF); 1513 wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
1514 1514
1515 return 0; 1515 return 0;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index c501af6d8dbe..cf3d0b0c71b9 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -591,7 +591,7 @@ int snd_soc_suspend(struct device *dev)
591 591
592 /* close any waiting streams and save state */ 592 /* close any waiting streams and save state */
593 for (i = 0; i < card->num_rtd; i++) { 593 for (i = 0; i < card->num_rtd; i++) {
594 flush_delayed_work_sync(&card->rtd[i].delayed_work); 594 flush_delayed_work(&card->rtd[i].delayed_work);
595 card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level; 595 card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level;
596 } 596 }
597 597
@@ -1848,7 +1848,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
1848 /* make sure any delayed work runs */ 1848 /* make sure any delayed work runs */
1849 for (i = 0; i < card->num_rtd; i++) { 1849 for (i = 0; i < card->num_rtd; i++) {
1850 struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; 1850 struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
1851 flush_delayed_work_sync(&rtd->delayed_work); 1851 flush_delayed_work(&rtd->delayed_work);
1852 } 1852 }
1853 1853
1854 /* remove auxiliary devices */ 1854 /* remove auxiliary devices */
@@ -1892,7 +1892,7 @@ int snd_soc_poweroff(struct device *dev)
1892 * now, we're shutting down so no imminent restart. */ 1892 * now, we're shutting down so no imminent restart. */
1893 for (i = 0; i < card->num_rtd; i++) { 1893 for (i = 0; i < card->num_rtd; i++) {
1894 struct snd_soc_pcm_runtime *rtd = &card->rtd[i]; 1894 struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
1895 flush_delayed_work_sync(&rtd->delayed_work); 1895 flush_delayed_work(&rtd->delayed_work);
1896 } 1896 }
1897 1897
1898 snd_soc_dapm_shutdown(card); 1898 snd_soc_dapm_shutdown(card);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 7d7e2aaffece..67a35e90384c 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -90,7 +90,7 @@ irqfd_shutdown(struct work_struct *work)
90 * We know no new events will be scheduled at this point, so block 90 * We know no new events will be scheduled at this point, so block
91 * until all previously outstanding events have completed 91 * until all previously outstanding events have completed
92 */ 92 */
93 flush_work_sync(&irqfd->inject); 93 flush_work(&irqfd->inject);
94 94
95 /* 95 /*
96 * It is now safe to release the object's resources 96 * It is now safe to release the object's resources